ngram (list lengths: 0 to 82k)
[ "sentence using the 'textblob' library.\"\"\" blob = TextBlob(sentence) return blob.noun_phrases", "using the 'textblob' library.\"\"\" blob = TextBlob(sentence) return blob.noun_phrases def", "try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: pass else: ssl._create_default_https_context =", "the nouns from a sentence using the 'textblob' library.\"\"\" blob", "blob = TextBlob(sentence) return blob.noun_phrases def remove_stopwords(sentence): \"\"\"Remove stopwords from", "sentence and return the list of words.\"\"\" blob = TextBlob(sentence)", "TextBlob(sentence) return [word for word in blob.words if word not", "def extract_nouns(sentence): \"\"\"Extract the nouns from a sentence using the", "pass else: ssl._create_default_https_context = _create_unverified_https_context # download noun data (if", "SSL try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: pass else: ssl._create_default_https_context", "_create_unverified_https_context # download noun data (if required) nltk.download('brown') nltk.download('punkt') nltk.download('stopwords')", "textblob import TextBlob from nltk.corpus import stopwords # set SSL", "nltk from textblob import TextBlob from nltk.corpus import stopwords #", "extract_nouns(sentence): \"\"\"Extract the nouns from a sentence using the 'textblob'", "return blob.noun_phrases def remove_stopwords(sentence): \"\"\"Remove stopwords from a sentence and", "= TextBlob(sentence) return [word for word in blob.words if word", "from a sentence using the 'textblob' library.\"\"\" blob = TextBlob(sentence)", "# set SSL try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: pass", "nltk.download('brown') nltk.download('punkt') nltk.download('stopwords') def extract_nouns(sentence): \"\"\"Extract the nouns from a", "ssl import nltk from textblob import TextBlob from nltk.corpus import", "TextBlob(sentence) return blob.noun_phrases def remove_stopwords(sentence): \"\"\"Remove stopwords from a sentence", "stopwords from a sentence and return the list of words.\"\"\"", "return [word for word in blob.words if word not in", "import nltk from textblob import TextBlob from nltk.corpus import stopwords", "else: ssl._create_default_https_context = _create_unverified_https_context # download noun data (if required)", "data (if required) nltk.download('brown') nltk.download('punkt') nltk.download('stopwords') def extract_nouns(sentence): \"\"\"Extract the", "the 'textblob' library.\"\"\" blob = TextBlob(sentence) return blob.noun_phrases def remove_stopwords(sentence):", "noun data (if required) nltk.download('brown') nltk.download('punkt') nltk.download('stopwords') def extract_nouns(sentence): \"\"\"Extract", "required) nltk.download('brown') nltk.download('punkt') nltk.download('stopwords') def extract_nouns(sentence): \"\"\"Extract the nouns from", "\"\"\"Extract the nouns from a sentence using the 'textblob' library.\"\"\"", "nltk.download('stopwords') def extract_nouns(sentence): \"\"\"Extract the nouns from a sentence using", "= ssl._create_unverified_context except AttributeError: pass else: ssl._create_default_https_context = _create_unverified_https_context #", "'textblob' library.\"\"\" blob = TextBlob(sentence) return blob.noun_phrases def remove_stopwords(sentence): \"\"\"Remove", "a sentence and return the list of words.\"\"\" blob =", "from textblob import TextBlob from nltk.corpus import stopwords # set", "import TextBlob from nltk.corpus import 
stopwords # set SSL try:", "ssl._create_unverified_context except AttributeError: pass else: ssl._create_default_https_context = _create_unverified_https_context # download", "import stopwords # set SSL try: _create_unverified_https_context = ssl._create_unverified_context except", "= _create_unverified_https_context # download noun data (if required) nltk.download('brown') nltk.download('punkt')", "def remove_stopwords(sentence): \"\"\"Remove stopwords from a sentence and return the", "remove_stopwords(sentence): \"\"\"Remove stopwords from a sentence and return the list", "library.\"\"\" blob = TextBlob(sentence) return blob.noun_phrases def remove_stopwords(sentence): \"\"\"Remove stopwords", "from a sentence and return the list of words.\"\"\" blob", "word in blob.words if word not in stopwords.words('english') and len(word)>2]", "stopwords # set SSL try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError:", "blob = TextBlob(sentence) return [word for word in blob.words if", "except AttributeError: pass else: ssl._create_default_https_context = _create_unverified_https_context # download noun", "import ssl import nltk from textblob import TextBlob from nltk.corpus", "nltk.corpus import stopwords # set SSL try: _create_unverified_https_context = ssl._create_unverified_context", "set SSL try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: pass else:", "ssl._create_default_https_context = _create_unverified_https_context # download noun data (if required) nltk.download('brown')", "# download noun data (if required) nltk.download('brown') nltk.download('punkt') nltk.download('stopwords') def", "list of words.\"\"\" blob = TextBlob(sentence) return [word for word", "download noun data (if required) nltk.download('brown') nltk.download('punkt') nltk.download('stopwords') def extract_nouns(sentence):", "return the list of words.\"\"\" blob = TextBlob(sentence) return [word", "the list of words.\"\"\" blob = TextBlob(sentence) return [word for", "for word in blob.words if word not in stopwords.words('english') and", "TextBlob from nltk.corpus import stopwords # set SSL try: _create_unverified_https_context", "(if required) nltk.download('brown') nltk.download('punkt') nltk.download('stopwords') def extract_nouns(sentence): \"\"\"Extract the nouns", "= TextBlob(sentence) return blob.noun_phrases def remove_stopwords(sentence): \"\"\"Remove stopwords from a", "and return the list of words.\"\"\" blob = TextBlob(sentence) return", "\"\"\"Remove stopwords from a sentence and return the list of", "words.\"\"\" blob = TextBlob(sentence) return [word for word in blob.words", "_create_unverified_https_context = ssl._create_unverified_context except AttributeError: pass else: ssl._create_default_https_context = _create_unverified_https_context", "[word for word in blob.words if word not in stopwords.words('english')", "AttributeError: pass else: ssl._create_default_https_context = _create_unverified_https_context # download noun data", "blob.noun_phrases def remove_stopwords(sentence): \"\"\"Remove stopwords from a sentence and return", "nouns from a sentence using the 'textblob' library.\"\"\" blob =", "a sentence using the 'textblob' library.\"\"\" blob = TextBlob(sentence) return", "of words.\"\"\" blob = TextBlob(sentence) return [word for word in", "from nltk.corpus import stopwords # set SSL try: _create_unverified_https_context =", "nltk.download('punkt') nltk.download('stopwords') def extract_nouns(sentence): 
\"\"\"Extract the nouns from a sentence" ]
[ "= IntervalSchedule.objects.get_or_create( every=6, period=IntervalSchedule.HOURS, ) except IntegrityError as e: pass", "except IntegrityError as e: pass try: schedule_video, created = IntervalSchedule.objects.get_or_create(", "django.db import IntegrityError class Command(BaseCommand): def handle(self, *args, **options): try:", "IntervalSchedule.objects.get_or_create( every=6, period=IntervalSchedule.HOURS, ) except IntegrityError as e: pass try:", "try: schedule_video, created = IntervalSchedule.objects.get_or_create( every=6, period=IntervalSchedule.HOURS, ) except IntegrityError", ") except IntegrityError as e: pass try: PeriodicTask.objects.create( interval=schedule_video, name='Scrape", "pass try: PeriodicTask.objects.create( interval=schedule_video, name='Scrape Videos', task='toolbox.scraper.tasks.scrape_youtube_videos', ) except IntegrityError", "as e: pass try: schedule_video, created = IntervalSchedule.objects.get_or_create( every=6, period=IntervalSchedule.HOURS,", "import PeriodicTask, IntervalSchedule from django.core.management.base import BaseCommand from django.db import", "IntegrityError as e: pass try: schedule_video, created = IntervalSchedule.objects.get_or_create( every=6,", "BaseCommand from django.db import IntegrityError class Command(BaseCommand): def handle(self, *args,", "import BaseCommand from django.db import IntegrityError class Command(BaseCommand): def handle(self,", "django_celery_beat.models import PeriodicTask, IntervalSchedule from django.core.management.base import BaseCommand from django.db", "e: pass try: PeriodicTask.objects.create( interval=schedule_video, name='Scrape Videos', task='toolbox.scraper.tasks.scrape_youtube_videos', ) except", "IntegrityError class Command(BaseCommand): def handle(self, *args, **options): try: schedule_channel, created", "def handle(self, *args, **options): try: schedule_channel, created = IntervalSchedule.objects.get_or_create( every=4,", "PeriodicTask.objects.create( interval=schedule_video, name='Scrape Videos', task='toolbox.scraper.tasks.scrape_youtube_videos', ) except IntegrityError as e:", "as e: pass try: PeriodicTask.objects.create( interval=schedule_channel, name='Scrape Channels', task='toolbox.scraper.tasks.scrape_youtube_channels', )", "Channels', task='toolbox.scraper.tasks.scrape_youtube_channels', ) except IntegrityError as e: pass try: PeriodicTask.objects.create(", ") except IntegrityError as e: pass try: schedule_video, created =", "class Command(BaseCommand): def handle(self, *args, **options): try: schedule_channel, created =", "pass try: schedule_video, created = IntervalSchedule.objects.get_or_create( every=6, period=IntervalSchedule.HOURS, ) except", "except IntegrityError as e: pass try: PeriodicTask.objects.create( interval=schedule_video, name='Scrape Videos',", "IntervalSchedule.objects.get_or_create( every=4, period=IntervalSchedule.HOURS, ) except IntegrityError as e: pass try:", "try: PeriodicTask.objects.create( interval=schedule_video, name='Scrape Videos', task='toolbox.scraper.tasks.scrape_youtube_videos', ) except IntegrityError as", "task='toolbox.scraper.tasks.scrape_youtube_channels', ) except IntegrityError as e: pass try: PeriodicTask.objects.create( interval=schedule_video,", "e: pass try: PeriodicTask.objects.create( interval=schedule_channel, name='Scrape Channels', task='toolbox.scraper.tasks.scrape_youtube_channels', ) except", "pass try: PeriodicTask.objects.create( interval=schedule_channel, name='Scrape Channels', task='toolbox.scraper.tasks.scrape_youtube_channels', 
) except IntegrityError", ") except IntegrityError as e: pass try: PeriodicTask.objects.create( interval=schedule_channel, name='Scrape", "from django.db import IntegrityError class Command(BaseCommand): def handle(self, *args, **options):", "e: pass try: schedule_video, created = IntervalSchedule.objects.get_or_create( every=6, period=IntervalSchedule.HOURS, )", "PeriodicTask, IntervalSchedule from django.core.management.base import BaseCommand from django.db import IntegrityError", "interval=schedule_channel, name='Scrape Channels', task='toolbox.scraper.tasks.scrape_youtube_channels', ) except IntegrityError as e: pass", "IntegrityError as e: pass try: PeriodicTask.objects.create( interval=schedule_video, name='Scrape Videos', task='toolbox.scraper.tasks.scrape_youtube_videos',", "schedule_channel, created = IntervalSchedule.objects.get_or_create( every=4, period=IntervalSchedule.HOURS, ) except IntegrityError as", "as e: pass try: PeriodicTask.objects.create( interval=schedule_video, name='Scrape Videos', task='toolbox.scraper.tasks.scrape_youtube_videos', )", "created = IntervalSchedule.objects.get_or_create( every=4, period=IntervalSchedule.HOURS, ) except IntegrityError as e:", "IntervalSchedule from django.core.management.base import BaseCommand from django.db import IntegrityError class", "Command(BaseCommand): def handle(self, *args, **options): try: schedule_channel, created = IntervalSchedule.objects.get_or_create(", "PeriodicTask.objects.create( interval=schedule_channel, name='Scrape Channels', task='toolbox.scraper.tasks.scrape_youtube_channels', ) except IntegrityError as e:", "try: schedule_channel, created = IntervalSchedule.objects.get_or_create( every=4, period=IntervalSchedule.HOURS, ) except IntegrityError", "except IntegrityError as e: pass try: PeriodicTask.objects.create( interval=schedule_channel, name='Scrape Channels',", "*args, **options): try: schedule_channel, created = IntervalSchedule.objects.get_or_create( every=4, period=IntervalSchedule.HOURS, )", "django.core.management.base import BaseCommand from django.db import IntegrityError class Command(BaseCommand): def", "import IntegrityError class Command(BaseCommand): def handle(self, *args, **options): try: schedule_channel,", "**options): try: schedule_channel, created = IntervalSchedule.objects.get_or_create( every=4, period=IntervalSchedule.HOURS, ) except", "handle(self, *args, **options): try: schedule_channel, created = IntervalSchedule.objects.get_or_create( every=4, period=IntervalSchedule.HOURS,", "period=IntervalSchedule.HOURS, ) except IntegrityError as e: pass try: PeriodicTask.objects.create( interval=schedule_channel,", "schedule_video, created = IntervalSchedule.objects.get_or_create( every=6, period=IntervalSchedule.HOURS, ) except IntegrityError as", "every=6, period=IntervalSchedule.HOURS, ) except IntegrityError as e: pass try: PeriodicTask.objects.create(", "IntegrityError as e: pass try: PeriodicTask.objects.create( interval=schedule_channel, name='Scrape Channels', task='toolbox.scraper.tasks.scrape_youtube_channels',", "= IntervalSchedule.objects.get_or_create( every=4, period=IntervalSchedule.HOURS, ) except IntegrityError as e: pass", "every=4, period=IntervalSchedule.HOURS, ) except IntegrityError as e: pass try: schedule_video,", "period=IntervalSchedule.HOURS, ) except IntegrityError as e: pass try: schedule_video, created", "from django_celery_beat.models import PeriodicTask, IntervalSchedule from django.core.management.base import BaseCommand from", "try: 
PeriodicTask.objects.create( interval=schedule_channel, name='Scrape Channels', task='toolbox.scraper.tasks.scrape_youtube_channels', ) except IntegrityError as", "created = IntervalSchedule.objects.get_or_create( every=6, period=IntervalSchedule.HOURS, ) except IntegrityError as e:", "name='Scrape Channels', task='toolbox.scraper.tasks.scrape_youtube_channels', ) except IntegrityError as e: pass try:", "interval=schedule_video, name='Scrape Videos', task='toolbox.scraper.tasks.scrape_youtube_videos', ) except IntegrityError as e: pass", "from django.core.management.base import BaseCommand from django.db import IntegrityError class Command(BaseCommand):" ]
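Because every step swallows IntegrityError, the command is safe to re-run: IntervalSchedule rows are fetched with get_or_create, and django_celery_beat enforces a unique name on PeriodicTask, so a second run leaves the existing tasks untouched. A minimal invocation sketch, assuming a hypothetical module path toolbox/management/commands/setup_scraper_tasks.py:

# Hypothetical: if the command file is toolbox/management/commands/setup_scraper_tasks.py,
# it can be run from code (or via `python manage.py setup_scraper_tasks`).
from django.core.management import call_command

call_command('setup_scraper_tasks')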
[ "from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment from ppcls.data.preprocess.ops.cutout import Cutout from ppcls.data.preprocess.ops.hide_and_seek", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy from ppcls.data.preprocess.ops.randaugment import RandAugment as", "GridMask from ppcls.data.preprocess.ops.operators import DecodeImage from ppcls.data.preprocess.ops.operators import ResizeImage from", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "All Rights Reserved. # # Licensed under the Apache License,", "2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "Unless required by applicable law or agreed to in writing,", "agreed to in writing, software # distributed under the License", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "import numpy as np from PIL import Image def transform(data,", "Image def transform(data, ops=[]): \"\"\" transform \"\"\" for op in", "class RandAugment(RawRandAugment): \"\"\" RandAugment wrapper to auto fit different img", "return img class TimmAutoAugment(RawTimmAutoAugment): \"\"\" TimmAutoAugment wrapper to auto fit", "def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __call__(self, img): if", "the License. from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy from ppcls.data.preprocess.ops.randaugment", "(c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed", "distributed under the License is distributed on an \"AS IS\"", "import ImageNetPolicy as RawImageNetPolicy from ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment", "op in ops: data = op(data) return data class AutoAugment(RawImageNetPolicy):", "from ppcls.data.preprocess.ops.operators import DecodeImage from ppcls.data.preprocess.ops.operators import ResizeImage from ppcls.data.preprocess.ops.operators", "img = Image.fromarray(img) img = super().__call__(img) if isinstance(img, Image.Image): img", "RawRandAugment from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment from ppcls.data.preprocess.ops.cutout import Cutout from", "HideAndSeek from ppcls.data.preprocess.ops.random_erasing import RandomErasing from ppcls.data.preprocess.ops.grid import GridMask from", "np.ascontiguousarray(img) img = Image.fromarray(img) img = super().__call__(img) if isinstance(img, Image.Image):", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "under the License. from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy from", "TimmAutoAugment wrapper to auto fit different img tyeps. \"\"\" def", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. 
# You may obtain", "Cutout from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek from ppcls.data.preprocess.ops.random_erasing import RandomErasing from", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "RandCropImage from ppcls.data.preprocess.ops.operators import RandFlipImage from ppcls.data.preprocess.ops.operators import NormalizeImage from", "RandFlipImage from ppcls.data.preprocess.ops.operators import NormalizeImage from ppcls.data.preprocess.ops.operators import ToCHWImage from", "def transform(data, ops=[]): \"\"\" transform \"\"\" for op in ops:", "RandomErasing from ppcls.data.preprocess.ops.grid import GridMask from ppcls.data.preprocess.ops.operators import DecodeImage from", "AugMix from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator import numpy", "img types \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def", "PaddlePaddle Authors. All Rights Reserved. # # Licensed under the", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "from ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment", "ops=[]): \"\"\" transform \"\"\" for op in ops: data =", "auto fit different img types \"\"\" def __init__(self, *args, **kwargs):", "__call__(self, img): if not isinstance(img, Image.Image): img = np.ascontiguousarray(img) img", "not use this file except in compliance with the License.", "from ppcls.data.preprocess.ops.operators import ToCHWImage from ppcls.data.preprocess.ops.operators import AugMix from ppcls.data.preprocess.batch_ops.batch_operators", "FmixOperator import numpy as np from PIL import Image def", "as RawRandAugment from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment from ppcls.data.preprocess.ops.cutout import Cutout", "from ppcls.data.preprocess.ops.random_erasing import RandomErasing from ppcls.data.preprocess.ops.grid import GridMask from ppcls.data.preprocess.ops.operators", "ResizeImage from ppcls.data.preprocess.ops.operators import CropImage from ppcls.data.preprocess.ops.operators import RandCropImage from", "ppcls.data.preprocess.ops.operators import RandFlipImage from ppcls.data.preprocess.ops.operators import NormalizeImage from ppcls.data.preprocess.ops.operators import", "Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
# #", "from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek from ppcls.data.preprocess.ops.random_erasing import RandomErasing from ppcls.data.preprocess.ops.grid", "writing, software # distributed under the License is distributed on", "\"\"\" RandAugment wrapper to auto fit different img types \"\"\"", "in writing, software # distributed under the License is distributed", "import HideAndSeek from ppcls.data.preprocess.ops.random_erasing import RandomErasing from ppcls.data.preprocess.ops.grid import GridMask", "**kwargs): super().__init__(*args, **kwargs) def __call__(self, img): if not isinstance(img, Image.Image):", "you may not use this file except in compliance with", "isinstance(img, Image.Image): img = np.asarray(img) return img class TimmAutoAugment(RawTimmAutoAugment): \"\"\"", "if not isinstance(img, Image.Image): img = np.ascontiguousarray(img) img = Image.fromarray(img)", "img class RandAugment(RawRandAugment): \"\"\" RandAugment wrapper to auto fit different", "Image.Image): img = np.asarray(img) return img class RandAugment(RawRandAugment): \"\"\" RandAugment", "img = np.asarray(img) return img class RandAugment(RawRandAugment): \"\"\" RandAugment wrapper", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "ppcls.data.preprocess.ops.operators import NormalizeImage from ppcls.data.preprocess.ops.operators import ToCHWImage from ppcls.data.preprocess.ops.operators import", "for op in ops: data = op(data) return data class", "ImageNetPolicy wrapper to auto fit different img types \"\"\" def", "language governing permissions and # limitations under the License. from", "img): if not isinstance(img, Image.Image): img = np.ascontiguousarray(img) img =", "= np.asarray(img) return img class TimmAutoAugment(RawTimmAutoAugment): \"\"\" TimmAutoAugment wrapper to", "def __call__(self, img): if not isinstance(img, Image.Image): img = np.ascontiguousarray(img)", "RawTimmAutoAugment from ppcls.data.preprocess.ops.cutout import Cutout from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek from", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "License. from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy from ppcls.data.preprocess.ops.randaugment import", "ppcls.data.preprocess.ops.cutout import Cutout from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek from ppcls.data.preprocess.ops.random_erasing import", "CONDITIONS OF ANY KIND, either express or implied. # See", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "ops: data = op(data) return data class AutoAugment(RawImageNetPolicy): \"\"\" ImageNetPolicy", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Rights Reserved. # # Licensed under the Apache License, Version", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "to auto fit different img tyeps. \"\"\" def __init__(self, *args,", "# You may obtain a copy of the License at", "ToCHWImage from ppcls.data.preprocess.ops.operators import AugMix from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator,", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "np.asarray(img) return img class RandAugment(RawRandAugment): \"\"\" RandAugment wrapper to auto", "tyeps. \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __call__(self,", "= super().__call__(img) if isinstance(img, Image.Image): img = np.asarray(img) return img", "under the License is distributed on an \"AS IS\" BASIS,", "import RandomErasing from ppcls.data.preprocess.ops.grid import GridMask from ppcls.data.preprocess.ops.operators import DecodeImage", "# limitations under the License. from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "fit different img tyeps. \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args,", "Authors. All Rights Reserved. # # Licensed under the Apache", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "from ppcls.data.preprocess.ops.operators import RandFlipImage from ppcls.data.preprocess.ops.operators import NormalizeImage from ppcls.data.preprocess.ops.operators", "to auto fit different img types \"\"\" def __init__(self, *args,", "\"\"\" transform \"\"\" for op in ops: data = op(data)", "Reserved. # # Licensed under the Apache License, Version 2.0", "= np.ascontiguousarray(img) img = Image.fromarray(img) img = super().__call__(img) if isinstance(img,", "ppcls.data.preprocess.ops.grid import GridMask from ppcls.data.preprocess.ops.operators import DecodeImage from ppcls.data.preprocess.ops.operators import", "\"\"\" for op in ops: data = op(data) return data", "different img tyeps. \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)", "from ppcls.data.preprocess.ops.operators import NormalizeImage from ppcls.data.preprocess.ops.operators import ToCHWImage from ppcls.data.preprocess.ops.operators", "isinstance(img, Image.Image): img = np.ascontiguousarray(img) img = Image.fromarray(img) img =", "the License for the specific language governing permissions and #", "op(data) return data class AutoAugment(RawImageNetPolicy): \"\"\" ImageNetPolicy wrapper to auto", "ppcls.data.preprocess.ops.operators import ResizeImage from ppcls.data.preprocess.ops.operators import CropImage from ppcls.data.preprocess.ops.operators import", "TimmAutoAugment(RawTimmAutoAugment): \"\"\" TimmAutoAugment wrapper to auto fit different img tyeps.", "(the \"License\"); # you may not use this file except", "transform(data, ops=[]): \"\"\" transform \"\"\" for op in ops: data", "Apache License, Version 2.0 (the \"License\"); # you may not", "# you may not use this file except in compliance", "DecodeImage from ppcls.data.preprocess.ops.operators import ResizeImage from ppcls.data.preprocess.ops.operators import CropImage from", "ppcls.data.preprocess.ops.operators import ToCHWImage from ppcls.data.preprocess.ops.operators import AugMix from ppcls.data.preprocess.batch_ops.batch_operators import", "either express or implied. 
# See the License for the", "in ops: data = op(data) return data class AutoAugment(RawImageNetPolicy): \"\"\"", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __call__(self, img): if not", "as RawImageNetPolicy from ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment from ppcls.data.preprocess.ops.timm_autoaugment", "PIL import Image def transform(data, ops=[]): \"\"\" transform \"\"\" for", "super().__init__(*args, **kwargs) def __call__(self, img): if not isinstance(img, Image.Image): img", "img = np.ascontiguousarray(img) img = Image.fromarray(img) img = super().__call__(img) if", "OR CONDITIONS OF ANY KIND, either express or implied. #", "ppcls.data.preprocess.ops.operators import RandCropImage from ppcls.data.preprocess.ops.operators import RandFlipImage from ppcls.data.preprocess.ops.operators import", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\"\"\" ImageNetPolicy wrapper to auto fit different img types \"\"\"", "ppcls.data.preprocess.ops.operators import DecodeImage from ppcls.data.preprocess.ops.operators import ResizeImage from ppcls.data.preprocess.ops.operators import", "the License is distributed on an \"AS IS\" BASIS, #", "class AutoAugment(RawImageNetPolicy): \"\"\" ImageNetPolicy wrapper to auto fit different img", "AutoAugment(RawImageNetPolicy): \"\"\" ImageNetPolicy wrapper to auto fit different img types", "from ppcls.data.preprocess.ops.cutout import Cutout from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek from ppcls.data.preprocess.ops.random_erasing", "data = op(data) return data class AutoAugment(RawImageNetPolicy): \"\"\" ImageNetPolicy wrapper", "from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy from ppcls.data.preprocess.ops.randaugment import RandAugment", "in compliance with the License. # You may obtain a", "from ppcls.data.preprocess.ops.operators import CropImage from ppcls.data.preprocess.ops.operators import RandCropImage from ppcls.data.preprocess.ops.operators", "np.asarray(img) return img class TimmAutoAugment(RawTimmAutoAugment): \"\"\" TimmAutoAugment wrapper to auto", "software # distributed under the License is distributed on an", "ppcls.data.preprocess.ops.operators import CropImage from ppcls.data.preprocess.ops.operators import RandCropImage from ppcls.data.preprocess.ops.operators import", "CropImage from ppcls.data.preprocess.ops.operators import RandCropImage from ppcls.data.preprocess.ops.operators import RandFlipImage from", "data class AutoAugment(RawImageNetPolicy): \"\"\" ImageNetPolicy wrapper to auto fit different", "ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment from ppcls.data.preprocess.ops.cutout import Cutout from ppcls.data.preprocess.ops.hide_and_seek import", "wrapper to auto fit different img tyeps. \"\"\" def __init__(self,", "import DecodeImage from ppcls.data.preprocess.ops.operators import ResizeImage from ppcls.data.preprocess.ops.operators import CropImage", "MixupOperator, CutmixOperator, OpSampler, FmixOperator import numpy as np from PIL", "auto fit different img tyeps. 
\"\"\" def __init__(self, *args, **kwargs):", "from ppcls.data.preprocess.ops.operators import ResizeImage from ppcls.data.preprocess.ops.operators import CropImage from ppcls.data.preprocess.ops.operators", "# # Unless required by applicable law or agreed to", "**kwargs) def __call__(self, img): if not isinstance(img, Image.Image): img =", "import MixupOperator, CutmixOperator, OpSampler, FmixOperator import numpy as np from", "import RandCropImage from ppcls.data.preprocess.ops.operators import RandFlipImage from ppcls.data.preprocess.ops.operators import NormalizeImage", "from PIL import Image def transform(data, ops=[]): \"\"\" transform \"\"\"", "= op(data) return data class AutoAugment(RawImageNetPolicy): \"\"\" ImageNetPolicy wrapper to", "return data class AutoAugment(RawImageNetPolicy): \"\"\" ImageNetPolicy wrapper to auto fit", "types \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __call__(self,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "governing permissions and # limitations under the License. from ppcls.data.preprocess.ops.autoaugment", "if isinstance(img, Image.Image): img = np.asarray(img) return img class TimmAutoAugment(RawTimmAutoAugment):", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "img = np.asarray(img) return img class TimmAutoAugment(RawTimmAutoAugment): \"\"\" TimmAutoAugment wrapper", "wrapper to auto fit different img types \"\"\" def __init__(self,", "from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator import numpy as", "Version 2.0 (the \"License\"); # you may not use this", "if isinstance(img, Image.Image): img = np.asarray(img) return img class RandAugment(RawRandAugment):", "and # limitations under the License. from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy", "not isinstance(img, Image.Image): img = np.ascontiguousarray(img) img = Image.fromarray(img) img", "law or agreed to in writing, software # distributed under", "RandAugment as RawRandAugment from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment from ppcls.data.preprocess.ops.cutout import", "super().__call__(img) if isinstance(img, Image.Image): img = np.asarray(img) return img class", "np from PIL import Image def transform(data, ops=[]): \"\"\" transform", "import ResizeImage from ppcls.data.preprocess.ops.operators import CropImage from ppcls.data.preprocess.ops.operators import RandCropImage", "transform \"\"\" for op in ops: data = op(data) return", "permissions and # limitations under the License. from ppcls.data.preprocess.ops.autoaugment import", "import RandAugment as RawRandAugment from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment from ppcls.data.preprocess.ops.cutout", "import RandFlipImage from ppcls.data.preprocess.ops.operators import NormalizeImage from ppcls.data.preprocess.ops.operators import ToCHWImage", "as np from PIL import Image def transform(data, ops=[]): \"\"\"", "fit different img types \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args,", "= Image.fromarray(img) img = super().__call__(img) if isinstance(img, Image.Image): img =", "OpSampler, FmixOperator import numpy as np from PIL import Image", "implied. 
# See the License for the specific language governing", "Image.Image): img = np.ascontiguousarray(img) img = Image.fromarray(img) img = super().__call__(img)", "Image.fromarray(img) img = super().__call__(img) if isinstance(img, Image.Image): img = np.asarray(img)", "under the Apache License, Version 2.0 (the \"License\"); # you", "Image.Image): img = np.asarray(img) return img class TimmAutoAugment(RawTimmAutoAugment): \"\"\" TimmAutoAugment", "\"License\"); # you may not use this file except in", "ppcls.data.preprocess.ops.random_erasing import RandomErasing from ppcls.data.preprocess.ops.grid import GridMask from ppcls.data.preprocess.ops.operators import", "import AugMix from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator import", "\"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __call__(self, img):", "import ToCHWImage from ppcls.data.preprocess.ops.operators import AugMix from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator,", "import CropImage from ppcls.data.preprocess.ops.operators import RandCropImage from ppcls.data.preprocess.ops.operators import RandFlipImage", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment from", "CutmixOperator, OpSampler, FmixOperator import numpy as np from PIL import", "numpy as np from PIL import Image def transform(data, ops=[]):", "isinstance(img, Image.Image): img = np.asarray(img) return img class RandAugment(RawRandAugment): \"\"\"", "img tyeps. \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def", "RandAugment wrapper to auto fit different img types \"\"\" def", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "limitations under the License. from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy", "ImageNetPolicy as RawImageNetPolicy from ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment from", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "img = super().__call__(img) if isinstance(img, Image.Image): img = np.asarray(img) return", "*args, **kwargs): super().__init__(*args, **kwargs) def __call__(self, img): if not isinstance(img,", "import NormalizeImage from ppcls.data.preprocess.ops.operators import ToCHWImage from ppcls.data.preprocess.ops.operators import AugMix", "the License. # You may obtain a copy of the", "<filename>ppcls/data/preprocess/__init__.py # Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "import Cutout from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek from ppcls.data.preprocess.ops.random_erasing import RandomErasing", "class TimmAutoAugment(RawTimmAutoAugment): \"\"\" TimmAutoAugment wrapper to auto fit different img", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "import GridMask from ppcls.data.preprocess.ops.operators import DecodeImage from ppcls.data.preprocess.ops.operators import ResizeImage", "to in writing, software # distributed under the License is", "from ppcls.data.preprocess.ops.grid import GridMask from ppcls.data.preprocess.ops.operators import DecodeImage from ppcls.data.preprocess.ops.operators", "return img class RandAugment(RawRandAugment): \"\"\" RandAugment wrapper to auto fit", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "img class TimmAutoAugment(RawTimmAutoAugment): \"\"\" TimmAutoAugment wrapper to auto fit different", "ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator import numpy as np", "from ppcls.data.preprocess.ops.operators import AugMix from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler,", "You may obtain a copy of the License at #", "import RawTimmAutoAugment from ppcls.data.preprocess.ops.cutout import Cutout from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek", "RandAugment(RawRandAugment): \"\"\" RandAugment wrapper to auto fit different img types", "= np.asarray(img) return img class RandAugment(RawRandAugment): \"\"\" RandAugment wrapper to", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "different img types \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)", "\"\"\" TimmAutoAugment wrapper to auto fit different img tyeps. \"\"\"", "NormalizeImage from ppcls.data.preprocess.ops.operators import ToCHWImage from ppcls.data.preprocess.ops.operators import AugMix from", "required by applicable law or agreed to in writing, software", "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "with the License. # You may obtain a copy of", "ppcls.data.preprocess.ops.operators import AugMix from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator", "this file except in compliance with the License. # You", "the Apache License, Version 2.0 (the \"License\"); # you may", "RawImageNetPolicy from ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment from ppcls.data.preprocess.ops.timm_autoaugment import", "from ppcls.data.preprocess.ops.operators import RandCropImage from ppcls.data.preprocess.ops.operators import RandFlipImage from ppcls.data.preprocess.ops.operators", "import Image def transform(data, ops=[]): \"\"\" transform \"\"\" for op", "ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek from ppcls.data.preprocess.ops.random_erasing import RandomErasing from ppcls.data.preprocess.ops.grid import" ]
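transform() simply threads data through a list of ops, so a preprocessing pipeline is just a list of the imported operators. A minimal sketch follows; the constructor arguments mirror values commonly seen in PaddleClas YAML configs and should be treated as assumptions, as should the image path.

# Sketch: decode raw JPEG bytes, resize and crop, normalize, convert to CHW.
ops = [
    DecodeImage(to_rgb=True, channel_first=False),
    ResizeImage(resize_short=256),
    CropImage(size=224),
    NormalizeImage(scale=1.0 / 255.0,
                   mean=[0.485, 0.456, 0.406],
                   std=[0.229, 0.224, 0.225],
                   order=''),
    ToCHWImage(),
]

with open('demo.jpg', 'rb') as f:       # hypothetical image file
    chw_img = transform(f.read(), ops)  # raw bytes in, CHW float array out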
[ "y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2)): # create weight", "ax.set_zlabel('$Loss$', fontsize=20) settings = (x, y, v, l2, w1_range, w2_range)", "add saddle point ax.scatter(0, 0, label='Saddle point', c='red', marker='*') #", "(x, y, v, l2, w1_range, w2_range) return ax, settings #", "minima ax.scatter(manifold_y, manifold_x, s=0.1, c='cyan', label='Manifold of global minima') def", "will call the 'update' function for each frame anim =", "(man_ws_x*man_ws_y)**2 min_loss = np.min(loss) manifold_indices = loss < min_loss +", "ax = fig.gca(projection='3d') ax.set_zlim(0, 50) ax.plot([0], [0], 'ro', c='red', marker='*',", "a) * w1_a + a * w1_b ww2 = (1", "fig.add_subplot(2, 2, 4) ax1, settings = plot_mse_loss_surface_2d(ax1, 1, 1) ax2,", "w2_path.append(ww2) ax.plot(w1_path, w2_path, loss, **kwargs) def plot_interpolated_loss(x, y, w1_a, w2_a,", "plot_mse_loss_surface_2d(fig, ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2)):", "n_w)) # initialize cost matrix # Fill the cost matrix", "shade=False, alpha=1) ax.set_xlabel('$w_1$', fontsize=20) ax.set_ylabel('$w_2$', fontsize=20) ax.set_zlabel('$Loss$', fontsize=20) settings =", "ww1 = (1 - a) * w1_a + a *", "(1 - a) * w2_a + a * w2_b w1_path.append(ww1)", "(w1_vals**2 + w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals[:i],", "each combination of weights for i in range(n_w): for j", "cost_ws[skip]) cbar = fig.colorbar(im, ax=ax) # ax.set(aspect=1, title='Loss Surface') cbar.ax.set_ylabel('$Loss$',", "rstride=1, cstride=1, facecolors=fcolors, vmin=minn, vmax=maxx, shade=False, alpha=1) ax.set_xlabel('$w_1$', fontsize=20) ax.set_ylabel('$w_2$',", "settings n_w = 1000 man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) man_w2", "- w1 * w2 * x)**2 + 0.5 * l2", "settings, weights, 'Gradient descent') animate_learning_dynamics(i, ax3, weights, 1) # animate_weight_norm(i,", "plot # fig = plt.figure(figsize=(8, 8)) # ax = fig.gca(projection='3d')", "alpha = np.arange(start, end, 0.001) w1_path = [] w2_path =", "man_ws_y * x)**2 + \\ 0.5 * l2 * (man_ws_x**2", "0.5 * l2 * (w1_vals**2 + w2_vals**2) + 0.5 *", "= plot_mse_loss_surface_3d(ax2, 1, 1, angle=60) plot_global_minimum_manifold_2d(ax1, settings) plot_global_minimum_manifold_3d(ax2, settings) def", "x)**2 + 0.5 * l2 * (ww1**2 + ww2**2) interpolated_loss.append(loss_val)", "2, 4) ax1, settings = plot_mse_loss_surface_2d(ax1, 1, 1) ax2, settings", "**kwargs) def plot_interpolated_loss(x, y, w1_a, w2_a, w1_b, w2_b, start=0, end=1,", "angle=45, manifold=False, **kwargs): if dim == '3d': ax, settings =", "y, _, _ = settings alpha = np.arange(start, end, 0.001)", "def plot_weight_norm(ax, weights, **kwargs): w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2'])", "_ = settings w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) loss", "**kwargs): w1_vals = weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals, w2_vals, **kwargs)", "= 100 w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1", "w2_a, w1_b, w2_b, start=0, end=1, **kwargs): alpha = np.arange(start, end,", "dimention - colormap # create colormap according to x-value (can", "**kwargs) plt.xlabel(r'$\\alpha$') plt.ylabel('Loss') def plot_learning_dynamics(ax, weights, **kwargs): epoch = np.arange(0,", "a in alpha: ww1 = (1 - a) * w1_a", "8)) #ax = fig.add_subplot(1,1,1, projection='3d') # fourth dimention - colormap", "plot_mse_loss_surface_3d(ax2, 1, 1, angle=60) plot_global_minimum_manifold_2d(ax1, settings) plot_global_minimum_manifold_3d(ax2, settings) def 
update(i):", "title='Loss Surface') cbar.ax.set_ylabel('$Loss$', fontsize=15) ax.set_xlabel('$w_1$', fontsize=15) ax.set_ylabel('$w_2$', fontsize=15) # ax.grid()", "**kwargs) plt.xlabel('Epoch') plt.ylabel('Loss') def plot_interpolated_trajectory_2d(ax, w1_a, w2_a, w1_b, w2_b, start=0,", "l2 * (man_ws_x**2 + man_ws_y**2) + 0.5 * v *", "ax.plot(w1_path, w2_path, loss, **kwargs) def plot_interpolated_loss(x, y, w1_a, w2_a, w1_b,", "1) # ax2 = fig.add_subplot(2, 2, 2, projection = '3d')", "cbar = fig.colorbar(im, ax=ax) # ax.set(aspect=1, title='Loss Surface') cbar.ax.set_ylabel('$Loss$', fontsize=15)", "= color_dimension.min(), color_dimension.max() norm = Normalize(minn, maxx) m = plt.cm.ScalarMappable(norm=norm,", "2), w2_range=(2, -2)): # create weight space n_w = 100", "(w1_vals**2 + w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals,", "end=1, **kwargs): alpha = np.arange(start, end, 0.001) interpolated_loss = []", "np.arange(0, len(weights['w1'])) for w1, w2 in zip(weights['w1'], weights['w2']): loss_val =", "frames=100, interval=5, save_count=50) # HTML(anim.to_html5_video()) if save: anim.save(name + '.gif',", "= plot_mse_loss_surface_3d(x, y, angle=angle) if manifold: plot_global_minimum_manifold_3d(ax, settings) plot_optimiser_trajectory_3d(ax, settings,", "pos, np.nan) y = np.insert(manifold_y, pos, np.nan) # plot manifold", "minima') def plot_optimiser_trajectory_2d(ax, weights, **kwargs): w1_vals = weights['w1'] w2_vals =", "\\ 0.5 * l2 * (w1_vals**2 + w2_vals**2) + 0.5", "+ a * w1_b ww2 = (1 - a) *", "animate_learning_dynamics(i, ax3, weights, 1) # animate_weight_norm(i, ax4, scalarNet.history) # suncAnimation", "weights['w2']): scores.append(w1 * w2) ax.set_xlim((1, n_epoch)) ax.set_ylim((0, y)) ax.set_xlabel('Epoch', fontsize=15)", "label='Negative gradient') settings = (x, y, v, l2, w1_range, w2_range)", "ax.scatter(0, 0, label='Saddle point', c='red', marker='*') # ax.scatter(0,0, c='black', marker=r'$\\rightarrow$',", "def plot_mse_loss_surface_3d(ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2),", "maxx) m = plt.cm.ScalarMappable(norm=norm, cmap='jet') m.set_array([]) fcolors = m.to_rgba(color_dimension) #", "descent') animate_optimiser_trajectory_3d( i, ax2, settings, weights, 'Gradient descent') animate_learning_dynamics(i, ax3,", "end=1, **kwargs): x, y, _, _ = settings alpha =", "= np.gradient(cost_ws) # plot vector space skip = (slice(None, None,", "m = plt.cm.ScalarMappable(norm=norm, cmap='jet') m.set_array([]) fcolors = m.to_rgba(color_dimension) # plot", "**kwargs) def animate_optimiser_trajectory_2d(i, ax, weights, **kwargs): w1_vals = weights['w1'] w2_vals", "y)) ax.set_xlabel('Epoch', fontsize=15) ax.set_ylabel('$w_2 \\cdot w_1$', fontsize=15) ax.plot(epoch[:i], scores[:i], **kwargs)", "= weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals[:i], w2_vals[:i], **kwargs) return ax", "# two-dimesional version def plot_mse_loss_surface_2d(fig, ax, x, y, v=0.0, l2=0.0,", "fig, ax = plt.subplots(figsize=(8, 8)) #ax.contour(ws_x, ws_y, cost_ws, 200) im", "+ w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals, w2_vals,", "= 0.5 * y *(1 - man_ws_x * man_ws_y *", "FuncAnimation(fig, update, frames=100, interval=5, save_count=50) # HTML(anim.to_html5_video()) if save: anim.save(name", "* v * (man_ws_x * man_ws_y)**2 min_loss = np.min(loss) manifold_indices", "+ 0.5 * v * (man_ws_x*man_ws_y)**2 min_loss = np.min(loss) manifold_indices", "animate_optimiser_trajectory_2d(i, ax, weights, **kwargs): w1_vals = weights['w1'] w2_vals = 
weights['w2']", "angle=angle) if manifold: plot_global_minimum_manifold_3d(ax, settings) plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs) else:", "w2_vals[:i], loss[:i], **kwargs) return ax def plot_optimiser_loss(x, y, v, l2,", "return ax def animate_learning(weights, save=False, name='anim'): gs = gridspec.GridSpec(2, 4)", "= plot_mse_loss_surface_2d(ax1, 1, 1) ax2, settings = plot_mse_loss_surface_3d(ax2, 1, 1,", "v * (man_ws_x * man_ws_y)**2 min_loss = np.min(loss) manifold_indices =", "extent=[ws_x.min(), ws_x.max( ), ws_y.min(), ws_y.max()], cmap=cm.coolwarm) ax.quiver(ws_x[skip], ws_y[skip], -dx[skip], dy[skip],", "plot_global_minimum_manifold_2d(ax, settings): # retieve cached settings x, y, v, l2,", "angle=60) plot_global_minimum_manifold_2d(ax1, settings) plot_global_minimum_manifold_3d(ax2, settings) def update(i): animate_optimiser_trajectory_2d( i, ax1,", "FuncAnimation # two-dimesional version def plot_mse_loss_surface_2d(fig, ax, x, y, v=0.0,", "None, 5)) # fig, ax = plt.subplots(figsize=(8, 8)) #ax.contour(ws_x, ws_y,", "* v * (ws_x[i, j]*ws_y[i, j])**2 X = ws_x Y", "* (y_true - y_pred)**2 + \\ 0.5 * l2 *", "# plot manifold of global minima #ax.scatter(manifold_y, manifold_x, 0, s=0.5,", "* (ws_x[i, j]**2 + ws_y[i, j]**2) + 0.5 * v", "c='red', marker='*', label='Saddle point') ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=fcolors,", "n_w = 1000 man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) man_w2 =", "plot_optimiser_trajectory_2d(ax, weights, **kwargs) def plot_weight_norm(ax, weights, **kwargs): w1_vals = np.array(weights['w1'])", "interpolated_loss = [] for a in alpha: ww1 = (1", "for w1, w2 in zip(weights['w1'], weights['w2']): scores.append(w1 * w2) ax.set_xlim((1,", "fontsize=15) ax.plot(epoch[:i], scores[:i], **kwargs) return ax def animate_learning(weights, save=False, name='anim'):", "weights, **kwargs): w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) epochs =", "y, weights, dim='2d', angle=45, manifold=False, **kwargs): if dim == '3d':", "ww1 * ww2 * x)**2 + 0.5 * l2 *", "epoch = np.arange(0, len(weights['w1'])) for w1, w2 in zip(weights['w1'], weights['w2']):", "ax.plot([0], [0], 'ro', c='red', marker='*', label='Saddle point') ax.plot_surface(X, Y, Z,", "fontsize=12) ax.set_ylabel('Weight norm', fontsize=12) ax.plot(epochs, norms, linewidth=2.0, **kwargs) def animate_optimiser_trajectory_2d(i,", "= fig.gca(projection='3d') ax.set_zlim(0, 50) ax.plot([0], [0], 'ro', c='red', marker='*', label='Saddle", "l2 * (ww1**2 + ww2**2) loss.append(loss_val) w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path,", "ww2**2) loss.append(loss_val) w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path, loss, **kwargs) def plot_interpolated_loss(x,", "* (1 - w1 * w2 * x)**2 + 0.5", "w2 in zip(weights['w1'], weights['w2']): scores.append(w1 * w2) ax.plot(epoch, scores, **kwargs)", "= len(weights['w1']) epoch = np.arange(1, n_epoch) scores = [] for", "from matplotlib.animation import FuncAnimation # two-dimesional version def plot_mse_loss_surface_2d(fig, ax,", "'update' function for each frame anim = FuncAnimation(fig, update, frames=100,", "loss.append(loss_val) w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path, loss, **kwargs) def plot_interpolated_loss(x, y,", "np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0]+1 x = np.insert(manifold_x, pos, np.nan) y =", "# create colormap according to x-value (can use any 50x50", "x)**2 + \\ 0.5 * l2 * (w1_vals**2 + w2_vals**2)", "np.insert(manifold_x, 
pos, np.nan) y = np.insert(manifold_y, pos, np.nan) # plot", "else: ax, settings = plot_mse_loss_surface_2d(x, y) if manifold: plot_global_minimum_manifold_2d(ax, settings)", "weights, **kwargs): w1_vals = weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals[:i], w2_vals[:i],", "projection = '3d') # ax3 = fig.add_subplot(2, 2, 3) #", "y, v, l2, w1_range, w2_range = settings n_w = 1000", "0, s=0.5, c='cyan', # label='Manifold of global minima') ax.plot(y, x,", "< min_loss + 1e-5 manifold_x = man_ws_x[manifold_indices] manifold_y = man_ws_y[manifold_indices]", "* (ws_x[i, j]*ws_y[i, j])**2 # compute gradients dy, dx =", "np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1 w2 = np.linspace(w2_range[0], w2_range[1],", "matrix for each combination of weights for i in range(n_w):", "plt.xlabel('Epoch') plt.ylabel('Loss') def plot_interpolated_trajectory_2d(ax, w1_a, w2_a, w1_b, w2_b, start=0, end=1,", "w2_vals, loss, **kwargs) def plot_optimiser_trajectory(x, y, weights, dim='2d', angle=45, manifold=False,", "**kwargs) return ax def plot_optimiser_loss(x, y, v, l2, weights, **kwargs):", "cost_ws, 200) im = ax.imshow(cost_ws, extent=[ws_x.min(), ws_x.max( ), ws_y.min(), ws_y.max()],", "def plot_interpolated_loss(x, y, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):", "w2_range[1], num=n_w) # weight 2 ws_x, ws_y = np.meshgrid(w1, w2)", "epoch = np.arange(0, len(weights['w1'])) scores = [] for w1, w2", "matplotlib import cm from matplotlib.colors import Normalize from mpl_toolkits.mplot3d import", "plot_global_minimum_manifold_3d(ax, settings): # retieve cached settings x, y, v, l2,", "(ws_x[i, j]*ws_y[i, j])**2 # compute gradients dy, dx = np.gradient(cost_ws)", "* w1_b ww2 = (1 - a) * w2_a +", "ax2 = fig.add_subplot(2, 2, 2, projection = '3d') # ax3", "* (1 - w1_vals * w2_vals * x)**2 + \\", "= [] w2_path = [] loss = [] for a", "in range(n_w): y_pred = ws_x[i, j] * ws_y[i, j] *", "def plot_interpolated_trajectory_2d(ax, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs): alpha", "50x50 array) color_dimension = Z # change to desired fourth", "loss < min_loss + 1e-5 manifold_x = man_ws_x[manifold_indices] manifold_y =", "= man_ws_y[manifold_indices] # plot manifold of global minima ax.scatter(manifold_y, manifold_x,", "loss, **kwargs) def plot_optimiser_trajectory(x, y, weights, dim='2d', angle=45, manifold=False, **kwargs):", "w2_path = [] loss = [] for a in alpha:", "ax.set_xlim((1, n_epoch)) ax.set_ylim((0, y)) ax.set_xlabel('Epoch', fontsize=15) ax.set_ylabel('$w_2 \\cdot w_1$', fontsize=15)", "4) gs.update(wspace=0.5) fig = plt.figure(figsize=(12, 8)) ax1 = fig.add_subplot(gs[0, :2],", "label='Manifold of global minima') def plot_optimiser_trajectory_2d(ax, weights, **kwargs): w1_vals =", "= np.meshgrid(man_w1, man_w2) loss = 0.5 * y *(1 -", "j])**2 # compute gradients dy, dx = np.gradient(cost_ws) # plot", "of global minima') ax.plot(y, x, c='cyan', label='Manifold of global minima')", "weights, **kwargs): w1_vals = weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals, w2_vals,", "w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals, w2_vals, loss,", "weights, **kwargs) def plot_weight_norm(ax, weights, **kwargs): w1_vals = np.array(weights['w1']) w2_vals", "w2_vals = weights['w2'] ax.plot(w1_vals[:i], w2_vals[:i], **kwargs) return ax def animate_optimiser_trajectory_3d(i,", "- a) * w2_a + a * w2_b loss_val =", "function for each frame anim = FuncAnimation(fig, update, frames=100, interval=5,", "in zip(weights['w1'], weights['w2']): scores.append(w1 * w2) ax.plot(epoch, 
scores, **kwargs) def", "# required modules import numpy as np import matplotlib.pyplot as", "np.linspace(w2_range[0], w2_range[1], num=n_w) man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2) loss =", "global minima') ax.plot(y, x, c='cyan', label='Manifold of global minima') def", "100 w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1 w2", "= np.linspace(w2_range[0], w2_range[1], num=n_w) # weight 2 ws_x, ws_y =", "fig.add_subplot(2, 2, 1) # ax2 = fig.add_subplot(2, 2, 2, projection", "y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2), angle=30): # create", "y, angle=angle) if manifold: plot_global_minimum_manifold_3d(ax, settings) plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs)", "w1_vals = weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals, w2_vals, **kwargs) def", "plot_global_minimum_manifold_3d(ax2, settings) def update(i): animate_optimiser_trajectory_2d( i, ax1, settings, weights, 'Gradient", "**kwargs): w1_vals = weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals[:i], w2_vals[:i], **kwargs)", "- colormap # create colormap according to x-value (can use", "alpha=1) ax.set_xlabel('$w_1$', fontsize=20) ax.set_ylabel('$w_2$', fontsize=20) ax.set_zlabel('$Loss$', fontsize=20) settings = (x,", "ws_x Y = ws_y Z = cost_ws #fig, ax =", "'3d') # ax3 = fig.add_subplot(2, 2, 3) # ax4 =", "[] epoch = np.arange(0, len(weights['w1'])) for w1, w2 in zip(weights['w1'],", "as gridspec from matplotlib import cm from matplotlib.colors import Normalize", "# add saddle point ax.scatter(0, 0, label='Saddle point', c='red', marker='*')", "c='cyan', label='Manifold of global minima') def plot_optimiser_trajectory_2d(ax, weights, **kwargs): w1_vals", "ax.quiver(ws_x[skip], ws_y[skip], -dx[skip], dy[skip], cost_ws[skip]) cbar = fig.colorbar(im, ax=ax) #", "man_ws_y**2) + 0.5 * v * (man_ws_x*man_ws_y)**2 min_loss = np.min(loss)", "w_1$', fontsize=15) ax.plot(epoch[:i], scores[:i], **kwargs) return ax def animate_learning(weights, save=False,", "Y = ws_y Z = cost_ws #fig, ax = plt.subplots(figsize=(8,", "= np.arange(0, len(weights['w1'])) for w1, w2 in zip(weights['w1'], weights['w2']): loss_val", "fig.add_subplot(gs[1, 1:3]) # ax1 = fig.add_subplot(2, 2, 1) # ax2", "color_dimension.min(), color_dimension.max() norm = Normalize(minn, maxx) m = plt.cm.ScalarMappable(norm=norm, cmap='jet')", "label='Saddle point') ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=fcolors, vmin=minn, vmax=maxx,", "def plot_mse_loss_surface_2d(fig, ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2,", "(1 - w1_vals * w2_vals * x)**2 + \\ 0.5", "= plt.cm.ScalarMappable(norm=norm, cmap='jet') m.set_array([]) fcolors = m.to_rgba(color_dimension) # plot #", "matrix # Fill the cost matrix for each combination of", "[] for w1, w2 in zip(weights['w1'], weights['w2']): scores.append(w1 * w2)", "0.5 * (y - ww1 * ww2 * x)**2 +", "ax1, settings = plot_mse_loss_surface_2d(ax1, 1, 1) ax2, settings = plot_mse_loss_surface_3d(ax2,", "ax def plot_optimiser_loss(x, y, v, l2, weights, **kwargs): loss =", "= man_ws_y[manifold_indices] pos = np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0]+1 x = np.insert(manifold_x,", "s=0.1, c='cyan', label='Manifold of global minima') def plot_global_minimum_manifold_3d(ax, settings): #", "ax, weights, **kwargs): w1_vals = weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals[:i],", "j])**2 X = ws_x Y = ws_y Z = cost_ws", "#ax = fig.add_subplot(1,1,1, projection='3d') # fourth dimention - colormap #", "= 0.5 * y * (1 - w1_vals * w2_vals", "\\ 0.5 * l2 * (ws_x[i, j]**2 + 
ws_y[i, j]**2)", "w2 in zip(weights['w1'], weights['w2']): scores.append(w1 * w2) ax.set_xlim((1, n_epoch)) ax.set_ylim((0,", "y *(1 - man_ws_x * man_ws_y * x)**2 + \\", "= fig.add_subplot(2, 2, 1) # ax2 = fig.add_subplot(2, 2, 2,", "animate_learning(weights, save=False, name='anim'): gs = gridspec.GridSpec(2, 4) gs.update(wspace=0.5) fig =", "j] = 0.5 * (y_true - y_pred)**2 + \\ 0.5", "of global minima') def plot_optimiser_trajectory_2d(ax, weights, **kwargs): w1_vals = weights['w1']", "import matplotlib.gridspec as gridspec from matplotlib import cm from matplotlib.colors", "= settings w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) loss =", "* w2)**2 loss.append(loss_val) plt.plot(epoch, loss, **kwargs) plt.xlabel('Epoch') plt.ylabel('Loss') def plot_interpolated_trajectory_2d(ax,", "w2_path, loss, **kwargs) def plot_interpolated_loss(x, y, w1_a, w2_a, w1_b, w2_b,", "**kwargs) def plot_interpolated_trajectory_3d(ax, settings, w1_a, w2_a, w1_b, w2_b, start=0, end=1,", "scores[:i], **kwargs) return ax def animate_learning(weights, save=False, name='anim'): gs =", "* l2 * (ws_x[i, j]**2 + ws_y[i, j]**2) + 0.5", "change to desired fourth dimension minn, maxx = color_dimension.min(), color_dimension.max()", "np.array(weights['w2']) loss = 0.5 * y * (1 - w1_vals", "global minima') def plot_global_minimum_manifold_3d(ax, settings): # retieve cached settings x,", "* x)**2 + 0.5 * l2 * (w1**2 + w2**2)", "w2_vals * x)**2 + \\ 0.5 * l2 * (w1_vals**2", "minn, maxx = color_dimension.min(), color_dimension.max() norm = Normalize(minn, maxx) m", "# ax2 = fig.add_subplot(2, 2, 2, projection = '3d') #", "ax, settings = plot_mse_loss_surface_2d(x, y) if manifold: plot_global_minimum_manifold_2d(ax, settings) plot_optimiser_trajectory_2d(ax,", "in range(n_w): for j in range(n_w): y_pred = ws_x[i, j]", "create weight space n_w = 100 w1 = np.linspace(w1_range[0], w1_range[1],", "_, _ = settings w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2'])", "man_ws_y[manifold_indices] # plot manifold of global minima ax.scatter(manifold_y, manifold_x, s=0.1,", "w2_a + a * w2_b w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path, **kwargs)", "plt.xlabel(r'$\\alpha$') plt.ylabel('Loss') def plot_learning_dynamics(ax, weights, **kwargs): epoch = np.arange(0, len(weights['w1']))", "+ 0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals[:i], w2_vals[:i], loss[:i], **kwargs)", "4) ax1, settings = plot_mse_loss_surface_2d(ax1, 1, 1) ax2, settings =", "== '3d': ax, settings = plot_mse_loss_surface_3d(x, y, angle=angle) if manifold:", "w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) loss = 0.5 *", "w2_range) ax.view_init(angle, 10) return ax, settings def plot_global_minimum_manifold_2d(ax, settings): #", "settings, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs): x, y,", "ws_y[skip], -dx[skip], dy[skip], cost_ws[skip]) cbar = fig.colorbar(im, ax=ax) # ax.set(aspect=1,", "x, y, v, l2, _, _ = settings w1_vals =", "+ man_ws_y**2) + 0.5 * v * (man_ws_x * man_ws_y)**2", "fontsize=15) # ax.grid() # add saddle point ax.scatter(0, 0, label='Saddle", "ax, weights, y, **kwargs): n_epoch = len(weights['w1']) epoch = np.arange(1,", "(w1**2 + w2**2) + 0.5 * v * (w1 *", "man_ws_x[manifold_indices] manifold_y = man_ws_y[manifold_indices] pos = np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0]+1 x", "import cm from matplotlib.colors import Normalize from mpl_toolkits.mplot3d import Axes3D", "def plot_optimiser_loss(x, y, v, l2, weights, **kwargs): loss = []", "y) if manifold: 
plot_global_minimum_manifold_2d(ax, settings) plot_optimiser_trajectory_2d(ax, weights, **kwargs) def plot_weight_norm(ax,", "# HTML(anim.to_html5_video()) if save: anim.save(name + '.gif', dpi=80, writer='imagemagick') plt.show()", "m.to_rgba(color_dimension) # plot # fig = plt.figure(figsize=(8, 8)) # ax", "man_w2) loss = 0.5 * y * (1 - man_ws_x", "* x)**2 + 0.5 * l2 * (ww1**2 + ww2**2)", "'Gradient descent') animate_optimiser_trajectory_3d( i, ax2, settings, weights, 'Gradient descent') animate_learning_dynamics(i,", "ax1 = fig.add_subplot(gs[0, :2], ) ax2 = fig.add_subplot(gs[0, 2:], projection='3d')", "ax.plot(w1_vals, w2_vals, loss, **kwargs) def plot_optimiser_trajectory(x, y, weights, dim='2d', angle=45,", "= loss < min_loss + 1e-5 manifold_x = man_ws_x[manifold_indices] manifold_y", "mpl_toolkits.mplot3d import Axes3D from matplotlib.animation import FuncAnimation # two-dimesional version", "for w1, w2 in zip(weights['w1'], weights['w2']): loss_val = 0.5 *", "1) # animate_weight_norm(i, ax4, scalarNet.history) # suncAnimation will call the", "ws_y Z = cost_ws #fig, ax = plt.subplots(figsize=(8, 8)) #ax", "label='Saddle point', c='red', marker='*') # ax.scatter(0,0, c='black', marker=r'$\\rightarrow$', label='Negative gradient')", "= settings n_w = 1000 man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w)", "l2 * (ww1**2 + ww2**2) interpolated_loss.append(loss_val) plt.plot(alpha, interpolated_loss, **kwargs) plt.xlabel(r'$\\alpha$')", "according to x-value (can use any 50x50 array) color_dimension =", "\\ 0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + 0.5", "np.arange(start, end, 0.001) w1_path = [] w2_path = [] for", "global minima') def plot_optimiser_trajectory_2d(ax, weights, **kwargs): w1_vals = weights['w1'] w2_vals", "ws_y[i, j]**2) + 0.5 * v * (ws_x[i, j]*ws_y[i, j])**2", "j]*ws_y[i, j])**2 X = ws_x Y = ws_y Z =", "cost matrix # Fill the cost matrix for each combination", "alpha: ww1 = (1 - a) * w1_a + a", "x)**2 + 0.5 * l2 * (ww1**2 + ww2**2) loss.append(loss_val)", "ws_y[i, j] * x y_true = y cost_ws[i, j] =", "loss = [] for a in alpha: ww1 = (1", "ax.imshow(cost_ws, extent=[ws_x.min(), ws_x.max( ), ws_y.min(), ws_y.max()], cmap=cm.coolwarm) ax.quiver(ws_x[skip], ws_y[skip], -dx[skip],", "plot_weight_norm(ax, weights, **kwargs): w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) epochs", "* ww2 * x)**2 + 0.5 * l2 * (ww1**2", "settings = plot_mse_loss_surface_2d(ax1, 1, 1) ax2, settings = plot_mse_loss_surface_3d(ax2, 1,", "cost_ws = np.zeros((n_w, n_w)) # initialize cost matrix # Fill", "def plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs): x, y, v, l2, _,", "cm from matplotlib.colors import Normalize from mpl_toolkits.mplot3d import Axes3D from", "matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib import", "create colormap according to x-value (can use any 50x50 array)", "+ \\ 0.5 * l2 * (ws_x[i, j]**2 + ws_y[i,", "2, 3) # ax4 = fig.add_subplot(2, 2, 4) ax1, settings", "end, 0.001) interpolated_loss = [] for a in alpha: ww1", "* ws_y[i, j] * x y_true = y cost_ws[i, j]", "fig.add_subplot(gs[0, 2:], projection='3d') ax3 = fig.add_subplot(gs[1, 1:3]) # ax1 =", "ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2)): #", "* (w1 * w2)**2 loss.append(loss_val) plt.plot(epoch, loss, **kwargs) plt.xlabel('Epoch') plt.ylabel('Loss')", "desired fourth dimension minn, maxx = color_dimension.min(), color_dimension.max() norm =", "(y - ww1 * ww2 * x)**2 + 0.5 *", "+ a * w2_b loss_val = 0.5 * (y -", "loss = [] epoch = 
np.arange(0, len(weights['w1'])) for w1, w2", "w1_path = [] w2_path = [] for a in alpha:", "gradient') settings = (x, y, v, l2, w1_range, w2_range) return", "matplotlib.animation import FuncAnimation # two-dimesional version def plot_mse_loss_surface_2d(fig, ax, x,", "ax.set_ylabel('$w_2 \\cdot w_1$', fontsize=15) ax.plot(epoch[:i], scores[:i], **kwargs) return ax def", "= weights['w2'] ax.plot(w1_vals, w2_vals, **kwargs) def plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs):", "loss = 0.5 * y *(1 - man_ws_x * man_ws_y", "in zip(weights['w1'], weights['w2']): scores.append(w1 * w2) ax.set_xlim((1, n_epoch)) ax.set_ylim((0, y))", "loss[:i], **kwargs) return ax def plot_optimiser_loss(x, y, v, l2, weights,", "plt import matplotlib.gridspec as gridspec from matplotlib import cm from", "# ax = fig.gca(projection='3d') ax.set_zlim(0, 50) ax.plot([0], [0], 'ro', c='red',", "plt.ylabel('Loss') def plot_learning_dynamics(ax, weights, **kwargs): epoch = np.arange(0, len(weights['w1'])) scores", "= (x, y, v, l2, w1_range, w2_range) return ax, settings", "return ax def plot_optimiser_loss(x, y, v, l2, weights, **kwargs): loss", "= fig.add_subplot(2, 2, 3) # ax4 = fig.add_subplot(2, 2, 4)", "-2), angle=30): # create weight space n_w = 100 w1", "ax.plot(w1_vals, w2_vals, **kwargs) def plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs): x, y,", "- a) * w1_a + a * w1_b ww2 =", "# create weight space n_w = 100 w1 = np.linspace(w1_range[0],", "= fig.add_subplot(gs[0, :2], ) ax2 = fig.add_subplot(gs[0, 2:], projection='3d') ax3", "minima') ax.plot(y, x, c='cyan', label='Manifold of global minima') def plot_optimiser_trajectory_2d(ax,", "fig.add_subplot(2, 2, 3) # ax4 = fig.add_subplot(2, 2, 4) ax1,", "weight 2 ws_x, ws_y = np.meshgrid(w1, w2) cost_ws = np.zeros((n_w,", "man_ws_y**2) + 0.5 * v * (man_ws_x * man_ws_y)**2 min_loss", "plt.plot(alpha, interpolated_loss, **kwargs) plt.xlabel(r'$\\alpha$') plt.ylabel('Loss') def plot_learning_dynamics(ax, weights, **kwargs): epoch", "marker='*') # ax.scatter(0,0, c='black', marker=r'$\\rightarrow$', label='Negative gradient') settings = (x,", "* (man_ws_x * man_ws_y)**2 min_loss = np.min(loss) manifold_indices = loss", "weights['w2']): scores.append(w1 * w2) ax.plot(epoch, scores, **kwargs) def animate_learning_dynamics(i, ax,", "ax, settings = plot_mse_loss_surface_3d(x, y, angle=angle) if manifold: plot_global_minimum_manifold_3d(ax, settings)", "plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs): x, y, v, l2, _, _", "gs.update(wspace=0.5) fig = plt.figure(figsize=(12, 8)) ax1 = fig.add_subplot(gs[0, :2], )", "np.arange(0, len(weights['w1'])) scores = [] for w1, w2 in zip(weights['w1'],", "cost matrix for each combination of weights for i in", "a * w2_b w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path, **kwargs) def plot_interpolated_trajectory_3d(ax,", "* (ww1**2 + ww2**2) interpolated_loss.append(loss_val) plt.plot(alpha, interpolated_loss, **kwargs) plt.xlabel(r'$\\alpha$') plt.ylabel('Loss')", "weights, **kwargs): epoch = np.arange(0, len(weights['w1'])) scores = [] for", "point', c='red', marker='*') # ax.scatter(0,0, c='black', marker=r'$\\rightarrow$', label='Negative gradient') settings", "frame anim = FuncAnimation(fig, update, frames=100, interval=5, save_count=50) # HTML(anim.to_html5_video())", "= (1 - a) * w1_a + a * w1_b", "w2_b, start=0, end=1, **kwargs): alpha = np.arange(start, end, 0.001) w1_path", "w2_a + a * w2_b loss_val = 0.5 * (y", "zip(weights['w1'], weights['w2']): scores.append(w1 * w2) 
ax.set_xlim((1, n_epoch)) ax.set_ylim((0, y)) ax.set_xlabel('Epoch',", "ax.grid() # add saddle point ax.scatter(0, 0, label='Saddle point', c='red',", "ax = plt.subplots(figsize=(8, 8)) #ax = fig.add_subplot(1,1,1, projection='3d') # fourth", "0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals, w2_vals, loss, **kwargs) def", "loss_val = 0.5 * (y - ww1 * ww2 *", "w1_b ww2 = (1 - a) * w2_a + a", "range(n_w): y_pred = ws_x[i, j] * ws_y[i, j] * x", "* w2_a + a * w2_b loss_val = 0.5 *", "ws_x, ws_y = np.meshgrid(w1, w2) cost_ws = np.zeros((n_w, n_w)) #", "* l2 * (w1_vals**2 + w2_vals**2) + 0.5 * v", "-2)): # create weight space n_w = 100 w1 =", "= [] w2_path = [] for a in alpha: ww1", "as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec", "= 0.5 * (y - ww1 * ww2 * x)**2", "ws_x[i, j] * ws_y[i, j] * x y_true = y", "manifold of global minima #ax.scatter(manifold_y, manifold_x, 0, s=0.5, c='cyan', #", "'Gradient descent') animate_learning_dynamics(i, ax3, weights, 1) # animate_weight_norm(i, ax4, scalarNet.history)", "(slice(None, None, 5), slice(None, None, 5)) # fig, ax =", "200) im = ax.imshow(cost_ws, extent=[ws_x.min(), ws_x.max( ), ws_y.min(), ws_y.max()], cmap=cm.coolwarm)", "= plt.subplots(figsize=(8, 8)) #ax = fig.add_subplot(1,1,1, projection='3d') # fourth dimention", "**kwargs): w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) epochs = np.arange(0,", "w1_vals = np.array(weights['w1']) w2_vals = np.array(weights['w2']) epochs = np.arange(0, len(w1_vals),", "plot_optimiser_trajectory(x, y, weights, dim='2d', angle=45, manifold=False, **kwargs): if dim ==", "+ \\ 0.5 * l2 * (man_ws_x**2 + man_ws_y**2) +", "weights['w1'] w2_vals = weights['w2'] ax.plot(w1_vals, w2_vals, **kwargs) def plot_optimiser_trajectory_3d(ax, settings,", "settings alpha = np.arange(start, end, 0.001) w1_path = [] w2_path", "initialize cost matrix # Fill the cost matrix for each", "0.5 * l2 * (w1**2 + w2**2) + 0.5 *", "label='Manifold of global minima') def plot_global_minimum_manifold_3d(ax, settings): # retieve cached", "w2_a, w1_b, w2_b, start=0, end=1, **kwargs): x, y, _, _", "_, _ = settings alpha = np.arange(start, end, 0.001) w1_path", "2, 1) # ax2 = fig.add_subplot(2, 2, 2, projection =", "len(w1_vals), 1) norms = np.sqrt(w1_vals**2 + w2_vals**2) ax.set_xlabel('Epoch', fontsize=12) ax.set_ylabel('Weight", "# weight 1 w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) # weight", "def plot_interpolated_trajectory_3d(ax, settings, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):", "fontsize=20) ax.set_zlabel('$Loss$', fontsize=20) settings = (x, y, v, l2, w1_range,", "v * (man_ws_x*man_ws_y)**2 min_loss = np.min(loss) manifold_indices = loss <", "+ 0.5 * l2 * (w1**2 + w2**2) + 0.5", "y_pred)**2 + \\ 0.5 * l2 * (ws_x[i, j]**2 +", "= 0.5 * y * (1 - man_ws_x * man_ws_y", "[] w2_path = [] for a in alpha: ww1 =", "= np.arange(start, end, 0.001) interpolated_loss = [] for a in", "space n_w = 100 w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) #", "= np.linspace(w2_range[0], w2_range[1], num=n_w) man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2) loss", "global minima #ax.scatter(manifold_y, manifold_x, 0, s=0.5, c='cyan', # label='Manifold of", "l2 * (w1**2 + w2**2) + 0.5 * v *", "= (slice(None, None, 5), slice(None, None, 5)) # fig, ax", "dimension minn, maxx = color_dimension.min(), color_dimension.max() norm = Normalize(minn, maxx)", "5), slice(None, None, 5)) # fig, ax = plt.subplots(figsize=(8, 8))", "point') ax.plot_surface(X, Y, Z, rstride=1, cstride=1, 
facecolors=fcolors, vmin=minn, vmax=maxx, shade=False,", "plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs) else: ax, settings = plot_mse_loss_surface_2d(x, y)", "= weights['w2'] ax.plot(w1_vals[:i], w2_vals[:i], **kwargs) return ax def animate_optimiser_trajectory_3d(i, ax,", "**kwargs) def plot_weight_norm(ax, weights, **kwargs): w1_vals = np.array(weights['w1']) w2_vals =", "+ 1e-5 manifold_x = man_ws_x[manifold_indices] manifold_y = man_ws_y[manifold_indices] # plot", "[] w2_path = [] loss = [] for a in", "fig.gca(projection='3d') ax.set_zlim(0, 50) ax.plot([0], [0], 'ro', c='red', marker='*', label='Saddle point')", "modules import numpy as np import matplotlib.pyplot as plt import", "man_ws_x * man_ws_y * x)**2 + \\ 0.5 * l2", "i in range(n_w): for j in range(n_w): y_pred = ws_x[i,", "* (y - ww1 * ww2 * x)**2 + 0.5", "colormap # create colormap according to x-value (can use any", "animate_learning_dynamics(i, ax, weights, y, **kwargs): n_epoch = len(weights['w1']) epoch =", "plt.ylabel('Loss') def plot_interpolated_trajectory_2d(ax, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):", "fontsize=15) ax.set_ylabel('$w_2 \\cdot w_1$', fontsize=15) ax.plot(epoch[:i], scores[:i], **kwargs) return ax", "- y_pred)**2 + \\ 0.5 * l2 * (ws_x[i, j]**2", "np.array(weights['w1']) w2_vals = np.array(weights['w2']) epochs = np.arange(0, len(w1_vals), 1) norms", "norm', fontsize=12) ax.plot(epochs, norms, linewidth=2.0, **kwargs) def animate_optimiser_trajectory_2d(i, ax, weights,", "cached settings x, y, v, l2, w1_range, w2_range = settings", "i, ax2, settings, weights, 'Gradient descent') animate_learning_dynamics(i, ax3, weights, 1)", "**kwargs) return ax def animate_optimiser_trajectory_3d(i, ax, settings, weights, **kwargs): x,", "= np.zeros((n_w, n_w)) # initialize cost matrix # Fill the", "+ 0.5 * v * (ws_x[i, j]*ws_y[i, j])**2 # compute", "w2_path.append(ww2) ax.plot(w1_path, w2_path, **kwargs) def plot_interpolated_trajectory_3d(ax, settings, w1_a, w2_a, w1_b,", "update, frames=100, interval=5, save_count=50) # HTML(anim.to_html5_video()) if save: anim.save(name +", "ax.scatter(0,0, c='black', marker=r'$\\rightarrow$', label='Negative gradient') settings = (x, y, v,", "= np.array(weights['w2']) epochs = np.arange(0, len(w1_vals), 1) norms = np.sqrt(w1_vals**2", "np.arange(start, end, 0.001) w1_path = [] w2_path = [] loss", "cmap=cm.coolwarm) ax.quiver(ws_x[skip], ws_y[skip], -dx[skip], dy[skip], cost_ws[skip]) cbar = fig.colorbar(im, ax=ax)", "# fig = plt.figure(figsize=(8, 8)) # ax = fig.gca(projection='3d') ax.set_zlim(0,", "+ w2**2) + 0.5 * v * (w1 * w2)**2", "l2, w1_range, w2_range) return ax, settings # three-dimensional version def", "settings = (x, y, v, l2, w1_range, w2_range) ax.view_init(angle, 10)", "scores, **kwargs) def animate_learning_dynamics(i, ax, weights, y, **kwargs): n_epoch =", "norm = Normalize(minn, maxx) m = plt.cm.ScalarMappable(norm=norm, cmap='jet') m.set_array([]) fcolors", "0.001) w1_path = [] w2_path = [] loss = []", "* (man_ws_x**2 + man_ws_y**2) + 0.5 * v * (man_ws_x", "animate_weight_norm(i, ax4, scalarNet.history) # suncAnimation will call the 'update' function", "y * (1 - w1 * w2 * x)**2 +", "cmap='jet') m.set_array([]) fcolors = m.to_rgba(color_dimension) # plot # fig =", "weights['w2'] ax.plot(w1_vals[:i], w2_vals[:i], **kwargs) return ax def animate_optimiser_trajectory_3d(i, ax, settings,", "+ 0.5 * v * (w1_vals*w2_vals)**2 ax.plot(w1_vals, w2_vals, loss, **kwargs)", "w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path, loss, 
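# Example usage (a minimal sketch; the data point x = y = 1 is an arbitrary
# choice, matching the values used by animate_learning further below). Note
# that the 2-D plotter needs the figure for its colorbar, while the 3-D
# plotter expects an Axes3D instance.
def demo_loss_surfaces():
    fig2d, ax2d = plt.subplots(figsize=(8, 8))
    plot_mse_loss_surface_2d(fig2d, ax2d, 1, 1)
    fig3d = plt.figure(figsize=(8, 8))
    ax3d = fig3d.add_subplot(1, 1, 1, projection='3d')
    plot_mse_loss_surface_3d(ax3d, 1, 1, angle=30)
    plt.show()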
def plot_global_minimum_manifold_2d(ax, settings):
    # retrieve cached settings
    x, y, v, l2, w1_range, w2_range = settings
    n_w = 1000
    man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w)
    man_w2 = np.linspace(w2_range[0], w2_range[1], num=n_w)
    man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2)
    loss = 0.5 * y * (1 - man_ws_x * man_ws_y * x)**2 + \
        0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + \
        0.5 * v * (man_ws_x * man_ws_y)**2
    min_loss = np.min(loss)
    manifold_indices = loss < min_loss + 1e-5
    manifold_x = man_ws_x[manifold_indices]
    manifold_y = man_ws_y[manifold_indices]
    # plot manifold of global minima
    ax.scatter(manifold_y, manifold_x, s=0.1, c='cyan',
               label='Manifold of global minima')


def plot_global_minimum_manifold_3d(ax, settings):
    # retrieve cached settings
    x, y, v, l2, w1_range, w2_range = settings
    n_w = 1000
    man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w)
    man_w2 = np.linspace(w2_range[0], w2_range[1], num=n_w)
    man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2)
    loss = 0.5 * y * (1 - man_ws_x * man_ws_y * x)**2 + \
        0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + \
        0.5 * v * (man_ws_x * man_ws_y)**2
    min_loss = np.min(loss)
    manifold_indices = loss < min_loss + 1e-5
    manifold_x = man_ws_x[manifold_indices]
    manifold_y = man_ws_y[manifold_indices]
    # insert NaNs where the curve jumps between hyperbola branches so the
    # line plot does not connect them
    pos = np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0] + 1
    x_line = np.insert(manifold_x, pos, np.nan)
    y_line = np.insert(manifold_y, pos, np.nan)
    # plot manifold of global minima (drawn on the z = 0 floor)
    ax.plot(y_line, x_line, c='cyan', label='Manifold of global minima')


def plot_optimiser_trajectory_2d(ax, weights, **kwargs):
    w1_vals = weights['w1']
    w2_vals = weights['w2']
    ax.plot(w1_vals, w2_vals, **kwargs)


def plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs):
    x, y, v, l2, _, _ = settings
    w1_vals = np.array(weights['w1'])
    w2_vals = np.array(weights['w2'])
    loss = 0.5 * y * (1 - w1_vals * w2_vals * x)**2 + \
        0.5 * l2 * (w1_vals**2 + w2_vals**2) + \
        0.5 * v * (w1_vals * w2_vals)**2
    ax.plot(w1_vals, w2_vals, loss, **kwargs)
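# Note on `settings`: it is the tuple (x, y, v, l2, w1_range, w2_range) cached
# by the surface plotters above, so the manifold and trajectory helpers can
# recompute the loss consistently. With l2 = v = 0 the global minima form the
# hyperbola w1 * w2 = y / x; a quick check at (w1, w2) = (0.5, 2.0), x = y = 1:
assert 0.5 * 1 * (1 - 0.5 * 2.0 * 1) ** 2 == 0.0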
def plot_optimiser_trajectory(x, y, weights, dim='2d', angle=45, manifold=False, **kwargs):
    if dim == '3d':
        # create a fresh figure for the 3-D view
        fig = plt.figure(figsize=(8, 8))
        ax = fig.add_subplot(1, 1, 1, projection='3d')
        ax, settings = plot_mse_loss_surface_3d(ax, x, y, angle=angle)
        if manifold:
            plot_global_minimum_manifold_3d(ax, settings)
        plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs)
    else:
        # create a fresh figure for the 2-D view
        fig, ax = plt.subplots(figsize=(8, 8))
        ax, settings = plot_mse_loss_surface_2d(fig, ax, x, y)
        if manifold:
            plot_global_minimum_manifold_2d(ax, settings)
        plot_optimiser_trajectory_2d(ax, weights, **kwargs)


def plot_weight_norm(ax, weights, **kwargs):
    w1_vals = np.array(weights['w1'])
    w2_vals = np.array(weights['w2'])
    epochs = np.arange(0, len(w1_vals), 1)
    norms = np.sqrt(w1_vals**2 + w2_vals**2)
    ax.set_xlabel('Epoch', fontsize=12)
    ax.set_ylabel('Weight norm', fontsize=12)
    ax.plot(epochs, norms, linewidth=2.0, **kwargs)


def animate_optimiser_trajectory_2d(i, ax, weights, **kwargs):
    w1_vals = weights['w1']
    w2_vals = weights['w2']
    ax.plot(w1_vals[:i], w2_vals[:i], **kwargs)
    return ax


def animate_optimiser_trajectory_3d(i, ax, settings, weights, **kwargs):
    x, y, v, l2, _, _ = settings
    w1_vals = np.array(weights['w1'])
    w2_vals = np.array(weights['w2'])
    loss = 0.5 * y * (1 - w1_vals * w2_vals * x)**2 + \
        0.5 * l2 * (w1_vals**2 + w2_vals**2) + \
        0.5 * v * (w1_vals * w2_vals)**2
    ax.plot(w1_vals[:i], w2_vals[:i], loss[:i], **kwargs)
    return ax


def plot_optimiser_loss(x, y, v, l2, weights, **kwargs):
    loss = []
    epoch = np.arange(0, len(weights['w1']))
    for w1, w2 in zip(weights['w1'], weights['w2']):
        loss_val = 0.5 * y * (1 - w1 * w2 * x)**2 + \
            0.5 * l2 * (w1**2 + w2**2) + 0.5 * v * (w1 * w2)**2
        loss.append(loss_val)
    plt.plot(epoch, loss, **kwargs)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')


def plot_interpolated_trajectory_2d(ax, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):
    # linear path (1 - a) * (w1_a, w2_a) + a * (w1_b, w2_b)
    alpha = np.arange(start, end, 0.001)
    w1_path = []
    w2_path = []
    for a in alpha:
        ww1 = (1 - a) * w1_a + a * w1_b
        ww2 = (1 - a) * w2_a + a * w2_b
        w1_path.append(ww1)
        w2_path.append(ww2)
    ax.plot(w1_path, w2_path, **kwargs)


def plot_interpolated_trajectory_3d(ax, settings, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):
    x, y, v, l2, _, _ = settings
    alpha = np.arange(start, end, 0.001)
    w1_path = []
    w2_path = []
    loss = []
    for a in alpha:
        ww1 = (1 - a) * w1_a + a * w1_b
        ww2 = (1 - a) * w2_a + a * w2_b
        loss_val = 0.5 * (y - ww1 * ww2 * x)**2 + \
            0.5 * l2 * (ww1**2 + ww2**2)
        loss.append(loss_val)
        w1_path.append(ww1)
        w2_path.append(ww2)
    ax.plot(w1_path, w2_path, loss, **kwargs)


def plot_interpolated_loss(x, y, w1_a, w2_a, w1_b, w2_b, start=0, end=1, l2=0.0, **kwargs):
    # l2 is the optional L2 penalty (explicit parameter; assumed default 0.0)
    alpha = np.arange(start, end, 0.001)
    interpolated_loss = []
    for a in alpha:
        ww1 = (1 - a) * w1_a + a * w1_b
        ww2 = (1 - a) * w2_a + a * w2_b
        loss_val = 0.5 * (y - ww1 * ww2 * x)**2 + \
            0.5 * l2 * (ww1**2 + ww2**2)
        interpolated_loss.append(loss_val)
    plt.plot(alpha, interpolated_loss, **kwargs)
    plt.xlabel(r'$\alpha$')
    plt.ylabel('Loss')
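# Usage sketch (illustrative): linearly interpolate between two global minima,
# here (0.5, 2.0) and (2.0, 0.5), which both lie on w1 * w2 = 1 for x = y = 1,
# and overlay the path on the 2-D surface. This is a common way to probe the
# loss landscape between two solutions.
def demo_interpolation():
    fig, ax = plt.subplots(figsize=(8, 8))
    ax, settings = plot_mse_loss_surface_2d(fig, ax, 1, 1)
    plot_global_minimum_manifold_2d(ax, settings)
    plot_interpolated_trajectory_2d(ax, 0.5, 2.0, 2.0, 0.5, c='white')
    plt.show()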
def plot_learning_dynamics(ax, weights, **kwargs):
    epoch = np.arange(0, len(weights['w1']))
    scores = []
    for w1, w2 in zip(weights['w1'], weights['w2']):
        scores.append(w1 * w2)
    ax.plot(epoch, scores, **kwargs)


def animate_learning_dynamics(i, ax, weights, y, **kwargs):
    n_epoch = len(weights['w1'])
    epoch = np.arange(1, n_epoch + 1)
    scores = []
    for w1, w2 in zip(weights['w1'], weights['w2']):
        scores.append(w1 * w2)
    ax.set_xlim((1, n_epoch))
    ax.set_ylim((0, y))
    ax.set_xlabel('Epoch', fontsize=15)
    ax.set_ylabel(r'$w_2 \cdot w_1$', fontsize=15)
    ax.plot(epoch[:i], scores[:i], **kwargs)
    return ax


def animate_learning(weights, save=False, name='anim'):
    gs = gridspec.GridSpec(2, 4)
    gs.update(wspace=0.5)
    fig = plt.figure(figsize=(12, 8))
    ax1 = fig.add_subplot(gs[0, :2])
    ax2 = fig.add_subplot(gs[0, 2:], projection='3d')
    ax3 = fig.add_subplot(gs[1, 1:3])
    ax1, settings = plot_mse_loss_surface_2d(fig, ax1, 1, 1)
    ax2, settings = plot_mse_loss_surface_3d(ax2, 1, 1, angle=60)
    plot_global_minimum_manifold_2d(ax1, settings)
    plot_global_minimum_manifold_3d(ax2, settings)

    def update(i):
        animate_optimiser_trajectory_2d(i, ax1, weights, label='Gradient descent')
        animate_optimiser_trajectory_3d(i, ax2, settings, weights, label='Gradient descent')
        animate_learning_dynamics(i, ax3, weights, 1)

    # FuncAnimation calls the 'update' function for each frame
    anim = FuncAnimation(fig, update, frames=100, interval=5, save_count=50)
    # HTML(anim.to_html5_video())  # notebook display alternative
    if save:
        anim.save(name + '.gif', dpi=80, writer='imagemagick')
    plt.show()
np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0]+1 x = np.insert(manifold_x, pos, np.nan)", "v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2), angle=30): # create weight", "def animate_optimiser_trajectory_2d(i, ax, weights, **kwargs): w1_vals = weights['w1'] w2_vals =", "j]**2 + ws_y[i, j]**2) + 0.5 * v * (ws_x[i,", "+ ws_y[i, j]**2) + 0.5 * v * (ws_x[i, j]*ws_y[i,", "* y *(1 - man_ws_x * man_ws_y * x)**2 +", "manifold_y = man_ws_y[manifold_indices] pos = np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0]+1 x =", "marker=r'$\\rightarrow$', label='Negative gradient') settings = (x, y, v, l2, w1_range,", "of global minima #ax.scatter(manifold_y, manifold_x, 0, s=0.5, c='cyan', # label='Manifold", "ax.set_ylim((0, y)) ax.set_xlabel('Epoch', fontsize=15) ax.set_ylabel('$w_2 \\cdot w_1$', fontsize=15) ax.plot(epoch[:i], scores[:i],", "Surface') cbar.ax.set_ylabel('$Loss$', fontsize=15) ax.set_xlabel('$w_1$', fontsize=15) ax.set_ylabel('$w_2$', fontsize=15) # ax.grid() #", "w2) ax.set_xlim((1, n_epoch)) ax.set_ylim((0, y)) ax.set_xlabel('Epoch', fontsize=15) ax.set_ylabel('$w_2 \\cdot w_1$',", "= plt.subplots(figsize=(8, 8)) #ax.contour(ws_x, ws_y, cost_ws, 200) im = ax.imshow(cost_ws,", "array) color_dimension = Z # change to desired fourth dimension", "= np.min(loss) manifold_indices = loss < min_loss + 1e-5 manifold_x", "x y_true = y cost_ws[i, j] = 0.5 * (y_true", ") ax2 = fig.add_subplot(gs[0, 2:], projection='3d') ax3 = fig.add_subplot(gs[1, 1:3])", "version def plot_mse_loss_surface_3d(ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2,", "(can use any 50x50 array) color_dimension = Z # change", "weights, 1) # animate_weight_norm(i, ax4, scalarNet.history) # suncAnimation will call", "# retieve cached settings x, y, v, l2, w1_range, w2_range", "w2_path, **kwargs) def plot_interpolated_trajectory_3d(ax, settings, w1_a, w2_a, w1_b, w2_b, start=0,", "# fig, ax = plt.subplots(figsize=(8, 8)) #ax.contour(ws_x, ws_y, cost_ws, 200)", "matplotlib.gridspec as gridspec from matplotlib import cm from matplotlib.colors import", "for j in range(n_w): y_pred = ws_x[i, j] * ws_y[i,", "scores = [] for w1, w2 in zip(weights['w1'], weights['w2']): scores.append(w1", "= fig.add_subplot(gs[1, 1:3]) # ax1 = fig.add_subplot(2, 2, 1) #", "= ws_y Z = cost_ws #fig, ax = plt.subplots(figsize=(8, 8))", "len(weights['w1'])) scores = [] for w1, w2 in zip(weights['w1'], weights['w2']):", "v, l2, weights, **kwargs): loss = [] epoch = np.arange(0,", "# three-dimensional version def plot_mse_loss_surface_3d(ax, x, y, v=0.0, l2=0.0, w1_range=(-2,", "global minima ax.scatter(manifold_y, manifold_x, s=0.1, c='cyan', label='Manifold of global minima')", "import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec", "* (w1**2 + w2**2) + 0.5 * v * (w1", "interpolated_loss.append(loss_val) plt.plot(alpha, interpolated_loss, **kwargs) plt.xlabel(r'$\\alpha$') plt.ylabel('Loss') def plot_learning_dynamics(ax, weights, **kwargs):", "* v * (w1 * w2)**2 loss.append(loss_val) plt.plot(epoch, loss, **kwargs)", "0.5 * y * (1 - w1 * w2 *", "v * (w1_vals*w2_vals)**2 ax.plot(w1_vals[:i], w2_vals[:i], loss[:i], **kwargs) return ax def", "use any 50x50 array) color_dimension = Z # change to", "w2_b w1_path.append(ww1) w2_path.append(ww2) ax.plot(w1_path, w2_path, **kwargs) def plot_interpolated_trajectory_3d(ax, settings, w1_a,", "= np.array(weights['w1']) w2_vals = np.array(weights['w2']) epochs = np.arange(0, len(w1_vals), 1)", "name='anim'): gs = gridspec.GridSpec(2, 4) gs.update(wspace=0.5) fig = 
plt.figure(figsize=(12, 8))", "0.5 * v * (man_ws_x * man_ws_y)**2 min_loss = np.min(loss)", "end, 0.001) w1_path = [] w2_path = [] loss =" ]
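The shared contract across these helpers is that an optimiser trajectory is just a dict of per-epoch weight lists, weights = {'w1': [...], 'w2': [...]}, so any training loop can feed them. The short driver below is a sketch under that assumption: the run_gradient_descent name, the starting point, and the learning rate are illustrative choices, not part of the module above. It runs plain gradient descent on the same two-weight product network with x = y = 1 and overlays the path on the 2-D surface.

# Illustrative driver (a sketch; hyperparameters are assumptions, not from the module above).
def run_gradient_descent(x=1.0, y=1.0, lr=0.1, n_epochs=100, w1=0.25, w2=1.75):
    """Gradient descent on L = 0.5 * (y - w1 * w2 * x)**2, recording each step."""
    weights = {'w1': [], 'w2': []}
    for _ in range(n_epochs):
        weights['w1'].append(w1)
        weights['w2'].append(w2)
        residual = y - w1 * w2 * x
        # dL/dw1 = -residual * w2 * x, dL/dw2 = -residual * w1 * x;
        # the tuple assignment makes the update simultaneous.
        w1, w2 = (w1 + lr * residual * w2 * x,
                  w2 + lr * residual * w1 * x)
    return weights


weights = run_gradient_descent()
plot_optimiser_trajectory(1, 1, weights, dim='2d', manifold=True,
                          c='red', label='Gradient descent')
plt.legend()
plt.show()

The same weights dict can be passed unchanged to animate_learning to produce the animated three-panel figure.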
[ "[0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00],", "clear_session() model = quantized_model_from_json(json_string) # generate same output for weights", "0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [ 0.e+00 ,0.e+00, 0.e+00, 0.e+00,", "(2, 2), strides=(2, 2), kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0,", "make sure it works with Conv1D layer print_qstats(model) # reload", "works with Conv1D layer print_qstats(model) # reload the model to", "from qkeras import extract_model_operations # TODO(hzhuang): # qoctave_conv test #", "2.0 (the \"License\"); # you may not use this file", "{}'.format(layer.name, i) # he normal initialization with a scale factor", "* np.random.rand(10, 28, 28, 1) actual_output = model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output,", "[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00],", "for layer in model.layers: for i, weights in enumerate(layer.get_weights()): w", "depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0), depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4,", "all_weights: layer.set_weights(all_weights) # Save the model as an h5 file", "from tensorflow.keras.models import Model from tensorflow.keras.backend import clear_session from qkeras", "0.e+00, 0.e+00, 0.e+00, 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00,", "QConv1D( 2, 1, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1),", "Extract model operations model_ops = extract_model_operations(model) # Assertion about the", "the created h5 file after loading the model os.close(fd) os.remove(fname)", "from qkeras import quantized_relu from qkeras.utils import model_save_quantized_weights from qkeras.utils", "backend as K from tensorflow.keras.layers import Activation from tensorflow.keras.layers import", "y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317], [-1.659, 0.9834]],", "0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "__future__ import division from __future__ import print_function import os import", "0, 1), name='conv2d_2_m')( x) x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x =", "alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_0_m')( x) x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x)", "input_size), shape)) if all_weights: layer.set_weights(all_weights) # apply quantizer to weights", "x_in = Input((28, 28, 1), name='input') x = QSeparableConv2D( 32,", "x = QConv2D( 64, (2, 2), strides=(2, 2), kernel_quantizer=quantized_bits(6, 2,", "= np.array(all_weights) # test_qnetwork_weight_quantization all_weights_signature = np.array( [2., -6.75, -0.625,", "None: input_size = 10 * 10 shape = weights.shape assert", "= Input((28, 28, 1), name='input') x = QSeparableConv2D( 32, (2,", "1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,1.e+00, 0.e+00, 0.e+00,", "import ternary from qkeras import QActivation from qkeras import QDense", "qoctave_conv test # qbatchnorm test def test_qnetwork(): x = x_in", "apply quantizer to weights model_save_quantized_weights(model) all_weights = [] for layer", "x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x = Flatten(name='flatten')(x) x = QDense(", "weights model_save_quantized_weights(model) inputs = 
np.random.rand(2, 4, 4) p = model.predict(inputs).astype(np.float16)", "shape)) if all_weights: layer.set_weights(all_weights) # Save the model as an", "[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00],", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [", "weights in enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size is None:", ",0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00,", "expected_output, rtol=1e-4) def test_qconv1d(): np.random.seed(33) x = Input((4, 4,)) y", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00,", "use this file except in compliance with the License. #", "0.e+00, 0.e+00], [0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,0.e+00, 0.e+00,", "[] for layer in model.layers: for i, weights in enumerate(layer.get_weights()):", "-0.625, -2., -0.25, -56., 1.125, -1.625, -1.125]) assert all_weights.size ==", "avoid learning sizes shape = weights.shape assert input_size > 0,", "alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='qconv1d')( x) model = Model(inputs=x, outputs=y)", "from qconvolutional.py.\"\"\" from __future__ import absolute_import from __future__ import division", "name='dense')( x) x = Activation('softmax', name='softmax')(x) model = Model(inputs=[x_in], outputs=[x])", "1, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='qconv1d')( x)", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "import Model from tensorflow.keras.backend import clear_session from qkeras import binary", "Delete the existing model # Return a compiled model identical", "name='conv2d_2_m')( x) x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x = Flatten(name='flatten')(x) x", "0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00,", "= model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output, rtol=1e-4) def test_qconv1d(): np.random.seed(33) x =", "License. 
# You may obtain a copy of the License", "2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0), depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0),", "QConv2D from qkeras import QSeparableConv2D from qkeras import quantized_bits from", "initialization with a scale factor of 2.0 all_weights.append( 10.0 *", "== 32 # Print qstats to make sure it works", "-1.625, -1.125]) assert all_weights.size == all_weights_signature.size assert np.all(all_weights == all_weights_signature)", "under the License is distributed on an \"AS IS\" BASIS,", "input_size = 576 * 10 # to avoid learning sizes", "License for the specific language governing permissions and # limitations", "assert np.all(all_weights == all_weights_signature) # test_qnetwork_forward: expected_output = np.array( [[0.e+00,", "model as an h5 file using Keras's model.save() fd, fname", "qkeras.utils import load_qmodel from qkeras import print_qstats from qkeras import", "bias_quantizer=quantized_bits(4, 0, 1), name='qconv1d')( x) model = Model(inputs=x, outputs=y) #", "0.e+00, 0.e+00, 6.e-08, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "test_qnetwork(): x = x_in = Input((28, 28, 1), name='input') x", "model = Model(inputs=[x_in], outputs=[x]) # reload the model to ensure", "layer.set_weights(all_weights) # Save the model as an h5 file using", "as np from numpy.testing import assert_allclose import pytest import tempfile", "governing permissions and # limitations under the License. # ==============================================================================", "test # qbatchnorm test def test_qnetwork(): x = x_in =", "in model.layers: for i, weights in enumerate(layer.get_weights()): w = np.sum(weights)", "works # json_string = model.to_json() # clear_session() # model =", "# json_string = model.to_json() # clear_session() # model = quantized_model_from_json(json_string)", "np.array(all_weights) # test_qnetwork_weight_quantization all_weights_signature = np.array( [2., -6.75, -0.625, -2.,", "[] for i, weights in enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:]) if", "= 576 * 10 # to avoid learning sizes shape", "0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00], [0.e+00, 0.e+00, 0.e+00,", "output for weights np.random.seed(42) for layer in model.layers: all_weights =", "from __future__ import absolute_import from __future__ import division from __future__", "in compliance with the License. 
# You may obtain a", "software # distributed under the License is distributed on an", "np from numpy.testing import assert_allclose import pytest import tempfile from", "if input_size is None: input_size = 576 * 10 #", "for i, weights in enumerate(layer.get_weights()): w = np.sum(weights) all_weights.append(w) all_weights", "= QConv2D( 64, (3, 3), strides=(2, 2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0,", "0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00,", "depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_0_m')( x) x", "QSeparableConv2D( 32, (2, 2), strides=(2, 2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0, 1,", "QConv2D( 64, (3, 3), strides=(2, 2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1),", "model.predict(inputs).astype(np.float16) y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317], [-1.659,", "test_qnetwork_weight_quantization all_weights_signature = np.array( [2., -6.75, -0.625, -2., -0.25, -56.,", "one model = load_qmodel(fname) # Clean the created h5 file", "fd, fname = tempfile.mkstemp('.h5') model.save(fname) del model # Delete the", "10, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='dense')( x)", "QConv2D( 64, (2, 2), strides=(2, 2), kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),", "-0.25, -56., 1.125, -1.625, -1.125]) assert all_weights.size == all_weights_signature.size assert", "= Model(inputs=x, outputs=y) # Extract model operations model_ops = extract_model_operations(model)", "2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='dense')( x) x =", "generate same output for weights np.random.seed(42) for layer in model.layers:", "os.close(fd) os.remove(fname) # apply quantizer to weights model_save_quantized_weights(model) inputs =", "QActivation from qkeras import QDense from qkeras import QConv1D from", "using Keras's model.save() fd, fname = tempfile.mkstemp('.h5') model.save(fname) del model", "1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='qconv1d')( x) model = Model(inputs=x,", "for this Conv1D layer assert model_ops['qconv1d']['number_of_operations'] == 32 # Print", "pytest import tempfile from tensorflow.keras import backend as K from", "outputs=y) # Extract model operations model_ops = extract_model_operations(model) # Assertion", "64, (3, 3), strides=(2, 2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_1_m',", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00], [0.e+00, 0.e+00,", "import quantized_relu from qkeras.utils import model_save_quantized_weights from qkeras.utils import quantized_model_from_json", "for i, weights in enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size", "= [] for layer in model.layers: for i, weights in", "np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if all_weights: layer.set_weights(all_weights) # Save", "1))( x) x = QConv2D( 64, (2, 2), strides=(2, 2),", "from qkeras.utils import model_save_quantized_weights from qkeras.utils import quantized_model_from_json from qkeras.utils", "import os import numpy as np from numpy.testing import assert_allclose", "x = Input((4, 4,)) y = QConv1D( 2, 1, kernel_quantizer=quantized_bits(6,", "'input size for {} {}'.format(layer.name, i) all_weights.append( 10.0 * np.random.normal(0.0,", "after 
loading the model os.close(fd) os.remove(fname) # apply quantizer to", "0, 'input size for {} {}'.format(layer.name, i) # he normal", "> 0, 'input size for {} {}'.format(layer.name, i) # he", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],", ",0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00,", "Keras's model.save() fd, fname = tempfile.mkstemp('.h5') model.save(fname) del model #", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "np.sqrt(2.0 / input_size), shape)) if all_weights: layer.set_weights(all_weights) # Save the", "kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='qconv1d')( x) model", "numpy as np from numpy.testing import assert_allclose import pytest import", "model_save_quantized_weights(model) inputs = np.random.rand(2, 4, 4) p = model.predict(inputs).astype(np.float16) y", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "file after loading the model os.close(fd) os.remove(fname) # apply quantizer", "assert all_weights.size == all_weights_signature.size assert np.all(all_weights == all_weights_signature) # test_qnetwork_forward:", "to in writing, software # distributed under the License is", "the model to ensure saving/loading works json_string = model.to_json() clear_session()", "0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16) inputs = 2", "permissions and # limitations under the License. # ============================================================================== \"\"\"Test", "0.e+00, 0.e+00, 0.e+00], [ 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "# See the License for the specific language governing permissions", "is None: input_size = 576 * 10 # to avoid", "as K from tensorflow.keras.layers import Activation from tensorflow.keras.layers import Flatten", "language governing permissions and # limitations under the License. #", "-56., 1.125, -1.625, -1.125]) assert all_weights.size == all_weights_signature.size assert np.all(all_weights", "or agreed to in writing, software # distributed under the", "0.e+00, 0.e+00, 0.e+00]]).astype(np.float16) inputs = 2 * np.random.rand(10, 28, 28,", "required by applicable law or agreed to in writing, software", "all_weights_signature = np.array( [2., -6.75, -0.625, -2., -0.25, -56., 1.125,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "from qkeras import quantized_bits from qkeras import quantized_relu from qkeras.utils", "model.layers: all_weights = [] for i, weights in enumerate(layer.get_weights()): input_size", "with the License. # You may obtain a copy of", "model = quantized_model_from_json(json_string) for layer in model.layers: all_weights = []", "bias_quantizer=quantized_bits(4, 0, 1), name='dense')( x) x = Activation('softmax', name='softmax')(x) model", "0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "10 * 10 shape = weights.shape assert input_size > 0,", "import assert_allclose import pytest import tempfile from tensorflow.keras import backend", "Conv1D layer print_qstats(model) # reload the model to ensure saving/loading", "the License. 
# ============================================================================== \"\"\"Test layers from qconvolutional.py.\"\"\" from __future__", "qkeras import print_qstats from qkeras import extract_model_operations # TODO(hzhuang): #", "from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from tensorflow.keras.backend", "and # limitations under the License. # ============================================================================== \"\"\"Test layers", "# reload the model to ensure saving/loading works json_string =", "inputs = 2 * np.random.rand(10, 28, 28, 1) actual_output =", "np.random.rand(2, 4, 4) p = model.predict(inputs).astype(np.float16) y = np.array([[[-2.441, 3.816],", "x) x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x = QConv2D( 64, (3,", "Input((28, 28, 1), name='input') x = QSeparableConv2D( 32, (2, 2),", "compliance with the License. # You may obtain a copy", "0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00,", "0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00,", "agreed to in writing, software # distributed under the License", "28, 1), name='input') x = QSeparableConv2D( 32, (2, 2), strides=(2,", "np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size = 10 * 10", "tensorflow.keras import backend as K from tensorflow.keras.layers import Activation from", "import print_qstats from qkeras import extract_model_operations # TODO(hzhuang): # qoctave_conv", "32 # Print qstats to make sure it works with", "as an h5 file using Keras's model.save() fd, fname =", "tensorflow.keras.layers import Input from tensorflow.keras.models import Model from tensorflow.keras.backend import", "distributed under the License is distributed on an \"AS IS\"", "# he normal initialization with a scale factor of 2.0", "# Copyright 2019 Google LLC # # # Licensed under", "0.e+00, 0.e+00], [ 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,", "from qkeras import QSeparableConv2D from qkeras import quantized_bits from qkeras", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00], [0.e+00, 0.e+00, 0.e+00,", "= QConv2D( 64, (2, 2), strides=(2, 2), kernel_quantizer=quantized_bits(6, 2, 1,", "express or implied. # See the License for the specific", "except in compliance with the License. 
# You may obtain", "3), strides=(2, 2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_1_m', activation=quantized_relu(6, 3,", "# Clean the created h5 file after loading the model", "assert input_size > 0, 'input size for {} {}'.format(layer.name, i)", "i, weights in enumerate(layer.get_weights()): w = np.sum(weights) all_weights.append(w) all_weights =", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "QConv1D from qkeras import QConv2D from qkeras import QSeparableConv2D from", "= np.random.rand(2, 4, 4) p = model.predict(inputs).astype(np.float16) y = np.array([[[-2.441,", "not use this file except in compliance with the License.", "all_weights_signature.size assert np.all(all_weights == all_weights_signature) # test_qnetwork_forward: expected_output = np.array(", "rtol=1e-4) def test_qconv1d(): np.random.seed(33) x = Input((4, 4,)) y =", "to avoid learning sizes shape = weights.shape assert input_size >", "2), strides=(2, 2), kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1),", "test_qnetwork_forward: expected_output = np.array( [[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00,", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "writing, software # distributed under the License is distributed on", "sizes shape = weights.shape assert input_size > 0, 'input size", "model_ops = extract_model_operations(model) # Assertion about the number of operations", "you may not use this file except in compliance with", "quantized_model_from_json(json_string) # generate same output for weights np.random.seed(42) for layer", "============================================================================== \"\"\"Test layers from qconvolutional.py.\"\"\" from __future__ import absolute_import from", "from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Input from tensorflow.keras.models", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "[2., -6.75, -0.625, -2., -0.25, -56., 1.125, -1.625, -1.125]) assert", "under the License. 
# ============================================================================== \"\"\"Test layers from qconvolutional.py.\"\"\" from", "import Flatten from tensorflow.keras.layers import Input from tensorflow.keras.models import Model", "x = QConv2D( 64, (3, 3), strides=(2, 2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4,", "-6.75, -0.625, -2., -0.25, -56., 1.125, -1.625, -1.125]) assert all_weights.size", "created h5 file after loading the model os.close(fd) os.remove(fname) #", "to weights model_save_quantized_weights(model) inputs = np.random.rand(2, 4, 4) p =", "== all_weights_signature.size assert np.all(all_weights == all_weights_signature) # test_qnetwork_forward: expected_output =", "all_weights = np.array(all_weights) # test_qnetwork_weight_quantization all_weights_signature = np.array( [2., -6.75,", "0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16) inputs =", "2 * np.random.rand(10, 28, 28, 1) actual_output = model.predict(inputs).astype(np.float16) assert_allclose(actual_output,", "quantized_model_from_json(json_string) for layer in model.layers: all_weights = [] for i,", "2, 1, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='qconv1d')(", "CONDITIONS OF ANY KIND, either express or implied. # See", "import Input from tensorflow.keras.models import Model from tensorflow.keras.backend import clear_session", "load_qmodel(fname) # Clean the created h5 file after loading the", "about the number of operations for this Conv1D layer assert", "learning sizes shape = weights.shape assert input_size > 0, 'input", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "layer in model.layers: all_weights = [] for i, weights in", "QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x = QConv2D( 64, (3, 3), strides=(2, 2),", "Assertion about the number of operations for this Conv1D layer", "json_string = model.to_json() # clear_session() # model = quantized_model_from_json(json_string) for", "# qoctave_conv test # qbatchnorm test def test_qnetwork(): x =", "import load_qmodel from qkeras import print_qstats from qkeras import extract_model_operations", "name='conv2d_1_m', activation=quantized_relu(6, 3, 1))( x) x = QConv2D( 64, (2,", "28, 28, 1) actual_output = model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output, rtol=1e-4) def", "# Save the model as an h5 file using Keras's", "np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317], [-1.659, 0.9834]], [[-4.99, 1.139],", "the number of operations for this Conv1D layer assert model_ops['qconv1d']['number_of_operations']", "1), name='dense')( x) x = Activation('softmax', name='softmax')(x) model = Model(inputs=[x_in],", "[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16)", "assert model_ops['qconv1d']['number_of_operations'] == 32 # Print qstats to make sure", "Print qstats to make sure it works with Conv1D layer", "x) x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x = Flatten(name='flatten')(x) x =", "0.e+00, 0.e+00, 5.e-07, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00", "= model.to_json() clear_session() model = quantized_model_from_json(json_string) # generate same output", "model.save() fd, fname = tempfile.mkstemp('.h5') model.save(fname) del model # Delete", "0, 1, alpha=1.0), depthwise_activation=quantized_bits(6, 2, 1, 
alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1),", "print_qstats(model) # reload the model to ensure saving/loading works #", "{}'.format(layer.name, i) all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))", "import QActivation from qkeras import QDense from qkeras import QConv1D", "Model(inputs=x, outputs=y) # Extract model operations model_ops = extract_model_operations(model) #", "shape)) if all_weights: layer.set_weights(all_weights) # apply quantizer to weights model_save_quantized_weights(model)", "number of operations for this Conv1D layer assert model_ops['qconv1d']['number_of_operations'] ==", "pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0), depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0,", "model_save_quantized_weights(model) all_weights = [] for layer in model.layers: for i,", "Activation('softmax', name='softmax')(x) model = Model(inputs=[x_in], outputs=[x]) # reload the model", "test_qconv1d(): np.random.seed(33) x = Input((4, 4,)) y = QConv1D( 2,", "x = Flatten(name='flatten')(x) x = QDense( 10, kernel_quantizer=quantized_bits(6, 2, 1,", "qkeras import binary from qkeras import ternary from qkeras import", "= np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size = 576 *", "OR CONDITIONS OF ANY KIND, either express or implied. #", "the License is distributed on an \"AS IS\" BASIS, #", "model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output, rtol=1e-4) def test_qconv1d(): np.random.seed(33) x = Input((4,", "name='act2_m')(x) x = Flatten(name='flatten')(x) x = QDense( 10, kernel_quantizer=quantized_bits(6, 2,", "it works with Conv1D layer print_qstats(model) # reload the model", "import quantized_bits from qkeras import quantized_relu from qkeras.utils import model_save_quantized_weights", "(3, 3), strides=(2, 2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_1_m', activation=quantized_relu(6,", "of 2.0 all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00,", "strides=(2, 2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_1_m', activation=quantized_relu(6, 3, 1))(", "0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00,", "# reload the model to ensure saving/loading works # json_string", "Model from tensorflow.keras.backend import clear_session from qkeras import binary from", "layer print_qstats(model) # reload the model to ensure saving/loading works", "qbatchnorm test def test_qnetwork(): x = x_in = Input((28, 28,", "is None: input_size = 10 * 10 shape = weights.shape", "1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00,", "= np.array( [2., -6.75, -0.625, -2., -0.25, -56., 1.125, -1.625,", "0, 1), name='qconv1d')( x) model = Model(inputs=x, outputs=y) # Extract", "numpy.testing import assert_allclose import pytest import tempfile from tensorflow.keras import", "size for {} {}'.format(layer.name, i) # he normal initialization with", "2.0 all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if", "identical to the previous one model = load_qmodel(fname) # Clean", "size for {} {}'.format(layer.name, i) all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0", "K from tensorflow.keras.layers import Activation from tensorflow.keras.layers import Flatten 
from", "normal initialization with a scale factor of 2.0 all_weights.append( 10.0", "bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_2_m')( x) x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x", "# apply quantizer to weights model_save_quantized_weights(model) all_weights = [] for", "QDense( 10, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='dense')(", "law or agreed to in writing, software # distributed under", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00], [0.e+00,", "= Activation('softmax', name='softmax')(x) model = Model(inputs=[x_in], outputs=[x]) # reload the", "# test_qnetwork_forward: expected_output = np.array( [[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "input_size > 0, 'input size for {} {}'.format(layer.name, i) all_weights.append(", "model identical to the previous one model = load_qmodel(fname) #", "# test_qnetwork_weight_quantization all_weights_signature = np.array( [2., -6.75, -0.625, -2., -0.25,", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00,", "28, 1) actual_output = model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output, rtol=1e-4) def test_qconv1d():", "1), name='qconv1d')( x) model = Model(inputs=x, outputs=y) # Extract model", "model to ensure saving/loading works # json_string = model.to_json() #", "qkeras.utils import quantized_model_from_json from qkeras.utils import load_qmodel from qkeras import", "import QConv1D from qkeras import QConv2D from qkeras import QSeparableConv2D", "2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_2_m')( x) x =", "# Assertion about the number of operations for this Conv1D", "0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [ 0.e+00 ,0.e+00,", "tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Input from tensorflow.keras.models import", "the model to ensure saving/loading works # json_string = model.to_json()", "tempfile.mkstemp('.h5') model.save(fname) del model # Delete the existing model #", "0.e+00, 0.e+00]]).astype(np.float16) inputs = 2 * np.random.rand(10, 28, 28, 1)", "input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size = 576", "operations for this Conv1D layer assert model_ops['qconv1d']['number_of_operations'] == 32 #", "may obtain a copy of the License at # #", "same output for weights np.random.seed(42) for layer in model.layers: all_weights", "all_weights_signature) # test_qnetwork_forward: expected_output = np.array( [[0.e+00, 0.e+00, 0.e+00, 0.e+00,", "Clean the created h5 file after loading the model os.close(fd)", "[[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size = 10 *", "6.e-08, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00,", "# Extract model operations model_ops = extract_model_operations(model) # Assertion about", "from tensorflow.keras import backend as K from tensorflow.keras.layers import Activation", "= tempfile.mkstemp('.h5') model.save(fname) del model # Delete the existing model", "may not use this file except in compliance with the", "1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00,", "= quantized_model_from_json(json_string) for layer in model.layers: all_weights = [] for", "* 10 shape = weights.shape assert input_size 
> 0, 'input", "0.e+00], [0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,0.e+00, 0.e+00, 0.e+00,", "= QConv1D( 2, 1, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "TODO(hzhuang): # qoctave_conv test # qbatchnorm test def test_qnetwork(): x", "model # Return a compiled model identical to the previous", "1), name='conv2d_1_m', activation=quantized_relu(6, 3, 1))( x) x = QConv2D( 64,", "this file except in compliance with the License. # You", "qkeras import quantized_relu from qkeras.utils import model_save_quantized_weights from qkeras.utils import", "qkeras import quantized_bits from qkeras import quantized_relu from qkeras.utils import", "from qkeras import QActivation from qkeras import QDense from qkeras", "from qkeras.utils import quantized_model_from_json from qkeras.utils import load_qmodel from qkeras", "__future__ import print_function import os import numpy as np from", "np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size = 576 * 10", "0, 1), name='conv2d_0_m')( x) x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x =", "import absolute_import from __future__ import division from __future__ import print_function", "from qkeras import binary from qkeras import ternary from qkeras", "* 10 # to avoid learning sizes shape = weights.shape", "QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x = Flatten(name='flatten')(x) x = QDense( 10, kernel_quantizer=quantized_bits(6,", "0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,", "qkeras.utils import model_save_quantized_weights from qkeras.utils import quantized_model_from_json from qkeras.utils import", "previous one model = load_qmodel(fname) # Clean the created h5", "# qbatchnorm test def test_qnetwork(): x = x_in = Input((28,", "tensorflow.keras.backend import clear_session from qkeras import binary from qkeras import", "np.random.seed(42) for layer in model.layers: all_weights = [] for i,", "model.to_json() # clear_session() # model = quantized_model_from_json(json_string) for layer in", "import QDense from qkeras import QConv1D from qkeras import QConv2D", "saving/loading works json_string = model.to_json() clear_session() model = quantized_model_from_json(json_string) #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "this Conv1D layer assert model_ops['qconv1d']['number_of_operations'] == 32 # Print qstats", "-2., -0.25, -56., 1.125, -1.625, -1.125]) assert all_weights.size == all_weights_signature.size", "0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "4) p = model.predict(inputs).astype(np.float16) y = np.array([[[-2.441, 3.816], [-3.807, -1.426],", "10 shape = weights.shape assert input_size > 0, 'input size", "# # Licensed under the Apache License, Version 2.0 (the", "saving/loading works # json_string = model.to_json() # clear_session() # model", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "in enumerate(layer.get_weights()): w = np.sum(weights) all_weights.append(w) all_weights = np.array(all_weights) #", "0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "sure it works with Conv1D layer print_qstats(model) # reload the", "x = QDense( 10, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0,", "inputs = np.random.rand(2, 4, 4) p = model.predict(inputs).astype(np.float16) y =", "qkeras import QConv1D from qkeras import QConv2D from qkeras import", "factor of 2.0 all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size),", "with Conv1D layer print_qstats(model) # reload the model to ensure", "strides=(2, 2), kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_2_m')(", "for {} {}'.format(layer.name, i) all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 /", "import numpy as np from numpy.testing import assert_allclose import pytest", "import binary from qkeras import ternary from qkeras import QActivation", "/ input_size), shape)) if all_weights: layer.set_weights(all_weights) # apply quantizer to", "= [] for i, weights in enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:])", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "= 10 * 10 shape = weights.shape assert input_size >", "the model as an h5 file using Keras's model.save() fd,", "p = model.predict(inputs).astype(np.float16) y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684,", "name='softmax')(x) model = Model(inputs=[x_in], outputs=[x]) # reload the model to", "x = x_in = Input((28, 28, 1), name='input') x =", "all_weights: layer.set_weights(all_weights) # apply quantizer to weights model_save_quantized_weights(model) all_weights =", "os.remove(fname) # apply quantizer to weights model_save_quantized_weights(model) inputs = np.random.rand(2,", "from qkeras import QDense from qkeras import QConv1D from qkeras", "2019 Google LLC # # # Licensed under the Apache", "all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if all_weights:", "0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],", "# generate same output for weights np.random.seed(42) for layer in", "# # # Licensed under the Apache License, Version 2.0", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00,", "reload the model to ensure saving/loading works # json_string =", "3.816], [-3.807, -1.426], [-2.684, -1.317], [-1.659, 0.9834]], [[-4.99, 1.139], [-2.559,", "weights np.random.seed(42) for layer in model.layers: all_weights = [] for", "assert_allclose import pytest import tempfile from tensorflow.keras import backend as", "np.random.rand(10, 28, 28, 1) actual_output = model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output, rtol=1e-4)", "alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_2_m')( x) x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x)", "= model.to_json() # clear_session() # model = quantized_model_from_json(json_string) for layer", "quantized_relu from qkeras.utils import model_save_quantized_weights from qkeras.utils import quantized_model_from_json from", "input_size is None: input_size = 10 * 10 shape =", "Return a compiled model identical to the previous one model", "= QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x = 
QConv2D( 64, (3, 3), strides=(2,", "# to avoid learning sizes shape = weights.shape assert input_size", "clear_session() # model = quantized_model_from_json(json_string) for layer in model.layers: all_weights", "kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_2_m')( x) x", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "# ============================================================================== \"\"\"Test layers from qconvolutional.py.\"\"\" from __future__ import absolute_import", "32, (2, 2), strides=(2, 2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0),", "input_size = 10 * 10 shape = weights.shape assert input_size", "0.e+00, 0.e+00 ,1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 1.e+00, 0.e+00, 0.e+00,", "weights in enumerate(layer.get_weights()): w = np.sum(weights) all_weights.append(w) all_weights = np.array(all_weights)", "to ensure saving/loading works json_string = model.to_json() clear_session() model =", "operations model_ops = extract_model_operations(model) # Assertion about the number of", "for {} {}'.format(layer.name, i) # he normal initialization with a", "1), name='conv2d_2_m')( x) x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x = Flatten(name='flatten')(x)", "[-2.559, -1.216], [-2.285, 1.905], [-2.652, -0.467]]]).astype(np.float16) assert np.all(p == y)", "h5 file after loading the model os.close(fd) os.remove(fname) # apply", "or implied. # See the License for the specific language", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00], [0.e+00, 0.e+00,", "0.9834]], [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905], [-2.652, -0.467]]]).astype(np.float16) assert", "name='input') x = QSeparableConv2D( 32, (2, 2), strides=(2, 2), depthwise_quantizer=binary(alpha=1.0),", "0.e+00, 0.e+00, 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00,", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "Model(inputs=[x_in], outputs=[x]) # reload the model to ensure saving/loading works", "tempfile from tensorflow.keras import backend as K from tensorflow.keras.layers import", "2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_0_m')( x) x =", "= extract_model_operations(model) # Assertion about the number of operations for", "import Activation from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Input", "to weights model_save_quantized_weights(model) all_weights = [] for layer in model.layers:", "qkeras import QDense from qkeras import QConv1D from qkeras import", "np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if all_weights: layer.set_weights(all_weights) # apply", "quantizer to weights model_save_quantized_weights(model) all_weights = [] for layer in", "= QSeparableConv2D( 32, (2, 2), strides=(2, 2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0,", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "scale factor of 2.0 all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 /", "def test_qconv1d(): np.random.seed(33) x = Input((4, 4,)) y = QConv1D(", "Input((4, 4,)) y = QConv1D( 2, 1, kernel_quantizer=quantized_bits(6, 2, 1,", "i) all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if", "shape = weights.shape assert input_size > 0, 'input size for", "QSeparableConv2D from qkeras import quantized_bits from qkeras import quantized_relu from", "0.e+00, 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00,", "License. # ============================================================================== \"\"\"Test layers from qconvolutional.py.\"\"\" from __future__ import", "(the \"License\"); # you may not use this file except", "existing model # Return a compiled model identical to the", "load_qmodel from qkeras import print_qstats from qkeras import extract_model_operations #", "qconvolutional.py.\"\"\" from __future__ import absolute_import from __future__ import division from", "# you may not use this file except in compliance", "from numpy.testing import assert_allclose import pytest import tempfile from tensorflow.keras", "for weights np.random.seed(42) for layer in model.layers: all_weights = []", "[-1.659, 0.9834]], [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905], [-2.652, -0.467]]]).astype(np.float16)", "0.e+00, 6.e-08, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,", "[[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905], [-2.652, -0.467]]]).astype(np.float16) assert np.all(p", "import QConv2D from qkeras import QSeparableConv2D from qkeras import quantized_bits", "ensure saving/loading works json_string = model.to_json() clear_session() model = quantized_model_from_json(json_string)", "h5 file using Keras's model.save() fd, fname = tempfile.mkstemp('.h5') model.save(fname)", "# Delete the existing model # Return a compiled model", "# model = quantized_model_from_json(json_string) for layer in model.layers: all_weights =", "a compiled model identical to the previous one model =", "2), kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_2_m')( x)", "model # Delete the existing model # Return a compiled", "# # Unless required by applicable law or agreed to", "a scale factor of 2.0 all_weights.append( 10.0 * np.random.normal(0.0, 
np.sqrt(2.0", "from __future__ import print_function import os import numpy as np", "w = np.sum(weights) all_weights.append(w) all_weights = np.array(all_weights) # test_qnetwork_weight_quantization all_weights_signature", "* np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if all_weights: layer.set_weights(all_weights) #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "3, 1))( x) x = QConv2D( 64, (2, 2), strides=(2,", "Activation from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Input from", "Version 2.0 (the \"License\"); # you may not use this", "4, 4) p = model.predict(inputs).astype(np.float16) y = np.array([[[-2.441, 3.816], [-3.807,", "extract_model_operations # TODO(hzhuang): # qoctave_conv test # qbatchnorm test def", "np.sum(weights) all_weights.append(w) all_weights = np.array(all_weights) # test_qnetwork_weight_quantization all_weights_signature = np.array(", "0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 6.e-08,", "model = Model(inputs=x, outputs=y) # Extract model operations model_ops =", "if all_weights: layer.set_weights(all_weights) # Save the model as an h5", "# Return a compiled model identical to the previous one", "qkeras import QConv2D from qkeras import QSeparableConv2D from qkeras import", "= 2 * np.random.rand(10, 28, 28, 1) actual_output = model.predict(inputs).astype(np.float16)", "the model os.close(fd) os.remove(fname) # apply quantizer to weights model_save_quantized_weights(model)", "model_ops['qconv1d']['number_of_operations'] == 32 # Print qstats to make sure it", "from qkeras.utils import load_qmodel from qkeras import print_qstats from qkeras", "0, 1), name='conv2d_1_m', activation=quantized_relu(6, 3, 1))( x) x = QConv2D(", "0.e+00, 0.e+00, 0.e+00 ,1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 1.e+00, 0.e+00,", "0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 5.e-07,", "__future__ import absolute_import from __future__ import division from __future__ import", "implied. 
# See the License for the specific language governing", "= x_in = Input((28, 28, 1), name='input') x = QSeparableConv2D(", "under the Apache License, Version 2.0 (the \"License\"); # you", "np.random.seed(33) x = Input((4, 4,)) y = QConv1D( 2, 1,", "0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00,", "QDense from qkeras import QConv1D from qkeras import QConv2D from", "input_size > 0, 'input size for {} {}'.format(layer.name, i) #", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [ 0.e+00", "i, weights in enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size is", "model.save(fname) del model # Delete the existing model # Return", "LLC # # # Licensed under the Apache License, Version", "# apply quantizer to weights model_save_quantized_weights(model) inputs = np.random.rand(2, 4,", "[-2.652, -0.467]]]).astype(np.float16) assert np.all(p == y) if __name__ == '__main__':", "from qkeras import ternary from qkeras import QActivation from qkeras", "by applicable law or agreed to in writing, software #", "file using Keras's model.save() fd, fname = tempfile.mkstemp('.h5') model.save(fname) del", "= QDense( 10, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1),", "name='conv2d_0_m')( x) x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x = QConv2D( 64,", "i) # he normal initialization with a scale factor of", "import tempfile from tensorflow.keras import backend as K from tensorflow.keras.layers", "[ 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00,", "tensorflow.keras.layers import Activation from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import", "kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='dense')( x) x", "2), kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_1_m', activation=quantized_relu(6, 3, 1))( x)", "0, 1), name='dense')( x) x = Activation('softmax', name='softmax')(x) model =", "4,)) y = QConv1D( 2, 1, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),", "1), name='input') x = QSeparableConv2D( 32, (2, 2), strides=(2, 2),", "1), name='conv2d_0_m')( x) x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x = QConv2D(", "1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='dense')( x) x = Activation('softmax',", "'input size for {} {}'.format(layer.name, i) # he normal initialization", "with a scale factor of 2.0 all_weights.append( 10.0 * np.random.normal(0.0,", ",1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00", "1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16) inputs = 2 * np.random.rand(10,", "{} {}'.format(layer.name, i) all_weights.append( 10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size),", "loading the model os.close(fd) os.remove(fname) # apply quantizer to weights", "quantizer to weights model_save_quantized_weights(model) inputs = np.random.rand(2, 4, 4) p", "x) model = Model(inputs=x, outputs=y) # Extract model operations model_ops", "0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00,", "= np.sum(weights) all_weights.append(w) all_weights = np.array(all_weights) # test_qnetwork_weight_quantization all_weights_signature =", "# TODO(hzhuang): # qoctave_conv test # qbatchnorm test def test_qnetwork():", "from tensorflow.keras.backend import clear_session from qkeras import binary from qkeras", 
"Flatten(name='flatten')(x) x = QDense( 10, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4,", "1) actual_output = model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output, rtol=1e-4) def test_qconv1d(): np.random.seed(33)", "def test_qnetwork(): x = x_in = Input((28, 28, 1), name='input')", "2), strides=(2, 2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0), depthwise_activation=quantized_bits(6, 2,", "# Print qstats to make sure it works with Conv1D", "quantized_model_from_json from qkeras.utils import load_qmodel from qkeras import print_qstats from", "== all_weights_signature) # test_qnetwork_forward: expected_output = np.array( [[0.e+00, 0.e+00, 0.e+00,", "json_string = model.to_json() clear_session() model = quantized_model_from_json(json_string) # generate same", "from qkeras import QConv1D from qkeras import QConv2D from qkeras", "outputs=[x]) # reload the model to ensure saving/loading works json_string", "weights.shape assert input_size > 0, 'input size for {} {}'.format(layer.name,", "= model.predict(inputs).astype(np.float16) y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317],", "[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,1.e+00, 0.e+00, 0.e+00, 0.e+00],", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "extract_model_operations(model) # Assertion about the number of operations for this", "Unless required by applicable law or agreed to in writing,", "-1.426], [-2.684, -1.317], [-1.659, 0.9834]], [[-4.99, 1.139], [-2.559, -1.216], [-2.285,", "if all_weights: layer.set_weights(all_weights) # apply quantizer to weights model_save_quantized_weights(model) all_weights", "reload the model to ensure saving/loading works json_string = model.to_json()", "layer.set_weights(all_weights) # apply quantizer to weights model_save_quantized_weights(model) all_weights = []", "0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00,", "name='act0_m')(x) x = QConv2D( 64, (3, 3), strides=(2, 2), kernel_quantizer=ternary(alpha=1.0),", "import print_function import os import numpy as np from numpy.testing", "0.e+00, 0.e+00, 0.e+00, 0.e+00 ,1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 1.e+00,", "the specific language governing permissions and # limitations under the", "applicable law or agreed to in writing, software # distributed", "os import numpy as np from numpy.testing import assert_allclose import", "64, (2, 2), strides=(2, 2), kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4,", "5.e-07, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,1.e+00, 0.e+00,", "0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,", "2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='qconv1d')( x) model =", "-1.317], [-1.659, 0.9834]], [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905], [-2.652,", "Google LLC # # # Licensed under the Apache License,", "all_weights = [] for i, weights in enumerate(layer.get_weights()): input_size =", "0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00,", "= QActivation('quantized_relu(6,4,1)', name='act2_m')(x) x = Flatten(name='flatten')(x) x = QDense( 10,", "import extract_model_operations # TODO(hzhuang): # qoctave_conv test # qbatchnorm test", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16) inputs", "layer in model.layers: for i, weights in 
enumerate(layer.get_weights()): w =", "0.e+00], [ 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00,", "from qkeras import QConv2D from qkeras import QSeparableConv2D from qkeras", "test def test_qnetwork(): x = x_in = Input((28, 28, 1),", "compiled model identical to the previous one model = load_qmodel(fname)", "= load_qmodel(fname) # Clean the created h5 file after loading", "in writing, software # distributed under the License is distributed", "model os.close(fd) os.remove(fname) # apply quantizer to weights model_save_quantized_weights(model) inputs", "apply quantizer to weights model_save_quantized_weights(model) inputs = np.random.rand(2, 4, 4)", "\"\"\"Test layers from qconvolutional.py.\"\"\" from __future__ import absolute_import from __future__", "# limitations under the License. # ============================================================================== \"\"\"Test layers from", "model to ensure saving/loading works json_string = model.to_json() clear_session() model", "0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16) inputs = 2 * np.random.rand(10, 28,", "1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_2_m')( x) x = QActivation('quantized_relu(6,4,1)',", "from __future__ import division from __future__ import print_function import os", "alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='dense')( x) x = Activation('softmax', name='softmax')(x)", "np.all(all_weights == all_weights_signature) # test_qnetwork_forward: expected_output = np.array( [[0.e+00, 0.e+00,", "0.e+00, 0.e+00, 0.e+00], [0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,0.e+00,", "from qkeras import print_qstats from qkeras import extract_model_operations # TODO(hzhuang):", "[-2.684, -1.317], [-1.659, 0.9834]], [[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905],", "model = quantized_model_from_json(json_string) # generate same output for weights np.random.seed(42)", "0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "to ensure saving/loading works # json_string = model.to_json() # clear_session()", "import QSeparableConv2D from qkeras import quantized_bits from qkeras import quantized_relu", "tensorflow.keras.models import Model from tensorflow.keras.backend import clear_session from qkeras import", "# clear_session() # model = quantized_model_from_json(json_string) for layer in model.layers:", "qkeras import extract_model_operations # TODO(hzhuang): # qoctave_conv test # qbatchnorm", "Conv1D layer assert model_ops['qconv1d']['number_of_operations'] == 32 # Print qstats to", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "alpha=1.0), depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_0_m')( x)", "License, Version 2.0 (the \"License\"); # you may not use", "import model_save_quantized_weights from qkeras.utils import quantized_model_from_json from qkeras.utils import load_qmodel", "# You may obtain a copy of the License at", "qkeras import QSeparableConv2D from qkeras import quantized_bits from qkeras import", "[-2.285, 1.905], [-2.652, -0.467]]]).astype(np.float16) assert np.all(p == y) if __name__", "model_save_quantized_weights from qkeras.utils import quantized_model_from_json from qkeras.utils import load_qmodel from", "bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_0_m')( x) x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x", "quantized_bits from qkeras import quantized_relu from qkeras.utils import model_save_quantized_weights from", "to 
make sure it works with Conv1D layer print_qstats(model) #", "limitations under the License. # ============================================================================== \"\"\"Test layers from qconvolutional.py.\"\"\"", "= weights.shape assert input_size > 0, 'input size for {}", "10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape)) if all_weights: layer.set_weights(all_weights)", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "qkeras import QActivation from qkeras import QDense from qkeras import", "name='qconv1d')( x) model = Model(inputs=x, outputs=y) # Extract model operations", "ensure saving/loading works # json_string = model.to_json() # clear_session() #", "layer assert model_ops['qconv1d']['number_of_operations'] == 32 # Print qstats to make", "x) x = Activation('softmax', name='softmax')(x) model = Model(inputs=[x_in], outputs=[x]) #", "= np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317], [-1.659, 0.9834]], [[-4.99,", "input_size is None: input_size = 576 * 10 # to", "[-3.807, -1.426], [-2.684, -1.317], [-1.659, 0.9834]], [[-4.99, 1.139], [-2.559, -1.216],", "-1.125]) assert all_weights.size == all_weights_signature.size assert np.all(all_weights == all_weights_signature) #", "input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size = 10", "Save the model as an h5 file using Keras's model.save()", "Input from tensorflow.keras.models import Model from tensorflow.keras.backend import clear_session from", "[0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00],", "print_function import os import numpy as np from numpy.testing import", "enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size =", "0.e+00 ,1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00,", "the License for the specific language governing permissions and #", "1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00,", "1.905], [-2.652, -0.467]]]).astype(np.float16) assert np.all(p == y) if __name__ ==", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. 
# See the License for the", "to the previous one model = load_qmodel(fname) # Clean the", "from tensorflow.keras.layers import Activation from tensorflow.keras.layers import Flatten from tensorflow.keras.layers", "model operations model_ops = extract_model_operations(model) # Assertion about the number", "1.125, -1.625, -1.125]) assert all_weights.size == all_weights_signature.size assert np.all(all_weights ==", "import quantized_model_from_json from qkeras.utils import load_qmodel from qkeras import print_qstats", "= Model(inputs=[x_in], outputs=[x]) # reload the model to ensure saving/loading", "division from __future__ import print_function import os import numpy as", "all_weights.size == all_weights_signature.size assert np.all(all_weights == all_weights_signature) # test_qnetwork_forward: expected_output", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16) inputs = 2 *", "qstats to make sure it works with Conv1D layer print_qstats(model)", "= np.array( [[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00,", "(2, 2), strides=(2, 2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0), depthwise_activation=quantized_bits(6,", "model.to_json() clear_session() model = quantized_model_from_json(json_string) # generate same output for", "an h5 file using Keras's model.save() fd, fname = tempfile.mkstemp('.h5')", "fname = tempfile.mkstemp('.h5') model.save(fname) del model # Delete the existing", "layers from qconvolutional.py.\"\"\" from __future__ import absolute_import from __future__ import", "576 * 10 # to avoid learning sizes shape =", "np.array( [2., -6.75, -0.625, -2., -0.25, -56., 1.125, -1.625, -1.125])", "0, 'input size for {} {}'.format(layer.name, i) all_weights.append( 10.0 *", "clear_session from qkeras import binary from qkeras import ternary from", "x = Activation('softmax', name='softmax')(x) model = Model(inputs=[x_in], outputs=[x]) # reload", "import backend as K from tensorflow.keras.layers import Activation from tensorflow.keras.layers", "{} {}'.format(layer.name, i) # he normal initialization with a scale", "all_weights = [] for layer in model.layers: for i, weights", "10 # to avoid learning sizes shape = weights.shape assert", "he normal initialization with a scale factor of 2.0 all_weights.append(", "0.e+00, 5.e-07, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00 ,1.e+00,", "del model # Delete the existing model # Return a", "actual_output = model.predict(inputs).astype(np.float16) assert_allclose(actual_output, expected_output, rtol=1e-4) def test_qconv1d(): np.random.seed(33) x", "x = QSeparableConv2D( 32, (2, 2), strides=(2, 2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4,", "binary from qkeras import ternary from qkeras import QActivation from", "0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,", "input_size), shape)) if all_weights: layer.set_weights(all_weights) # Save the model as", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "model.layers: for i, weights in enumerate(layer.get_weights()): w = np.sum(weights) all_weights.append(w)", "activation=quantized_relu(6, 3, 1))( x) x = QConv2D( 64, (2, 2),", "0.e+00]]).astype(np.float16) inputs = 2 * np.random.rand(10, 28, 28, 1) actual_output", "import clear_session from qkeras import binary from qkeras import ternary", "-1.216], [-2.285, 1.905], 
[-2.652, -0.467]]]).astype(np.float16) assert np.all(p == y) if", "expected_output = np.array( [[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,", "works json_string = model.to_json() clear_session() model = quantized_model_from_json(json_string) # generate", "/ input_size), shape)) if all_weights: layer.set_weights(all_weights) # Save the model", "1.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,", "the previous one model = load_qmodel(fname) # Clean the created", "in model.layers: all_weights = [] for i, weights in enumerate(layer.get_weights()):", "absolute_import from __future__ import division from __future__ import print_function import", "= Input((4, 4,)) y = QConv1D( 2, 1, kernel_quantizer=quantized_bits(6, 2,", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00], [0.e+00,", "np.sqrt(2.0 / input_size), shape)) if all_weights: layer.set_weights(all_weights) # apply quantizer", "import division from __future__ import print_function import os import numpy", "qkeras import ternary from qkeras import QActivation from qkeras import", "ternary from qkeras import QActivation from qkeras import QDense from", "model = load_qmodel(fname) # Clean the created h5 file after", "0.e+00, 0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00], [0.e+00, 0.e+00, 0.e+00, 0.e+00,", "for layer in model.layers: all_weights = [] for i, weights", "\"License\"); # you may not use this file except in", "1, alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_0_m')( x) x = QActivation('quantized_relu(6,2,1)',", "None: input_size = 576 * 10 # to avoid learning", "0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00], [0.e+00, 0.e+00,", "import pytest import tempfile from tensorflow.keras import backend as K", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "assert_allclose(actual_output, expected_output, rtol=1e-4) def test_qconv1d(): np.random.seed(33) x = Input((4, 4,))", "-0.467]]]).astype(np.float16) assert np.all(p == y) if __name__ == '__main__': pytest.main([__file__])", "kernel_quantizer=ternary(alpha=1.0), bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_1_m', activation=quantized_relu(6, 3, 1))( x) x", "strides=(2, 2), depthwise_quantizer=binary(alpha=1.0), pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0), depthwise_activation=quantized_bits(6, 2, 1,", "np.array( [[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00,", "if input_size is None: input_size = 10 * 10 shape", "# distributed under the License is distributed on an \"AS", "enumerate(layer.get_weights()): w = np.sum(weights) all_weights.append(w) all_weights = np.array(all_weights) # test_qnetwork_weight_quantization", "# Unless required by applicable law or agreed to in", "y = QConv1D( 2, 1, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0), bias_quantizer=quantized_bits(4,", "all_weights.append(w) all_weights = np.array(all_weights) # test_qnetwork_weight_quantization all_weights_signature = np.array( [2.,", "weights model_save_quantized_weights(model) all_weights = [] for layer in model.layers: for", "of operations for this Conv1D layer assert model_ops['qconv1d']['number_of_operations'] == 32", "= quantized_model_from_json(json_string) # generate same output for weights np.random.seed(42) for", "Flatten from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "1, alpha=1.0), depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0), 
bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_0_m')(", "in enumerate(layer.get_weights()): input_size = np.prod(layer.input.shape.as_list()[1:]) if input_size is None: input_size", "You may obtain a copy of the License at #", "> 0, 'input size for {} {}'.format(layer.name, i) all_weights.append( 10.0", "0.e+00, 0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00], [ 0.e+00 ,0.e+00, 0.e+00,", "bias_quantizer=quantized_bits(4, 0, 1), name='conv2d_1_m', activation=quantized_relu(6, 3, 1))( x) x =", "x) x = QConv2D( 64, (2, 2), strides=(2, 2), kernel_quantizer=quantized_bits(6,", "print_qstats from qkeras import extract_model_operations # TODO(hzhuang): # qoctave_conv test", "the existing model # Return a compiled model identical to", "Copyright 2019 Google LLC # # # Licensed under the", "the Apache License, Version 2.0 (the \"License\"); # you may", "= Flatten(name='flatten')(x) x = QDense( 10, kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),", "x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x) x = QConv2D( 64, (3, 3),", "1.139], [-2.559, -1.216], [-2.285, 1.905], [-2.652, -0.467]]]).astype(np.float16) assert np.all(p ==", "1.e+00, 0.e+00, 0.e+00, 0.e+00], [ 0.e+00 ,0.e+00, 0.e+00, 0.e+00, 0.e+00," ]
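For context, here is a minimal standalone sketch of the workflow these tests exercise: build a small quantized model from qkeras layers, then snap its float weights to their quantized values with model_save_quantized_weights. The layer sizes, bit widths, and shapes below are illustrative choices, not values taken from the tests above.

import numpy as np
from tensorflow.keras.layers import Flatten, Input
from tensorflow.keras.models import Model
from qkeras import QConv2D, QDense, quantized_bits
from qkeras.utils import model_save_quantized_weights

# toy model: one quantized conv layer feeding one quantized dense layer
x = inp = Input((8, 8, 1))
x = QConv2D(4, (2, 2),
            kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
            bias_quantizer=quantized_bits(4, 0, 1))(x)
x = Flatten()(x)
x = QDense(2,
           kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
           bias_quantizer=quantized_bits(4, 0, 1))(x)
model = Model(inputs=inp, outputs=x)

# overwrite the float weights in place with their quantized counterparts,
# so that subsequent predict() calls run on the quantized network
model_save_quantized_weights(model)
print(model.predict(np.random.rand(1, 8, 8, 1)).shape)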
[ "import discord.ui from .item import Item from .select_option import SelectOption", "placeholder self._min_values: int = min_values self._max_values: int = max_values self._options:", "def max_values(self, max_values: int) -> 'Select': self._max_values = max_values return", "self._custom_id: Optional[str] = custom_id self.func: Optional[Callable] = None self.check_func: Callable[[discord.Interaction],", "to_discord(self) -> C: return self.cls( placeholder=self._placeholder, min_values=self._min_values, max_values=self._max_values, options=[o.to_discord_select_option() for", "Optional[list] = None, cls: C = CustomSelect, custom_id: Optional[str] =", "Generic, Callable import discord.ui from .item import Item from .select_option", "min_values self._max_values: int = max_values self._options: list = [] if", "return self def check(self, func: Callable[[discord.Interaction], bool]) -> 'Select': self.check_func", "self._options: list = [] if options is None else options", "self._row = row return self def on_select(self, func: Callable) ->", "from .custom import CustomSelect def _default_check(_: discord.Interaction) -> bool: return", "None, ) -> None: self._placeholder: Optional[str] = placeholder self._min_values: int", "min_values(self, min_values: int) -> 'Select': self._min_values = min_values return self", "self.func = func return self def custom_id(self, custom_id: str) ->", "def to_discord(self) -> C: return self.cls( placeholder=self._placeholder, min_values=self._min_values, max_values=self._max_values, options=[o.to_discord_select_option()", "def min_values(self, min_values: int) -> 'Select': self._min_values = min_values return", "C = cls self._custom_id: Optional[str] = custom_id self.func: Optional[Callable] =", "def on_select(self, func: Callable) -> 'Select': self.func = func return", "func return self def custom_id(self, custom_id: str) -> 'Select': self._custom_id", "Select(Item, Generic[C]): def __init__( self, placeholder: Optional[str] = None, min_values:", ") -> None: self._placeholder: Optional[str] = placeholder self._min_values: int =", "cls: C = CustomSelect, custom_id: Optional[str] = None, ) ->", "-> bool: return True C = TypeVar(\"C\", bound=discord.ui.Select) class Select(Item,", "self def options(self, options: List[SelectOption]) -> 'Select': self._options = options", "-> None: self._placeholder: Optional[str] = placeholder self._min_values: int = min_values", "-> 'Select': self._custom_id = custom_id return self def check(self, func:", "max_values=self._max_values, options=[o.to_discord_select_option() for o in self._options], row=self._row, custom_id=self._custom_id, check_func=self.check_func, callback=self.func", "self._placeholder: Optional[str] = placeholder self._min_values: int = min_values self._max_values: int", "self._max_values = max_values return self def options(self, options: List[SelectOption]) ->", "Optional[str] = None, ) -> None: self._placeholder: Optional[str] = placeholder", "options: Optional[list] = None, cls: C = CustomSelect, custom_id: Optional[str]", "-> 'Select': self._placeholder = placeholder return self def min_values(self, min_values:", "return self def row(self, row: int) -> 'Select': self._row =", "max_values: int = 1, options: Optional[list] = None, cls: C", "'Select': self._max_values = max_values return self def options(self, options: List[SelectOption])", "'Select': self._row = row return self def on_select(self, func: Callable)", "self._options = options return self def row(self, row: int) ->", "max_values(self, max_values: 
int) -> 'Select': self._max_values = max_values return self", "C = CustomSelect, custom_id: Optional[str] = None, ) -> None:", "= custom_id return self def check(self, func: Callable[[discord.Interaction], bool]) ->", "func: Callable[[discord.Interaction], bool]) -> 'Select': self.check_func = func return self", "CustomSelect def _default_check(_: discord.Interaction) -> bool: return True C =", "[] if options is None else options self._row: Optional[int] =", "self def custom_id(self, custom_id: str) -> 'Select': self._custom_id = custom_id", "options: List[SelectOption]) -> 'Select': self._options = options return self def", ".item import Item from .select_option import SelectOption from .custom import", "= _default_check def placeholder(self, placeholder: str) -> 'Select': self._placeholder =", "row: int) -> 'Select': self._row = row return self def", "bound=discord.ui.Select) class Select(Item, Generic[C]): def __init__( self, placeholder: Optional[str] =", "placeholder=self._placeholder, min_values=self._min_values, max_values=self._max_values, options=[o.to_discord_select_option() for o in self._options], row=self._row, custom_id=self._custom_id,", "custom_id: Optional[str] = None, ) -> None: self._placeholder: Optional[str] =", "str) -> 'Select': self._placeholder = placeholder return self def min_values(self,", "import CustomSelect def _default_check(_: discord.Interaction) -> bool: return True C", "bool: return True C = TypeVar(\"C\", bound=discord.ui.Select) class Select(Item, Generic[C]):", "def custom_id(self, custom_id: str) -> 'Select': self._custom_id = custom_id return", "custom_id(self, custom_id: str) -> 'Select': self._custom_id = custom_id return self", "self def to_discord(self) -> C: return self.cls( placeholder=self._placeholder, min_values=self._min_values, max_values=self._max_values,", "= custom_id self.func: Optional[Callable] = None self.check_func: Callable[[discord.Interaction], bool] =", "Generic[C]): def __init__( self, placeholder: Optional[str] = None, min_values: int", "Callable[[discord.Interaction], bool]) -> 'Select': self.check_func = func return self def", "def options(self, options: List[SelectOption]) -> 'Select': self._options = options return", "Optional, List, TypeVar, Generic, Callable import discord.ui from .item import", "None self.cls: C = cls self._custom_id: Optional[str] = custom_id self.func:", "def row(self, row: int) -> 'Select': self._row = row return", "def placeholder(self, placeholder: str) -> 'Select': self._placeholder = placeholder return", "else options self._row: Optional[int] = None self.cls: C = cls", "List, TypeVar, Generic, Callable import discord.ui from .item import Item", "discord.ui from .item import Item from .select_option import SelectOption from", "C = TypeVar(\"C\", bound=discord.ui.Select) class Select(Item, Generic[C]): def __init__( self,", "-> 'Select': self._max_values = max_values return self def options(self, options:", "placeholder return self def min_values(self, min_values: int) -> 'Select': self._min_values", "options=[o.to_discord_select_option() for o in self._options], row=self._row, custom_id=self._custom_id, check_func=self.check_func, callback=self.func )", "= 1, options: Optional[list] = None, cls: C = CustomSelect,", "C: return self.cls( placeholder=self._placeholder, min_values=self._min_values, max_values=self._max_values, options=[o.to_discord_select_option() for o in", "options(self, options: List[SelectOption]) -> 'Select': self._options = options return self", ".select_option import 
SelectOption from .custom import CustomSelect def _default_check(_: discord.Interaction)", "None self.check_func: Callable[[discord.Interaction], bool] = _default_check def placeholder(self, placeholder: str)", "None, cls: C = CustomSelect, custom_id: Optional[str] = None, )", "check(self, func: Callable[[discord.Interaction], bool]) -> 'Select': self.check_func = func return", "return self def min_values(self, min_values: int) -> 'Select': self._min_values =", "custom_id self.func: Optional[Callable] = None self.check_func: Callable[[discord.Interaction], bool] = _default_check", "placeholder: str) -> 'Select': self._placeholder = placeholder return self def", "list = [] if options is None else options self._row:", "options return self def row(self, row: int) -> 'Select': self._row", "str) -> 'Select': self._custom_id = custom_id return self def check(self,", "discord.Interaction) -> bool: return True C = TypeVar(\"C\", bound=discord.ui.Select) class", "int = 1, options: Optional[list] = None, cls: C =", "max_values: int) -> 'Select': self._max_values = max_values return self def", "self, placeholder: Optional[str] = None, min_values: int = 1, max_values:", "func return self def to_discord(self) -> C: return self.cls( placeholder=self._placeholder,", "from .select_option import SelectOption from .custom import CustomSelect def _default_check(_:", "return self.cls( placeholder=self._placeholder, min_values=self._min_values, max_values=self._max_values, options=[o.to_discord_select_option() for o in self._options],", "Callable import discord.ui from .item import Item from .select_option import", "self.cls: C = cls self._custom_id: Optional[str] = custom_id self.func: Optional[Callable]", "'Select': self.func = func return self def custom_id(self, custom_id: str)", "self.check_func = func return self def to_discord(self) -> C: return", "import Item from .select_option import SelectOption from .custom import CustomSelect", "self def on_select(self, func: Callable) -> 'Select': self.func = func", "placeholder(self, placeholder: str) -> 'Select': self._placeholder = placeholder return self", "row(self, row: int) -> 'Select': self._row = row return self", "return self def to_discord(self) -> C: return self.cls( placeholder=self._placeholder, min_values=self._min_values,", "def __init__( self, placeholder: Optional[str] = None, min_values: int =", "= TypeVar(\"C\", bound=discord.ui.Select) class Select(Item, Generic[C]): def __init__( self, placeholder:", "from .item import Item from .select_option import SelectOption from .custom", "typing import Optional, List, TypeVar, Generic, Callable import discord.ui from", "int) -> 'Select': self._max_values = max_values return self def options(self,", "-> 'Select': self._options = options return self def row(self, row:", "= row return self def on_select(self, func: Callable) -> 'Select':", "min_values=self._min_values, max_values=self._max_values, options=[o.to_discord_select_option() for o in self._options], row=self._row, custom_id=self._custom_id, check_func=self.check_func,", "on_select(self, func: Callable) -> 'Select': self.func = func return self", "custom_id: str) -> 'Select': self._custom_id = custom_id return self def", "max_values return self def options(self, options: List[SelectOption]) -> 'Select': self._options", "'Select': self._custom_id = custom_id return self def check(self, func: Callable[[discord.Interaction],", "-> 'Select': self._row = row return self def on_select(self, func:", "self def check(self, func: 
Callable[[discord.Interaction], bool]) -> 'Select': self.check_func =", "int = min_values self._max_values: int = max_values self._options: list =", "SelectOption from .custom import CustomSelect def _default_check(_: discord.Interaction) -> bool:", "self._row: Optional[int] = None self.cls: C = cls self._custom_id: Optional[str]", "_default_check def placeholder(self, placeholder: str) -> 'Select': self._placeholder = placeholder", "import SelectOption from .custom import CustomSelect def _default_check(_: discord.Interaction) ->", "= placeholder self._min_values: int = min_values self._max_values: int = max_values", "options self._row: Optional[int] = None self.cls: C = cls self._custom_id:", "int) -> 'Select': self._min_values = min_values return self def max_values(self,", "return self def custom_id(self, custom_id: str) -> 'Select': self._custom_id =", "row return self def on_select(self, func: Callable) -> 'Select': self.func", "= cls self._custom_id: Optional[str] = custom_id self.func: Optional[Callable] = None", "'Select': self._placeholder = placeholder return self def min_values(self, min_values: int)", "TypeVar(\"C\", bound=discord.ui.Select) class Select(Item, Generic[C]): def __init__( self, placeholder: Optional[str]", "int) -> 'Select': self._row = row return self def on_select(self,", "1, options: Optional[list] = None, cls: C = CustomSelect, custom_id:", "CustomSelect, custom_id: Optional[str] = None, ) -> None: self._placeholder: Optional[str]", "_default_check(_: discord.Interaction) -> bool: return True C = TypeVar(\"C\", bound=discord.ui.Select)", "def _default_check(_: discord.Interaction) -> bool: return True C = TypeVar(\"C\",", "placeholder: Optional[str] = None, min_values: int = 1, max_values: int", "= None, min_values: int = 1, max_values: int = 1,", "Optional[str] = custom_id self.func: Optional[Callable] = None self.check_func: Callable[[discord.Interaction], bool]", "min_values: int = 1, max_values: int = 1, options: Optional[list]", "None, min_values: int = 1, max_values: int = 1, options:", "1, max_values: int = 1, options: Optional[list] = None, cls:", "Callable) -> 'Select': self.func = func return self def custom_id(self,", "'Select': self._options = options return self def row(self, row: int)", "bool]) -> 'Select': self.check_func = func return self def to_discord(self)", "if options is None else options self._row: Optional[int] = None", "= CustomSelect, custom_id: Optional[str] = None, ) -> None: self._placeholder:", "return self def options(self, options: List[SelectOption]) -> 'Select': self._options =", "min_values: int) -> 'Select': self._min_values = min_values return self def", "Item from .select_option import SelectOption from .custom import CustomSelect def", "-> 'Select': self._min_values = min_values return self def max_values(self, max_values:", "custom_id return self def check(self, func: Callable[[discord.Interaction], bool]) -> 'Select':", "def check(self, func: Callable[[discord.Interaction], bool]) -> 'Select': self.check_func = func", "options is None else options self._row: Optional[int] = None self.cls:", "= options return self def row(self, row: int) -> 'Select':", "from typing import Optional, List, TypeVar, Generic, Callable import discord.ui", "None else options self._row: Optional[int] = None self.cls: C =", "self._min_values = min_values return self def max_values(self, max_values: int) ->", "= func return self def custom_id(self, custom_id: str) -> 'Select':", "return self def on_select(self, func: Callable) -> 'Select': 
self.func =", ".custom import CustomSelect def _default_check(_: discord.Interaction) -> bool: return True", "True C = TypeVar(\"C\", bound=discord.ui.Select) class Select(Item, Generic[C]): def __init__(", "int = max_values self._options: list = [] if options is", "cls self._custom_id: Optional[str] = custom_id self.func: Optional[Callable] = None self.check_func:", "= max_values return self def options(self, options: List[SelectOption]) -> 'Select':", "List[SelectOption]) -> 'Select': self._options = options return self def row(self,", "= placeholder return self def min_values(self, min_values: int) -> 'Select':", "self.cls( placeholder=self._placeholder, min_values=self._min_values, max_values=self._max_values, options=[o.to_discord_select_option() for o in self._options], row=self._row,", "return True C = TypeVar(\"C\", bound=discord.ui.Select) class Select(Item, Generic[C]): def", "= [] if options is None else options self._row: Optional[int]", "-> 'Select': self.func = func return self def custom_id(self, custom_id:", "= 1, max_values: int = 1, options: Optional[list] = None,", "Optional[str] = placeholder self._min_values: int = min_values self._max_values: int =", "__init__( self, placeholder: Optional[str] = None, min_values: int = 1,", "Optional[Callable] = None self.check_func: Callable[[discord.Interaction], bool] = _default_check def placeholder(self,", "import Optional, List, TypeVar, Generic, Callable import discord.ui from .item", "= None self.cls: C = cls self._custom_id: Optional[str] = custom_id", "is None else options self._row: Optional[int] = None self.cls: C", "bool] = _default_check def placeholder(self, placeholder: str) -> 'Select': self._placeholder", "int = 1, max_values: int = 1, options: Optional[list] =", "self._min_values: int = min_values self._max_values: int = max_values self._options: list", "-> C: return self.cls( placeholder=self._placeholder, min_values=self._min_values, max_values=self._max_values, options=[o.to_discord_select_option() for o", "self._placeholder = placeholder return self def min_values(self, min_values: int) ->", "Optional[int] = None self.cls: C = cls self._custom_id: Optional[str] =", "= max_values self._options: list = [] if options is None", "TypeVar, Generic, Callable import discord.ui from .item import Item from", "= None, cls: C = CustomSelect, custom_id: Optional[str] = None,", "= None, ) -> None: self._placeholder: Optional[str] = placeholder self._min_values:", "None: self._placeholder: Optional[str] = placeholder self._min_values: int = min_values self._max_values:", "max_values self._options: list = [] if options is None else", "= min_values return self def max_values(self, max_values: int) -> 'Select':", "self._max_values: int = max_values self._options: list = [] if options", "= None self.check_func: Callable[[discord.Interaction], bool] = _default_check def placeholder(self, placeholder:", "self def row(self, row: int) -> 'Select': self._row = row", "self._custom_id = custom_id return self def check(self, func: Callable[[discord.Interaction], bool])", "min_values return self def max_values(self, max_values: int) -> 'Select': self._max_values", "self.check_func: Callable[[discord.Interaction], bool] = _default_check def placeholder(self, placeholder: str) ->", "self.func: Optional[Callable] = None self.check_func: Callable[[discord.Interaction], bool] = _default_check def", "func: Callable) -> 'Select': self.func = func return self def", "= func return self def to_discord(self) -> C: return self.cls(", "'Select': 
self._min_values = min_values return self def max_values(self, max_values: int)", "Optional[str] = None, min_values: int = 1, max_values: int =", "'Select': self.check_func = func return self def to_discord(self) -> C:", "class Select(Item, Generic[C]): def __init__( self, placeholder: Optional[str] = None,", "self def max_values(self, max_values: int) -> 'Select': self._max_values = max_values", "Callable[[discord.Interaction], bool] = _default_check def placeholder(self, placeholder: str) -> 'Select':", "return self def max_values(self, max_values: int) -> 'Select': self._max_values =", "self def min_values(self, min_values: int) -> 'Select': self._min_values = min_values", "-> 'Select': self.check_func = func return self def to_discord(self) ->", "= min_values self._max_values: int = max_values self._options: list = []" ]
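As a usage sketch, the builder above is meant to be chained and only converted into a real discord.ui.Select at the end via to_discord(). The SelectOption constructor arguments and the callback signature below are assumptions for illustration (the real signatures live in .select_option and .custom), and this assumes Item's constructor takes no required arguments.

# hypothetical callback; CustomSelect decides what it is actually invoked with
async def on_color_select(interaction):
    await interaction.response.send_message("selection received")

select = (
    Select(placeholder="Pick a color")
    .options([SelectOption(label="Red"), SelectOption(label="Blue")])
    .check(lambda interaction: not interaction.user.bot)  # gate interactions
    .on_select(on_color_select)
)

component = select.to_discord()  # builds the underlying discord.ui.Select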
[ "\"lunName\": \"lun_name\", \"pnDn\": \"pn_dn\", \"profileDn\": \"profile_dn\", \"rn\": \"rn\", \"status\": \"status\",", "} def __init__(self, parent_mo_or_dn, id, **kwargs): self._dirty_mask = 0 self.id", "\"InputOutput\", 0x1f, [], [\"read-only\"], [u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun', u'storageVirtualDrive'], [], [\"Get\"])", "r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []), \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2,", "StorageScsiLunRef(ManagedObject): \"\"\"This is StorageScsiLunRef class.\"\"\" consts = StorageScsiLunRefConsts() naming_props =", "is StorageScsiLunRef class.\"\"\" consts = StorageScsiLunRefConsts() naming_props = set([u'id']) mo_meta", "[]), \"lun_name\": MoPropertyMeta(\"lun_name\", \"lunName\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None,", "MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []), \"profile_dn\": MoPropertyMeta(\"profile_dn\", \"profileDn\",", "\"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []), \"rn\":", "\"id\", \"lsDn\": \"ls_dn\", \"lunName\": \"lun_name\", \"pnDn\": \"pn_dn\", \"profileDn\": \"profile_dn\", \"rn\":", "MoPropertyMeta.NAMING, 0x4, None, None, None, [], []), \"ls_dn\": MoPropertyMeta(\"ls_dn\", \"lsDn\",", "UcscVersion, MoPropertyMeta, MoMeta from ...ucscmeta import VersionMeta class StorageScsiLunRefConsts(): pass", "None, [], []), \"ls_dn\": MoPropertyMeta(\"ls_dn\", \"lsDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None,", "None, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []), \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\",", "[\"Get\"]) prop_meta = { \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version131a, MoPropertyMeta.INTERNAL,", "[]), \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256,", "\"profile_dn\": MoPropertyMeta(\"profile_dn\", \"profileDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None,", "\"lsDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),", "\"lunName\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"[\\-\\.:_a-zA-Z0-9]{0,16}\"\"\", [], []),", "\"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None,", "None, 0, 256, None, [], []), \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\",", "u'storageVirtualDrive'], [], [\"Get\"]) prop_meta = { \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\",", "...ucscmeta import VersionMeta class StorageScsiLunRefConsts(): pass class StorageScsiLunRef(ManagedObject): \"\"\"This is", "module contains the general information for StorageScsiLunRef ManagedObject.\"\"\" from ...ucscmo", "[]), } prop_map = { \"childAction\": \"child_action\", \"dn\": \"dn\", \"id\":", "\"dn\", \"id\": \"id\", \"lsDn\": \"ls_dn\", \"lunName\": \"lun_name\", \"pnDn\": \"pn_dn\", \"profileDn\":", "None self.profile_dn = None self.status = None ManagedObject.__init__(self, \"StorageScsiLunRef\", parent_mo_or_dn,", "from ...ucscmeta import VersionMeta class StorageScsiLunRefConsts(): pass class StorageScsiLunRef(ManagedObject): \"\"\"This", "MoPropertyMeta(\"pn_dn\", 
\"pnDn\", \"string\", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [],", "None, [], []), \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10,", "\"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r\"\"\"((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}\"\"\",", "\"profileDn\": \"profile_dn\", \"rn\": \"rn\", \"status\": \"status\", } def __init__(self, parent_mo_or_dn,", "__init__(self, parent_mo_or_dn, id, **kwargs): self._dirty_mask = 0 self.id = id", "0, 256, None, [], []), \"profile_dn\": MoPropertyMeta(\"profile_dn\", \"profileDn\", \"string\", VersionMeta.Version131a,", "\"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None,", "\"id\": MoPropertyMeta(\"id\", \"id\", \"uint\", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None,", "\"dn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),", "[], []), } prop_map = { \"childAction\": \"child_action\", \"dn\": \"dn\",", "VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"[\\-\\.:_a-zA-Z0-9]{0,16}\"\"\", [], []), \"pn_dn\": MoPropertyMeta(\"pn_dn\",", "None, [], []), \"profile_dn\": MoPropertyMeta(\"profile_dn\", \"profileDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None,", "None, None, r\"\"\"[\\-\\.:_a-zA-Z0-9]{0,16}\"\"\", [], []), \"pn_dn\": MoPropertyMeta(\"pn_dn\", \"pnDn\", \"string\", VersionMeta.Version141a,", "import UcscVersion, MoPropertyMeta, MoMeta from ...ucscmeta import VersionMeta class StorageScsiLunRefConsts():", "VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []), \"profile_dn\": MoPropertyMeta(\"profile_dn\",", "MoPropertyMeta(\"id\", \"id\", \"uint\", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None, [],", "\"pn_dn\": MoPropertyMeta(\"pn_dn\", \"pnDn\", \"string\", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None,", "0, 256, None, [], []), \"id\": MoPropertyMeta(\"id\", \"id\", \"uint\", VersionMeta.Version131a,", "MoPropertyMeta.INTERNAL, None, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []), \"dn\": MoPropertyMeta(\"dn\", \"dn\",", "\"childAction\", \"string\", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []),", "VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []), \"dn\": MoPropertyMeta(\"dn\",", "MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []), \"rn\": MoPropertyMeta(\"rn\", \"rn\",", "mo_meta = MoMeta(\"StorageScsiLunRef\", \"storageScsiLunRef\", \"scsi-lun-ref-[id]\", VersionMeta.Version131a, \"InputOutput\", 0x1f, [], [\"read-only\"],", "\"profileDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),", "[]), \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None, None,", "= set([u'id']) mo_meta = MoMeta(\"StorageScsiLunRef\", \"storageScsiLunRef\", \"scsi-lun-ref-[id]\", VersionMeta.Version131a, \"InputOutput\", 0x1f,", "[], [\"Get\"]) 
prop_meta = { \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version131a,", "MoMeta(\"StorageScsiLunRef\", \"storageScsiLunRef\", \"scsi-lun-ref-[id]\", VersionMeta.Version131a, \"InputOutput\", 0x1f, [], [\"read-only\"], [u'storageLunReplica', u'storageLunSnapshot',", "self.ls_dn = None self.lun_name = None self.pn_dn = None self.profile_dn", "StorageScsiLunRefConsts() naming_props = set([u'id']) mo_meta = MoMeta(\"StorageScsiLunRef\", \"storageScsiLunRef\", \"scsi-lun-ref-[id]\", VersionMeta.Version131a,", "self.profile_dn = None self.status = None ManagedObject.__init__(self, \"StorageScsiLunRef\", parent_mo_or_dn, **kwargs)", "None, r\"\"\"[\\-\\.:_a-zA-Z0-9]{0,16}\"\"\", [], []), \"pn_dn\": MoPropertyMeta(\"pn_dn\", \"pnDn\", \"string\", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY,", "[]), \"profile_dn\": MoPropertyMeta(\"profile_dn\", \"profileDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256,", "self.pn_dn = None self.profile_dn = None self.status = None ManagedObject.__init__(self,", "id, **kwargs): self._dirty_mask = 0 self.id = id self.child_action =", "\"ls_dn\", \"lunName\": \"lun_name\", \"pnDn\": \"pn_dn\", \"profileDn\": \"profile_dn\", \"rn\": \"rn\", \"status\":", "self._dirty_mask = 0 self.id = id self.child_action = None self.ls_dn", "[u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun', u'storageVirtualDrive'], [], [\"Get\"]) prop_meta = { \"child_action\":", "256, None, [], []), \"id\": MoPropertyMeta(\"id\", \"id\", \"uint\", VersionMeta.Version131a, MoPropertyMeta.NAMING,", "[]), \"id\": MoPropertyMeta(\"id\", \"id\", \"uint\", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None,", "\"pnDn\", \"string\", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),", "consts = StorageScsiLunRefConsts() naming_props = set([u'id']) mo_meta = MoMeta(\"StorageScsiLunRef\", \"storageScsiLunRef\",", "\"storageScsiLunRef\", \"scsi-lun-ref-[id]\", VersionMeta.Version131a, \"InputOutput\", 0x1f, [], [\"read-only\"], [u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun',", "[], [\"read-only\"], [u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun', u'storageVirtualDrive'], [], [\"Get\"]) prop_meta =", "parent_mo_or_dn, id, **kwargs): self._dirty_mask = 0 self.id = id self.child_action", "ManagedObject.\"\"\" from ...ucscmo import ManagedObject from ...ucsccoremeta import UcscVersion, MoPropertyMeta,", "= None self.lun_name = None self.pn_dn = None self.profile_dn =", "self.id = id self.child_action = None self.ls_dn = None self.lun_name", "import VersionMeta class StorageScsiLunRefConsts(): pass class StorageScsiLunRef(ManagedObject): \"\"\"This is StorageScsiLunRef", "from ...ucscmo import ManagedObject from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta", "VersionMeta class StorageScsiLunRefConsts(): pass class StorageScsiLunRef(ManagedObject): \"\"\"This is StorageScsiLunRef class.\"\"\"", "= MoMeta(\"StorageScsiLunRef\", \"storageScsiLunRef\", \"scsi-lun-ref-[id]\", VersionMeta.Version131a, \"InputOutput\", 0x1f, [], [\"read-only\"], [u'storageLunReplica',", "[], []), \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x8, 0,", "{ \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None,", "None self.ls_dn = None self.lun_name = None self.pn_dn = None", "\"\"\"This module contains 
the general information for StorageScsiLunRef ManagedObject.\"\"\" from", "set([u'id']) mo_meta = MoMeta(\"StorageScsiLunRef\", \"storageScsiLunRef\", \"scsi-lun-ref-[id]\", VersionMeta.Version131a, \"InputOutput\", 0x1f, [],", "prop_meta = { \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None,", "self.child_action = None self.ls_dn = None self.lun_name = None self.pn_dn", "None, 0, 256, None, [], []), \"lun_name\": MoPropertyMeta(\"lun_name\", \"lunName\", \"string\",", "\"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"[\\-\\.:_a-zA-Z0-9]{0,16}\"\"\", [], []), \"pn_dn\":", "\"pnDn\": \"pn_dn\", \"profileDn\": \"profile_dn\", \"rn\": \"rn\", \"status\": \"status\", } def", "= 0 self.id = id self.child_action = None self.ls_dn =", "\"uint\", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []), \"ls_dn\":", "VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []), \"ls_dn\": MoPropertyMeta(\"ls_dn\",", "\"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\",", "[], []), \"ls_dn\": MoPropertyMeta(\"ls_dn\", \"lsDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0,", "self.lun_name = None self.pn_dn = None self.profile_dn = None self.status", "0x10, None, None, r\"\"\"((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}\"\"\", [], []), } prop_map = {", "VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []), \"rn\": MoPropertyMeta(\"rn\",", "\"dn\": \"dn\", \"id\": \"id\", \"lsDn\": \"ls_dn\", \"lunName\": \"lun_name\", \"pnDn\": \"pn_dn\",", "MoPropertyMeta, MoMeta from ...ucscmeta import VersionMeta class StorageScsiLunRefConsts(): pass class", "MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [],", "[], []), \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None,", "\"child_action\", \"dn\": \"dn\", \"id\": \"id\", \"lsDn\": \"ls_dn\", \"lunName\": \"lun_name\", \"pnDn\":", "\"ls_dn\": MoPropertyMeta(\"ls_dn\", \"lsDn\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None,", "MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r\"\"\"((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}\"\"\", [],", "\"childAction\": \"child_action\", \"dn\": \"dn\", \"id\": \"id\", \"lsDn\": \"ls_dn\", \"lunName\": \"lun_name\",", "0x1f, [], [\"read-only\"], [u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun', u'storageVirtualDrive'], [], [\"Get\"]) prop_meta", "= None self.ls_dn = None self.lun_name = None self.pn_dn =", "\"rn\": \"rn\", \"status\": \"status\", } def __init__(self, parent_mo_or_dn, id, **kwargs):", "\"string\", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []), \"dn\":", "0, 256, None, [], []), \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version131a,", "None, 
"""This module contains the general information for StorageScsiLunRef ManagedObject."""

from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta


class StorageScsiLunRefConsts():
    pass


class StorageScsiLunRef(ManagedObject):
    """This is StorageScsiLunRef class."""

    consts = StorageScsiLunRefConsts()
    naming_props = set([u'id'])

    mo_meta = MoMeta("StorageScsiLunRef", "storageScsiLunRef", "scsi-lun-ref-[id]", VersionMeta.Version131a, "InputOutput", 0x1f, [], ["read-only"], [u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun', u'storageVirtualDrive'], [], ["Get"])

    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []),
        "ls_dn": MoPropertyMeta("ls_dn", "lsDn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "lun_name": MoPropertyMeta("lun_name", "lunName", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
        "pn_dn": MoPropertyMeta("pn_dn", "pnDn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "profile_dn": MoPropertyMeta("profile_dn", "profileDn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }

    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "id": "id",
        "lsDn": "ls_dn",
        "lunName": "lun_name",
        "pnDn": "pn_dn",
        "profileDn": "profile_dn",
        "rn": "rn",
        "status": "status",
    }

    def __init__(self, parent_mo_or_dn, id, **kwargs):
        self._dirty_mask = 0
        self.id = id
        self.child_action = None
        self.ls_dn = None
        self.lun_name = None
        self.pn_dn = None
        self.profile_dn = None
        self.status = None

        ManagedObject.__init__(self, "StorageScsiLunRef", parent_mo_or_dn, **kwargs)
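
# Illustrative sketch (not part of the generated SDK file): prop_map above
# translates the camelCase XML attribute names returned by UCS Central into
# the snake_case Python attributes declared in prop_meta. The
# sample_xml_attrs dict is made-up data for demonstration only.
def _example_prop_map():
    sample_xml_attrs = {"lunName": "lun-1", "profileDn": "org-root/ls-sp1"}
    mo = StorageScsiLunRef(parent_mo_or_dn="sys", id="1")
    for xml_attr, value in sample_xml_attrs.items():
        # e.g. "lunName" -> "lun_name"
        setattr(mo, StorageScsiLunRef.prop_map[xml_attr], value)
    return mo.lun_name, mo.profile_dn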
<filename>saxstools/fullsaxs.py
from __future__ import print_function, absolute_import, division
from sys import stdout as _stdout
from time import time as _time

import numpy as np
try:
    import pyfftw
    pyfftw.interfaces.cache.enable()
    pyfftw.interfaces.cache.set_keepalive_time(10)
    rfftn = pyfftw.interfaces.numpy_fft.rfftn
    irfftn = pyfftw.interfaces.numpy_fft.irfftn
except ImportError:
    from numpy.fft import rfftn, irfftn

from disvis import volume
from disvis.points import dilate_points
from disvis.libdisvis import (rotate_image3d, dilate_points_add, longest_distance)
from powerfit.solutions import Solutions
from saxstools.saxs_curve import scattering_curve, create_fifj_lookup_table
from saxstools.helpers import coarse_grain
from saxstools.libsaxstools import calc_chi2
from saxstools.kernels import Kernels as saxs_Kernels

try:
    import pyopencl as cl
    import pyopencl.array as cl_array
    import disvis.pyclfft
    from disvis.kernels import Kernels
    from disvis import pyclfft
except ImportError:
    pass

class FullSAXS(object):

    def __init__(self):
        # parameters to be defined
        self._receptor = None
        self._ligand = None

        # parameters with standard values
        self.rotations = [[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]
        self.weights = None
        self.voxelspacing = 1.0
        self.interaction_radius = 2.5
        self.max_clash = 100
        self.min_interaction = 300
        self.coarse_grain = True
        self.beads_per_residue = 2

        # CPU or GPU
        self._queue = None

        # unchangeable
        self._data = {}
        self._q = None
        self._Iq = None
        self._sq = None

    @property
    def receptor(self):
        return self._receptor

    @receptor.setter
    def receptor(self, receptor):
        self._receptor = receptor.duplicate()

    @property
    def ligand(self):
        return self._ligand

    @ligand.setter
    def ligand(self, ligand):
        self._ligand = ligand.duplicate()

    @property
    def rotations(self):
        return self._rotations

    @rotations.setter
    def rotations(self, rotations):
        rotmat = np.asarray(rotations, dtype=np.float64)
        if rotmat.ndim != 3:
            raise ValueError("Input should be a list of rotation matrices.")
        self._rotations = rotmat

    @property
    def weights(self):
        return self._weights

    @weights.setter
    def weights(self, weights):
        self._weights = weights

    @property
    def interaction_radius(self):
        return self._interaction_radius

    @interaction_radius.setter
    def interaction_radius(self, radius):
        if radius <= 0:
            raise ValueError("Interaction radius should be bigger than zero")
        self._interaction_radius = radius

    @property
    def voxelspacing(self):
        return self._voxelspacing

    @voxelspacing.setter
    def voxelspacing(self, voxelspacing):
        self._voxelspacing = voxelspacing

    @property
    def max_clash(self):
        return self._max_clash

    @max_clash.setter
    def max_clash(self, max_clash):
        if max_clash < 0:
            raise ValueError("Maximum allowed clashing volume cannot be negative")
        self._max_clash = max_clash + 0.9

    @property
    def min_interaction(self):
        return self._min_interaction

    @min_interaction.setter
    def min_interaction(self, min_interaction):
        if min_interaction < 1:
            raise ValueError("Minimum required interaction volume cannot be smaller than 1")
        self._min_interaction = min_interaction + 0.9

    @property
    def queue(self):
        return self._queue

    @queue.setter
    def queue(self, queue):
        self._queue = queue

    @property
    def data(self):
        return self._data

    @property
    def saxsdata(self):
        return self._q, self._Iq, self._sq

    @saxsdata.setter
    def saxsdata(self, saxsdata):
        self._q, self._Iq, self._sq = saxsdata
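
    # Pipeline overview: _initialize() builds the receptor core/surface grids,
    # the ligand grid and the SAXS lookup tables; search() then dispatches to
    # the CPU or the GPU code path, which for every rotation counts clashing
    # and interacting voxels via FFT cross-correlation and keeps, per
    # translation, the best chi2 and the index of the rotation that scored it.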

    def _initialize(self):
        # check if requirements are set
        if any(x is None for x in (self.receptor, self.ligand)):
            raise ValueError("Not all requirements are met for a search")

        if self.weights is None:
            self.weights = np.ones(self.rotations.shape[0], dtype=np.float64)

        if len(self.weights) != len(self.rotations):
            raise ValueError("Weights and rotations must have equal length")

        d = self.data

        # determine size for grid
        shape = grid_shape(self.receptor.coor, self.ligand.coor, self.voxelspacing)

        # calculate the interaction surface and core of the receptor
        vdw_radii = self.receptor.vdw_radius
        radii = vdw_radii + self.interaction_radius
        d['rsurf'] = rsurface(self.receptor.coor, radii, shape, self.voxelspacing)
        d['rcore'] = rsurface(self.receptor.coor, vdw_radii, shape, self.voxelspacing)

        # keep track of some data for later calculations
        d['origin'] = np.asarray(d['rcore'].origin, dtype=np.float64)
        d['shape'] = d['rcore'].shape
        d['start'] = d['rcore'].start
        d['nrot'] = self.rotations.shape[0]

        # set ligand center to the origin of the receptor map
        # and make a grid of the ligand
        radii = self.ligand.vdw_radius
        d['lsurf'] = dilate_points((self.ligand.coor - self.ligand.center \
                + self.receptor.center), radii, volume.zeros_like(d['rcore']))
        d['im_center'] = np.asarray((self.receptor.center - d['rcore'].origin)/self.voxelspacing, dtype=np.float64)

        d['max_clash'] = self.max_clash/self.voxelspacing**3
        d['min_interaction'] = self.min_interaction/self.voxelspacing**3

        # SAXS data
        d['q'] = self._q
        d['targetIq'] = self._Iq
        d['sq'] = self._sq

        if self.coarse_grain:
            e1, xyz1 = coarse_grain(self.receptor, bpr=self.beads_per_residue)
            e2, xyz2 = coarse_grain(self.ligand, bpr=self.beads_per_residue)
        else:
            e1, xyz1 = self.receptor.elements, self.receptor.coor
            e2, xyz2 = self.ligand.elements, self.ligand.coor

        d['base_Iq'] = scattering_curve(self._q, e1, xyz1, bpr=self.beads_per_residue)
        d['base_Iq'] += scattering_curve(self._q, e2, xyz2, bpr=self.beads_per_residue)

        d['fifj'], d['rind'], d['lind'] = create_fifj_lookup_table(d['q'], e1, e2, bpr=self.beads_per_residue)
        d['rxyz'] = xyz1
        d['lxyz'] = xyz2 - self.ligand.center
        d['chi2'] = np.zeros(d['rcore'].shape, dtype=np.float64)
        d['best_chi2'] = np.zeros_like(d['chi2'])

    def search(self):
        self._initialize()
        if self.queue is None:
            self._cpu_init()
            self._cpu_search()
        else:
            self._gpu_init()
            self._gpu_search()

        if _stdout.isatty():
            print()

        d = self.data
        ind = d['best_chi2'] > 0
        d['best_chi2'][ind] -= d['best_chi2'][ind].min()
        best_chi2 = volume.Volume(d['best_chi2'], voxelspacing=self.voxelspacing, origin=d['origin'])

        return Solutions(best_chi2, self.rotations, d['rot_ind'])

    def _cpu_init(self):
        self.cpu_data = {}
        c = self.cpu_data
        d = self.data

        c['rcore'] = d['rcore'].array
        c['rsurf'] = d['rsurf'].array
        c['im_lsurf'] = d['lsurf'].array

        c['lsurf'] = np.zeros_like(c['rcore'])
        c['clashvol'] = np.zeros_like(c['rcore'])
        c['intervol'] = np.zeros_like(c['rcore'])
        c['interspace'] = np.zeros_like(c['rcore'], dtype=np.int64)

        # complex arrays
        c['ft_shape'] = list(d['shape'])
        c['ft_shape'][-1] = d['shape'][-1]//2 + 1
        c['ft_lsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128)
        c['ft_rcore'] = np.zeros(c['ft_shape'], dtype=np.complex128)
        c['ft_rsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128)

        # initial calculations
        c['ft_rcore'] = rfftn(c['rcore'])
        c['ft_rsurf'] = rfftn(c['rsurf'])

        c['rotmat'] = np.asarray(self.rotations, dtype=np.float64)
        c['weights'] = np.asarray(self.weights, dtype=np.float64)
        c['nrot'] = d['nrot']
        c['shape'] = d['shape']
        c['max_clash'] = d['max_clash']
        c['min_interaction'] = d['min_interaction']
        c['vlength'] = int(np.linalg.norm(self.ligand.coor - \
                self.ligand.center, axis=1).max() + \
                self.interaction_radius + 1.5)/self.voxelspacing
        c['origin'] = d['origin']

        # SAXS arrays
        c['q'] = d['q']
        c['targetIq'] = d['targetIq']
        c['sq'] = d['sq']
        c['base_Iq'] = d['base_Iq']
        c['fifj'] = d['fifj']
        c['rind'] = d['rind']
        c['lind'] = d['lind']
        c['rxyz'] = d['rxyz']
        c['lxyz'] = d['lxyz']
        c['chi2'] = d['chi2']
        c['best_chi2'] = d['best_chi2']
        c['rot_ind'] = np.zeros(d['shape'], dtype=np.int32)
        c['Iq'] = np.zeros_like(c['targetIq'])
        c['tmplxyz'] = np.zeros_like(c['lxyz'])

    def _cpu_search(self):
        d = self.data
        c = self.cpu_data

        time0 = _time()
        for n in xrange(c['rotmat'].shape[0]):

            # rotate ligand image
            rotate_image3d(c['im_lsurf'], c['vlength'],
                    np.linalg.inv(c['rotmat'][n]), d['im_center'], c['lsurf'])

            c['ft_lsurf'] = rfftn(c['lsurf']).conj()
            c['clashvol'] = irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape'])
            c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape'])

            np.logical_and(c['clashvol'] < c['max_clash'],
                    c['intervol'] > c['min_interaction'], c['interspace'])

            print('Number of complexes to analyze: ', c['interspace'].sum())

            c['chi2'].fill(0)
            calc_chi2(c['interspace'], c['q'], c['base_Iq'], c['rind'], c['rxyz'],
                    c['lind'], (np.mat(c['rotmat'][n])*np.mat(c['lxyz']).T).T,
                    c['origin'], self.voxelspacing,
                    c['fifj'], c['targetIq'], c['sq'], c['chi2'])

            ind = c['chi2'] > c['best_chi2']
            c['best_chi2'][ind] = c['chi2'][ind]
            c['rot_ind'][ind] = n

            if _stdout.isatty():
                self._print_progress(n, c['nrot'], time0)

        d['best_chi2'] = c['best_chi2']
        d['rot_ind'] = c['rot_ind']

    def _print_progress(self, n, total, time0):
        m = n + 1
        pdone = m/total
        t = _time() - time0
        _stdout.write('\r{:d}/{:d} ({:.2%}, ETA: {:d}s)    '\
                .format(m, total, pdone, int(t/pdone - t)))
        _stdout.flush()

    def _gpu_init(self):
        self.gpu_data = {}
        g = self.gpu_data
        d = self.data
        q = self.queue

        g['rcore'] = cl_array.to_device(q, float32array(d['rcore'].array))
        g['rsurf'] = cl_array.to_device(q, float32array(d['rsurf'].array))
        g['im_lsurf'] = cl.image_from_array(q.context, float32array(d['lsurf'].array))

        g['sampler'] = cl.Sampler(q.context, False, cl.addressing_mode.CLAMP,
                cl.filter_mode.LINEAR)

        g['lsurf'] = cl_array.zeros_like(g['rcore'])
        g['clashvol'] = cl_array.zeros_like(g['rcore'])
        g['intervol'] = cl_array.zeros_like(g['rcore'])
        g['interspace'] = cl_array.zeros(q, d['shape'], dtype=np.int32)

        # complex arrays
        g['ft_shape'] = list(d['shape'])
        g['ft_shape'][0] = d['shape'][0]//2 + 1
        g['ft_rcore'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64)
        g['ft_rsurf'] = cl_array.zeros_like(g['ft_rcore'])
        g['ft_lsurf'] = cl_array.zeros_like(g['ft_rcore'])
        g['ft_clashvol'] = cl_array.zeros_like(g['ft_rcore'])
        g['ft_intervol'] = cl_array.zeros_like(g['ft_rcore'])

        # allocate SAXS arrays
        g['q'] = cl_array.to_device(q, float32array(d['q']))
        g['targetIq'] = cl_array.to_device(q, float32array(d['targetIq']))
        g['sq'] = cl_array.to_device(q, float32array(d['sq']))
        g['base_Iq'] = cl_array.to_device(q, float32array(d['base_Iq']))
        g['fifj'] = cl_array.to_device(q, float32array(d['fifj']))
        g['rind'] = cl_array.to_device(q, d['rind'].astype(np.int32))
        g['lind'] = cl_array.to_device(q, d['lind'].astype(np.int32))

        g_rxyz = np.zeros((d['rxyz'].shape[0], 4), dtype=np.float32)
        g_rxyz[:, :3] = d['rxyz'][:]
        g_lxyz = np.zeros((d['lxyz'].shape[0], 4), dtype=np.float32)
        g_lxyz[:, :3] = d['lxyz'][:]
        g['rxyz'] = cl_array.to_device(q, g_rxyz)
        g['lxyz'] = cl_array.to_device(q, g_lxyz)
        g['rot_lxyz'] = cl_array.zeros_like(g['lxyz'])
        g['chi2'] = cl_array.to_device(q, d['chi2'].astype(np.float32))
        g['best_chi2'] = cl_array.to_device(q, d['best_chi2'].astype(np.float32))
        g['rot_ind'] = cl_array.zeros(q, d['shape'], dtype=np.int32)

        g['origin'] = np.zeros(4, dtype=np.float32)
        g['origin'][:3] = d['origin'].astype(np.float32)
        g['voxelspacing'] = np.float32(self.voxelspacing)

        # kernels
        g['k'] = Kernels(q.context)
        g['saxs_k'] = saxs_Kernels(q.context)
        g['k'].rfftn = pyclfft.RFFTn(q.context, d['shape'])
        g['k'].irfftn = pyclfft.iRFFTn(q.context, d['shape'])

        g['k'].rfftn(q, g['rcore'], g['ft_rcore'])
        g['k'].rfftn(q, g['rsurf'], g['ft_rsurf'])

        g['nrot'] = d['nrot']
        g['max_clash'] = d['max_clash']
        g['min_interaction'] = d['min_interaction']

    def _gpu_search(self):
        d = self.data
        g = self.gpu_data
        q = self.queue
        k = g['k']

        time0 = _time()
        for n in xrange(g['nrot']):

            k.rotate_image3d(q, g['sampler'], g['im_lsurf'],
                    self.rotations[n], g['lsurf'], d['im_center'])

            k.rfftn(q, g['lsurf'], g['ft_lsurf'])
            k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rcore'], g['ft_clashvol'])
            k.irfftn(q, g['ft_clashvol'], g['clashvol'])

            k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rsurf'], g['ft_intervol'])
            k.irfftn(q, g['ft_intervol'], g['intervol'])

            k.touch(q, g['clashvol'], g['max_clash'],
                    g['intervol'], g['min_interaction'], g['interspace'])

            g['saxs_k'].rotate_points(q, g['lxyz'], self.rotations[n], g['rot_lxyz'])
            k.fill(q, g['chi2'], 0)
            g['saxs_k'].calc_chi2(q, g['interspace'], g['q'], g['base_Iq'],
                    g['rind'], g['rxyz'], g['lind'], g['rot_lxyz'], g['origin'],
                    g['voxelspacing'], g['fifj'], g['targetIq'], g['sq'], g['chi2'])

            g['saxs_k'].take_best(q, g['chi2'], g['best_chi2'], g['rot_ind'], n)

            if _stdout.isatty():
                self._print_progress(n, g['nrot'], time0)

        self.queue.finish()

        d['best_chi2'] = g['best_chi2'].get()
        d['rot_ind'] = g['rot_ind'].get()
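
# A minimal, self-contained illustration (example only, not used by the class
# above) of the FFT trick behind _cpu_search: multiplying the conjugate
# transform of the ligand map with the transform of a receptor map evaluates,
# for every translation at once, how many voxels of the two maps overlap.
# This is how 'clashvol' (against rcore) and 'intervol' (against rsurf) are
# obtained in the search loop.
def _example_fft_overlap():
    shape = (8, 8, 8)
    receptor_map = np.zeros(shape)
    receptor_map[2:5, 2:5, 2:5] = 1.0
    ligand_map = np.zeros(shape)
    ligand_map[0:2, 0:2, 0:2] = 1.0
    # overlap[t] = sum_x ligand_map[x] * receptor_map[x + t]  (circular)
    overlap = irfftn(rfftn(ligand_map).conj() * rfftn(receptor_map), s=shape)
    return overlap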

def rsurface(points, radius, shape, voxelspacing):
    dimensions = [x*voxelspacing for x in shape]
    origin = volume_origin(points, dimensions)
    rsurf = volume.zeros(shape, voxelspacing, origin)
    rsurf = dilate_points(points, radius, rsurf)
    return rsurf


def volume_origin(points, dimensions):
    center = points.mean(axis=0)
    origin = [(c - d/2.0) for c, d in zip(center, dimensions)]
    return origin


def grid_restraints(restraints, voxelspacing, origin, lcenter):
    nrestraints = len(restraints)
    g_restraints = np.zeros((nrestraints, 8), dtype=np.float64)
    for n in range(nrestraints):
        r_sel, l_sel, mindis, maxdis = restraints[n]
        r_pos = (r_sel.center - origin)/voxelspacing
        l_pos = (l_sel.center - lcenter)/voxelspacing
        g_restraints[n, 0:3] = r_pos
        g_restraints[n, 3:6] = l_pos
        g_restraints[n, 6] = mindis/voxelspacing
        g_restraints[n, 7] = maxdis/voxelspacing
    return g_restraints


def grid_shape(points1, points2, voxelspacing):
    shape = min_grid_shape(points1, points2, voxelspacing)
    shape = [volume.radix235(x) for x in shape]
    return shape


def min_grid_shape(points1, points2, voxelspacing):
    # the minimal grid shape is the size of the fixed protein in each
    # dimension plus the longest diameter of the scanning chain
    dimensions1 = points1.ptp(axis=0)
    dimension2 = longest_distance(points2)
    grid_shape = np.asarray(((dimensions1 + dimension2)/voxelspacing) + 10, dtype=np.int32)[::-1]
    return grid_shape


def float32array(array_like):
    # assumed implementation: _gpu_init hands these arrays straight to
    # cl_array.to_device / cl.image_from_array, which expect contiguous
    # float32 data
    return np.ascontiguousarray(array_like, dtype=np.float32)
def", "is the scanning chain dimensions1 = points1.ptp(axis=0) dimension2 = longest_distance(points2)", "e1, xyz1, bpr=self.beads_per_residue) d['base_Iq'] += scattering_curve(self._q, e2, xyz2, bpr=self.beads_per_residue) d['fifj'],", "ValueError(\"Input should be a list of rotation matrices.\") self._rotations =", "cannot be smaller than 1\") self._min_interaction = min_interaction + 0.9", "return self._interaction_radius @interaction_radius.setter def interaction_radius(self, radius): if radius <= 0:", "sys import stdout as _stdout from time import time as", "d['rcore'].start d['nrot'] = self.rotations.shape[0] # set ligand center to the", "= np.ones(self.rotations.shape[0], dtype=np.float64) if len(self.weights) != len(self.rotations): raise ValueError(\"\") d", "= cl_array.zeros_like(g['ft_rcore']) g['ft_intervol'] = cl_array.zeros_like(g['ft_rcore']) # allocate SAXS arrays g['q']", "g['rot_ind'] = cl_array.zeros(q, d['shape'], dtype=np.int32) g['origin'] = np.zeros(4, dtype=np.float32) g['origin'][:3]", "cl_array.zeros_like(g['ft_rcore']) g['ft_intervol'] = cl_array.zeros_like(g['ft_rcore']) # allocate SAXS arrays g['q'] =", "vdw_radii, shape, self.voxelspacing) # keep track of some data for", "xrange(g['nrot']): k.rotate_image3d(q, g['sampler'], g['im_lsurf'], self.rotations[n], g['lsurf'], d['im_center']) k.rfftn(q, g['lsurf'], g['ft_lsurf'])", "required interaction volume cannot be smaller than 1\") self._min_interaction =", "rfftn, irfftn from disvis import volume from disvis.points import dilate_points", "ValueError(\"Maximum allowed clashing volume cannot be negative\") self._max_clash = max_clash", "= cl_array.to_device(q, d['rind'].astype(np.int32)) g['lind'] = cl_array.to_device(q, d['lind'].astype(np.int32)) g_rxyz = np.zeros((d['rxyz'].shape[0],", "should be a list of rotation matrices.\") self._rotations = rotmat", "= np.asarray(((dimensions1 + dimension2)/voxelspacing) + 10, dtype=np.int32)[::-1] return grid_shape def", "# SAXS arrays c['q'] = d['q'] c['targetIq'] = d['targetIq'] c['sq']", "calc_chi2(c['interspace'], c['q'], c['base_Iq'], c['rind'], c['rxyz'], c['lind'], (np.mat(c['rotmat'][n])*np.mat(c['lxyz']).T).T, c['origin'], self.voxelspacing, c['fifj'],", "the interaction surface and core of the receptor vdw_radii =", "c['tmplxyz'] = np.zeros_like(c['lxyz']) def _cpu_search(self): d = self.data c =", "d['rot_ind']) def _cpu_init(self): self.cpu_data = {} c = self.cpu_data d", "1: raise ValueError(\"Minimum required interaction volume cannot be smaller than", "0.9 @property def queue(self): return self._queue @queue.setter def queue(self, queue):", "ligand radii = self.ligand.vdw_radius d['lsurf'] = dilate_points((self.ligand.coor - self.ligand.center \\", "= self.data q = self.queue g['rcore'] = cl_array.to_device(q, float32array(d['rcore'].array)) g['rsurf']", "self._q, self._Iq, self._sq = saxsdata def _initialize(self): # check if", "g['sq'], g['chi2']) g['saxs_k'].take_best(q, g['chi2'], g['best_chi2'], g['rot_ind'], n) if _stdout.isatty(): self._print_progress(n,", "g['lxyz'], self.rotations[n], g['rot_lxyz']) k.fill(q, g['chi2'], 0) g['saxs_k'].calc_chi2(q, g['interspace'], g['q'], g['base_Iq'],", "self.gpu_data d = self.data q = self.queue g['rcore'] = cl_array.to_device(q,", "radii, shape, self.voxelspacing) d['rcore'] = rsurface(self.receptor.coor, vdw_radii, shape, self.voxelspacing) #", "arrays c['ft_shape'] = list(d['shape']) c['ft_shape'][-1] = d['shape'][-1]//2 + 1 c['ft_lsurf']", "return origin def grid_restraints(restraints, voxelspacing, origin, 
lcenter): nrestraints = len(restraints)", "c['min_interaction'] = d['min_interaction'] c['vlength'] = int(np.linalg.norm(self.ligand.coor - \\ self.ligand.center, axis=1).max()", "should be bigger than zero\") self._interaction_radius = radius @property def", "self.ligand.vdw_radius d['lsurf'] = dilate_points((self.ligand.coor - self.ligand.center \\ + self.receptor.center), radii,", "g['intervol']) k.touch(q, g['clashvol'], g['max_clash'], g['intervol'], g['min_interaction'], g['interspace']) g['saxs_k'].rotate_points(q, g['lxyz'], self.rotations[n],", "cl_array.to_device(q, float32array(d['rcore'].array)) g['rsurf'] = cl_array.to_device(q, float32array(d['rsurf'].array)) g['im_lsurf'] = cl.image_from_array(q.context, float32array(d['lsurf'].array))", "= restraints[n] r_pos = (r_sel.center - origin)/voxelspacing l_pos = (l_sel.center", "pyfftw pyfftw.interfaces.cache.enable() pyfftw.interfaces.cache.set_keepalive_time(10) rfftn = pyfftw.interfaces.numpy_fft.rfftn irfftn = pyfftw.interfaces.numpy_fft.irfftn except", "cl_array.zeros_like(g['ft_rcore']) # allocate SAXS arrays g['q'] = cl_array.to_device(q, float32array(d['q'])) g['targetIq']", "are set if any(x is None for x in (self.receptor,", "pdone, int(t/pdone - t))) _stdout.flush() def _gpu_init(self): self.gpu_data = {}", "to the origin of the receptor map # and make", "= d['sq'] c['base_Iq'] = d['base_Iq'] c['fifj'] = d['fifj'] c['rind'] =", "cl_array.to_device(q, g_lxyz) g['rot_lxyz'] = cl_array.zeros_like(g['lxyz']) g['chi2'] = cl_array.to_device(q, d['chi2'].astype(np.float32)) g['best_chi2']", "volume from disvis.points import dilate_points from disvis.libdisvis import (rotate_image3d, dilate_points_add,", "= d['best_chi2'] > 0 d['best_chi2'][ind] -= d['best_chi2'][ind].min() best_chi2 = volume.Volume(d['best_chi2'],", "rsurf = dilate_points(points, radius, rsurf) return rsurf def volume_origin(points, dimensions):", "rotmat @property def weights(self): return self._weights @weights.setter def weights(self, weights):", "from sys import stdout as _stdout from time import time", "2 # CPU or GPU self._queue = None # unchangeable", "the ligand radii = self.ligand.vdw_radius d['lsurf'] = dilate_points((self.ligand.coor - self.ligand.center", "= cl_array.to_device(q, float32array(d['rsurf'].array)) g['im_lsurf'] = cl.image_from_array(q.context, float32array(d['lsurf'].array)) g['sampler'] = cl.Sampler(q.context,", "return self._queue @queue.setter def queue(self, queue): self._queue = queue @property", "= d['origin'] # SAXS arrays c['q'] = d['q'] c['targetIq'] =", "time0 = _time() for n in xrange(c['rotmat'].shape[0]): # rotate ligand", "-= d['best_chi2'][ind].min() best_chi2 = volume.Volume(d['best_chi2'], voxelspacing=self.voxelspacing, origin=d['origin']) return Solutions(best_chi2, self.rotations,", "g['interspace'] = cl_array.zeros(q, d['shape'], dtype=np.int32) # complex arrays g['ft_shape'] =", "search(self): self._initialize() if self.queue is None: self._cpu_init() self._cpu_search() else: self._gpu_init()", "dtype=np.complex128) # initial calculations c['ft_rcore'] = rfftn(c['rcore']) c['ft_rsurf'] = rfftn(c['rsurf'])", "e2, xyz2 = coarse_grain(self.ligand, bpr=self.beads_per_residue) else: e1, xyz1 = self.receptor.elements,", "100 self.min_interaction = 300 self.coarse_grain = True self.beads_per_residue = 2", "size for grid shape = grid_shape(self.receptor.coor, self.ligand.coor, self.voxelspacing) # calculate", "c['chi2']) ind = c['chi2'] > c['best_chi2'] c['best_chi2'][ind] = c['chi2'][ind] c['rot_ind'][ind]", 
"self.rotations.shape[0] # set ligand center to the origin of the", "of the receptor vdw_radii = self.receptor.vdw_radius radii = vdw_radii +", "self.ligand.coor, self.voxelspacing) # calculate the interaction surface and core of", "minimal grid shape is the size of the fixed protein", "g['nrot'] = d['nrot'] g['max_clash'] = d['max_clash'] g['min_interaction'] = d['min_interaction'] def", "origin, lcenter): nrestraints = len(restraints) g_restraints = np.zeros((nrestraints, 8), dtype=np.float64)", "if requirements are set if any(x is None for x", "float32array(d['fifj'])) g['rind'] = cl_array.to_device(q, d['rind'].astype(np.int32)) g['lind'] = cl_array.to_device(q, d['lind'].astype(np.int32)) g_rxyz", "radius @property def voxelspacing(self): return self._voxelspacing @voxelspacing.setter def voxelspacing(self, voxelspacing):", "self.receptor.elements, self.receptor.coor e2, xyz2 = self.ligand.elements, self.ligand.coor d['base_Iq'] = scattering_curve(self._q,", "total, pdone, int(t/pdone - t))) _stdout.flush() def _gpu_init(self): self.gpu_data =", "_time() for n in xrange(c['rotmat'].shape[0]): # rotate ligand image rotate_image3d(c['im_lsurf'],", "+ 1 pdone = m/total t = _time() - time0", "cl_array.to_device(q, float32array(d['fifj'])) g['rind'] = cl_array.to_device(q, d['rind'].astype(np.int32)) g['lind'] = cl_array.to_device(q, d['lind'].astype(np.int32))", "range(nrestraints): r_sel, l_sel, mindis, maxdis = restraints[n] r_pos = (r_sel.center", "from disvis import pyclfft except ImportError: pass class FullSAXS(object): def", "max_clash < 0: raise ValueError(\"Maximum allowed clashing volume cannot be", "core of the receptor vdw_radii = self.receptor.vdw_radius radii = vdw_radii", "pyfftw.interfaces.cache.set_keepalive_time(10) rfftn = pyfftw.interfaces.numpy_fft.rfftn irfftn = pyfftw.interfaces.numpy_fft.irfftn except ImportError: from", "_time() - time0 _stdout.write('\\r{:d}/{:d} ({:.2%}, ETA: {:d}s) '\\ .format(m, total,", "d['origin'].astype(np.float32) g['voxelspacing'] = np.float32(self.voxelspacing) # kernels g['k'] = Kernels(q.context) g['saxs_k']", "irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape']) c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape'])", "None self._sq = None @property def receptor(self): return self._receptor @receptor.setter", "g['targetIq'], g['sq'], g['chi2']) g['saxs_k'].take_best(q, g['chi2'], g['best_chi2'], g['rot_ind'], n) if _stdout.isatty():", "dilate_points from disvis.libdisvis import (rotate_image3d, dilate_points_add, longest_distance) from powerfit.solutions import", "try: import pyfftw pyfftw.interfaces.cache.enable() pyfftw.interfaces.cache.set_keepalive_time(10) rfftn = pyfftw.interfaces.numpy_fft.rfftn irfftn =", "g['ft_shape'][0] = d['shape'][0]//2 + 1 g['ft_rcore'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64)", "e2, xyz2 = self.ligand.elements, self.ligand.coor d['base_Iq'] = scattering_curve(self._q, e1, xyz1,", "g['k'].rfftn(q, g['rcore'], g['ft_rcore']) g['k'].rfftn(q, g['rsurf'], g['ft_rsurf']) g['nrot'] = d['nrot'] g['max_clash']", "c['ft_rcore'], s=c['shape']) c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape']) np.logical_and(c['clashvol'] <", "self.voxelspacing = 1.0 self.interaction_radius = 2.5 self.max_clash = 100 self.min_interaction", "t = _time() - time0 _stdout.write('\\r{:d}/{:d} ({:.2%}, ETA: {:d}s) '\\", "return self._receptor @receptor.setter def receptor(self, receptor): self._receptor = receptor.duplicate() @property", "scattering_curve(self._q, e2, xyz2, 
bpr=self.beads_per_residue) d['fifj'], d['rind'], d['lind'] = create_fifj_lookup_table(d['q'], e1,", "d['lind'] = create_fifj_lookup_table(d['q'], e1, e2, bpr=self.beads_per_residue) d['rxyz'] = xyz1 d['lxyz']", "def _gpu_search(self): d = self.data g = self.gpu_data q =", "be negative\") self._max_clash = max_clash + 0.9 @property def min_interaction(self):", "@property def receptor(self): return self._receptor @receptor.setter def receptor(self, receptor): self._receptor", "e1, e2, bpr=self.beads_per_residue) d['rxyz'] = xyz1 d['lxyz'] = xyz2 -", "self._ligand = None # parameters with standard values self.rotations =", "= None self._Iq = None self._sq = None @property def", "voxelspacing(self, voxelspacing): self._voxelspacing = voxelspacing @property def max_clash(self): return self._max_clash", "ind = c['chi2'] > c['best_chi2'] c['best_chi2'][ind] = c['chi2'][ind] c['rot_ind'][ind] =", "g['voxelspacing'] = np.float32(self.voxelspacing) # kernels g['k'] = Kernels(q.context) g['saxs_k'] =", ":3] = d['rxyz'][:] g_lxyz = np.zeros((d['lxyz'].shape[0], 4), dtype=np.float32) g_lxyz[:, :3]", "d['lxyz'] c['chi2'] = d['chi2'] c['best_chi2'] = d['best_chi2'] c['rot_ind'] = np.zeros(d['shape'],", "(np.mat(c['rotmat'][n])*np.mat(c['lxyz']).T).T, c['origin'], self.voxelspacing, c['fifj'], c['targetIq'], c['sq'], c['chi2']) ind = c['chi2']", "[volume.radix235(x) for x in shape] return shape def min_grid_shape(points1, points2,", "np.asarray((self.receptor.center - d['rcore'].origin)/self.voxelspacing, dtype=np.float64) d['max_clash'] = self.max_clash/self.voxelspacing**3 d['min_interaction'] = self.min_interaction/self.voxelspacing**3", "= np.zeros(d['rcore'].shape, dtype=np.float64) d['best_chi2'] = np.zeros_like(d['chi2']) def search(self): self._initialize() if", "c['rxyz'], c['lind'], (np.mat(c['rotmat'][n])*np.mat(c['lxyz']).T).T, c['origin'], self.voxelspacing, c['fifj'], c['targetIq'], c['sq'], c['chi2']) ind", "300 self.coarse_grain = True self.beads_per_residue = 2 # CPU or", "g_lxyz) g['rot_lxyz'] = cl_array.zeros_like(g['lxyz']) g['chi2'] = cl_array.to_device(q, d['chi2'].astype(np.float32)) g['best_chi2'] =", "None: self._cpu_init() self._cpu_search() else: self._gpu_init() self._gpu_search() if _stdout.isatty(): print() d", "def grid_shape(points1, points2, voxelspacing): shape = min_grid_shape(points1, points2, voxelspacing) shape", "= self.receptor.elements, self.receptor.coor e2, xyz2 = self.ligand.elements, self.ligand.coor d['base_Iq'] =", "voxelspacing=self.voxelspacing, origin=d['origin']) return Solutions(best_chi2, self.rotations, d['rot_ind']) def _cpu_init(self): self.cpu_data =", "for c, d in zip(center, dimensions)] return origin def grid_restraints(restraints,", "xyz2 = coarse_grain(self.ligand, bpr=self.beads_per_residue) else: e1, xyz1 = self.receptor.elements, self.receptor.coor", "self.cpu_data d = self.data c['rcore'] = d['rcore'].array c['rsurf'] = d['rsurf'].array", "- t))) _stdout.flush() def _gpu_init(self): self.gpu_data = {} g =", "self.voxelspacing) # calculate the interaction surface and core of the", "_print_progress(self, n, total, time0): m = n + 1 pdone", "None self.voxelspacing = 1.0 self.interaction_radius = 2.5 self.max_clash = 100", "d = self.data # determine size for grid shape =", "later calculations d['origin'] = np.asarray(d['rcore'].origin, dtype=np.float64) d['shape'] = d['rcore'].shape d['start']", "return shape def min_grid_shape(points1, points2, voxelspacing): # the minimal grid", "self._print_progress(n, g['nrot'], time0) self.queue.finish() 
d['best_chi2'] = g['best_chi2'].get() d['rot_ind'] = g['rot_ind'].get()", "= None # unchangeable self._data = {} self._q = None", "rfftn(c['rsurf']) c['rotmat'] = np.asarray(self.rotations, dtype=np.float64) c['weights'] = np.asarray(self.weights, dtype=np.float64) c['nrot']", "m/total t = _time() - time0 _stdout.write('\\r{:d}/{:d} ({:.2%}, ETA: {:d}s)", "g['ft_rcore'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64) g['ft_rsurf'] = cl_array.zeros_like(g['ft_rcore']) g['ft_lsurf'] =", "g['nrot'], time0) self.queue.finish() d['best_chi2'] = g['best_chi2'].get() d['rot_ind'] = g['rot_ind'].get() def", "if max_clash < 0: raise ValueError(\"Maximum allowed clashing volume cannot", "g['clashvol'] = cl_array.zeros_like(g['rcore']) g['intervol'] = cl_array.zeros_like(g['rcore']) g['interspace'] = cl_array.zeros(q, d['shape'],", "self._receptor = None self._ligand = None # parameters with standard", "rfftn(c['lsurf']).conj() c['clashvol'] = irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape']) c['intervol'] = irfftn(c['ft_lsurf']", "raise ValueError(\"Input should be a list of rotation matrices.\") self._rotations", "g['rind'] = cl_array.to_device(q, d['rind'].astype(np.int32)) g['lind'] = cl_array.to_device(q, d['lind'].astype(np.int32)) g_rxyz =", "arrays g['q'] = cl_array.to_device(q, float32array(d['q'])) g['targetIq'] = cl_array.to_device(q, float32array(d['targetIq'])) g['sq']", "= d['lxyz'][:] g['rxyz'] = cl_array.to_device(q, g_rxyz) g['lxyz'] = cl_array.to_device(q, g_lxyz)", "len(self.rotations): raise ValueError(\"\") d = self.data # determine size for", "voxelspacing) shape = [volume.radix235(x) for x in shape] return shape", "def min_interaction(self): return self._min_interaction @min_interaction.setter def min_interaction(self, min_interaction): if min_interaction", "self.ligand)): raise ValueError(\"Not all requirements are met for a search\")", "weights(self, weights): self._weights = weights @property def interaction_radius(self): return self._interaction_radius", "ImportError: pass class FullSAXS(object): def __init__(self): # parameters to be", "than zero\") self._interaction_radius = radius @property def voxelspacing(self): return self._voxelspacing", "the receptor map # and make a grid of the", "rotmat.ndim != 3: raise ValueError(\"Input should be a list of", "receptor map # and make a grid of the ligand", "g['rxyz'], g['lind'], g['rot_lxyz'], g['origin'], g['voxelspacing'], g['fifj'], g['targetIq'], g['sq'], g['chi2']) g['saxs_k'].take_best(q,", "d['nrot'] = self.rotations.shape[0] # set ligand center to the origin", "= [x*voxelspacing for x in shape] origin = volume_origin(points, dimensions)", "!= len(self.rotations): raise ValueError(\"\") d = self.data # determine size", "@property def interaction_radius(self): return self._interaction_radius @interaction_radius.setter def interaction_radius(self, radius): if", "d['base_Iq'] c['fifj'] = d['fifj'] c['rind'] = d['rind'] c['lind'] = d['lind']", "zip(center, dimensions)] return origin def grid_restraints(restraints, voxelspacing, origin, lcenter): nrestraints", "bigger than zero\") self._interaction_radius = radius @property def voxelspacing(self): return", "g['ft_clashvol']) k.irfftn(q, g['ft_clashvol'], g['clashvol']) k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rsurf'], g['ft_intervol']) k.irfftn(q, g['ft_intervol'],", "each dimension and the longest diameter is the scanning chain", "+ 1 c['ft_lsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128) c['ft_rcore'] = np.zeros(c['ft_shape'], dtype=np.complex128)", "self.data 
q = self.queue g['rcore'] = cl_array.to_device(q, float32array(d['rcore'].array)) g['rsurf'] =", "def _initialize(self): # check if requirements are set if any(x", "def _cpu_search(self): d = self.data c = self.cpu_data time0 =", "grid of the ligand radii = self.ligand.vdw_radius d['lsurf'] = dilate_points((self.ligand.coor", "min_grid_shape(points1, points2, voxelspacing) shape = [volume.radix235(x) for x in shape]", "from time import time as _time import numpy as np", "g['targetIq'] = cl_array.to_device(q, float32array(d['targetIq'])) g['sq'] = cl_array.to_device(q, float32array(d['sq'])) g['base_Iq'] =", "of complexes to analyze: ', c['interspace'].sum()) c['chi2'].fill(0) calc_chi2(c['interspace'], c['q'], c['base_Iq'],", "import pyclfft except ImportError: pass class FullSAXS(object): def __init__(self): #", "g['rot_lxyz'], g['origin'], g['voxelspacing'], g['fifj'], g['targetIq'], g['sq'], g['chi2']) g['saxs_k'].take_best(q, g['chi2'], g['best_chi2'],", "np.zeros((nrestraints, 8), dtype=np.float64) for n in range(nrestraints): r_sel, l_sel, mindis,", "is None: self.weights = np.ones(self.rotations.shape[0], dtype=np.float64) if len(self.weights) != len(self.rotations):", "self.receptor.center), radii, volume.zeros_like(d['rcore'])) d['im_center'] = np.asarray((self.receptor.center - d['rcore'].origin)/self.voxelspacing, dtype=np.float64) d['max_clash']", "create_fifj_lookup_table from saxstools.helpers import coarse_grain from saxstools.libsaxstools import calc_chi2 from", "self._sq if self.coarse_grain: e1, xyz1 = coarse_grain(self.receptor, bpr=self.beads_per_residue) e2, xyz2", "= xyz1 d['lxyz'] = xyz2 - self.ligand.center d['chi2'] = np.zeros(d['rcore'].shape,", "_stdout.isatty(): self._print_progress(n, g['nrot'], time0) self.queue.finish() d['best_chi2'] = g['best_chi2'].get() d['rot_ind'] =", "as cl import pyopencl.array as cl_array import disvis.pyclfft from disvis.kernels", "7] = maxdis/voxelspacing return g_restraints def grid_shape(points1, points2, voxelspacing): shape", "g['sq'] = cl_array.to_device(q, float32array(d['sq'])) g['base_Iq'] = cl_array.to_device(q, float32array(d['base_Iq'])) g['fifj'] =", "= np.asarray(d['rcore'].origin, dtype=np.float64) d['shape'] = d['rcore'].shape d['start'] = d['rcore'].start d['nrot']", "in xrange(c['rotmat'].shape[0]): # rotate ligand image rotate_image3d(c['im_lsurf'], c['vlength'], np.linalg.inv(c['rotmat'][n]), d['im_center'],", "c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape']) np.logical_and(c['clashvol'] < c['max_clash'], c['intervol']", "def saxsdata(self, saxsdata): self._q, self._Iq, self._sq = saxsdata def _initialize(self):", "pyclfft.RFFTn(q.context, d['shape']) g['k'].irfftn = pyclfft.iRFFTn(q.context, d['shape']) g['k'].rfftn(q, g['rcore'], g['ft_rcore']) g['k'].rfftn(q,", "= receptor.duplicate() @property def ligand(self): return self._ligand @ligand.setter def ligand(self,", "saxsdata(self, saxsdata): self._q, self._Iq, self._sq = saxsdata def _initialize(self): #", "interaction surface and core of the receptor vdw_radii = self.receptor.vdw_radius", "g['rot_lxyz'] = cl_array.zeros_like(g['lxyz']) g['chi2'] = cl_array.to_device(q, d['chi2'].astype(np.float32)) g['best_chi2'] = cl_array.to_device(q,", "g['interspace'], g['q'], g['base_Iq'], g['rind'], g['rxyz'], g['lind'], g['rot_lxyz'], g['origin'], g['voxelspacing'], g['fifj'],", "e2, bpr=self.beads_per_residue) d['rxyz'] = xyz1 d['lxyz'] = xyz2 - self.ligand.center", "float32array(d['base_Iq'])) g['fifj'] = cl_array.to_device(q, 
float32array(d['fifj'])) g['rind'] = cl_array.to_device(q, d['rind'].astype(np.int32)) g['lind']", "+ 10, dtype=np.int32)[::-1] return grid_shape def float32array(array_like): return np.asarray(array_like, dtype=np.float32)", "of the ligand radii = self.ligand.vdw_radius d['lsurf'] = dilate_points((self.ligand.coor -", "= d['rind'] c['lind'] = d['lind'] c['rxyz'] = d['rxyz'] c['lxyz'] =", "in (self.receptor, self.ligand)): raise ValueError(\"Not all requirements are met for", "make a grid of the ligand radii = self.ligand.vdw_radius d['lsurf']", "cl_array.to_device(q, g_rxyz) g['lxyz'] = cl_array.to_device(q, g_lxyz) g['rot_lxyz'] = cl_array.zeros_like(g['lxyz']) g['chi2']", "c['ft_lsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128) c['ft_rcore'] = np.zeros(c['ft_shape'], dtype=np.complex128) c['ft_rsurf'] =", "smaller than 1\") self._min_interaction = min_interaction + 0.9 @property def", "\\ + self.receptor.center), radii, volume.zeros_like(d['rcore'])) d['im_center'] = np.asarray((self.receptor.center - d['rcore'].origin)/self.voxelspacing,", "= self.ligand.elements, self.ligand.coor d['base_Iq'] = scattering_curve(self._q, e1, xyz1, bpr=self.beads_per_residue) d['base_Iq']", "= cl_array.to_device(q, d['chi2'].astype(np.float32)) g['best_chi2'] = cl_array.to_device(q, d['best_chi2'].astype(np.float32)) g['rot_ind'] = cl_array.zeros(q,", "from powerfit.solutions import Solutions from saxstools.saxs_curve import scattering_curve, create_fifj_lookup_table from", "be defined self._receptor = None self._ligand = None # parameters", "self._queue = None # unchangeable self._data = {} self._q =", "of the fixed protein in # each dimension and the", "c['sq'], c['chi2']) ind = c['chi2'] > c['best_chi2'] c['best_chi2'][ind] = c['chi2'][ind]", "keep track of some data for later calculations d['origin'] =", "c['base_Iq'], c['rind'], c['rxyz'], c['lind'], (np.mat(c['rotmat'][n])*np.mat(c['lxyz']).T).T, c['origin'], self.voxelspacing, c['fifj'], c['targetIq'], c['sq'],", "def _cpu_init(self): self.cpu_data = {} c = self.cpu_data d =", "allocate SAXS arrays g['q'] = cl_array.to_device(q, float32array(d['q'])) g['targetIq'] = cl_array.to_device(q,", "self._receptor = receptor.duplicate() @property def ligand(self): return self._ligand @ligand.setter def", "self.cpu_data = {} c = self.cpu_data d = self.data c['rcore']", "grid_restraints(restraints, voxelspacing, origin, lcenter): nrestraints = len(restraints) g_restraints = np.zeros((nrestraints,", "analyze: ', c['interspace'].sum()) c['chi2'].fill(0) calc_chi2(c['interspace'], c['q'], c['base_Iq'], c['rind'], c['rxyz'], c['lind'],", "the scanning chain dimensions1 = points1.ptp(axis=0) dimension2 = longest_distance(points2) grid_shape", "l_pos g_restraints[n, 6] = mindis/voxelspacing g_restraints[n, 7] = maxdis/voxelspacing return", "c['targetIq'], c['sq'], c['chi2']) ind = c['chi2'] > c['best_chi2'] c['best_chi2'][ind] =", "= d['q'] c['targetIq'] = d['targetIq'] c['sq'] = d['sq'] c['base_Iq'] =", "best_chi2 = volume.Volume(d['best_chi2'], voxelspacing=self.voxelspacing, origin=d['origin']) return Solutions(best_chi2, self.rotations, d['rot_ind']) def", "= irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape']) c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'],", "g['ft_rsurf'] = cl_array.zeros_like(g['ft_rcore']) g['ft_lsurf'] = cl_array.zeros_like(g['ft_rcore']) g['ft_clashvol'] = cl_array.zeros_like(g['ft_rcore']) g['ft_intervol']", "for later calculations d['origin'] = np.asarray(d['rcore'].origin, dtype=np.float64) d['shape'] = d['rcore'].shape", 
"rotate_image3d(c['im_lsurf'], c['vlength'], np.linalg.inv(c['rotmat'][n]), d['im_center'], c['lsurf']) c['ft_lsurf'] = rfftn(c['lsurf']).conj() c['clashvol'] =", "8), dtype=np.float64) for n in range(nrestraints): r_sel, l_sel, mindis, maxdis", "from numpy.fft import rfftn, irfftn from disvis import volume from", "self._max_clash = max_clash + 0.9 @property def min_interaction(self): return self._min_interaction", "search\") if self.weights is None: self.weights = np.ones(self.rotations.shape[0], dtype=np.float64) if", "bpr=self.beads_per_residue) else: e1, xyz1 = self.receptor.elements, self.receptor.coor e2, xyz2 =", "= vdw_radii + self.interaction_radius d['rsurf'] = rsurface(self.receptor.coor, radii, shape, self.voxelspacing)", "@saxsdata.setter def saxsdata(self, saxsdata): self._q, self._Iq, self._sq = saxsdata def", "queue): self._queue = queue @property def data(self): return self._data @property", "= self._q d['targetIq'] = self._Iq d['sq'] = self._sq if self.coarse_grain:", "g['lsurf'] = cl_array.zeros_like(g['rcore']) g['clashvol'] = cl_array.zeros_like(g['rcore']) g['intervol'] = cl_array.zeros_like(g['rcore']) g['interspace']", "= np.zeros(c['ft_shape'], dtype=np.complex128) c['ft_rsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128) # initial calculations", "n, total, time0): m = n + 1 pdone =", "0) g['saxs_k'].calc_chi2(q, g['interspace'], g['q'], g['base_Iq'], g['rind'], g['rxyz'], g['lind'], g['rot_lxyz'], g['origin'],", "c['ft_rsurf'], s=c['shape']) np.logical_and(c['clashvol'] < c['max_clash'], c['intervol'] > c['min_interaction'], c['interspace']) print('Number", "restraints[n] r_pos = (r_sel.center - origin)/voxelspacing l_pos = (l_sel.center -", "xyz2 - self.ligand.center d['chi2'] = np.zeros(d['rcore'].shape, dtype=np.float64) d['best_chi2'] = np.zeros_like(d['chi2'])", "import time as _time import numpy as np try: import", "= self.queue g['rcore'] = cl_array.to_device(q, float32array(d['rcore'].array)) g['rsurf'] = cl_array.to_device(q, float32array(d['rsurf'].array))", "shape, self.voxelspacing) # keep track of some data for later", "for x in shape] origin = volume_origin(points, dimensions) rsurf =", "return rsurf def volume_origin(points, dimensions): center = points.mean(axis=0) origin =", "= len(restraints) g_restraints = np.zeros((nrestraints, 8), dtype=np.float64) for n in", "time0): m = n + 1 pdone = m/total t", "xyz2, bpr=self.beads_per_residue) d['fifj'], d['rind'], d['lind'] = create_fifj_lookup_table(d['q'], e1, e2, bpr=self.beads_per_residue)", "xrange(c['rotmat'].shape[0]): # rotate ligand image rotate_image3d(c['im_lsurf'], c['vlength'], np.linalg.inv(c['rotmat'][n]), d['im_center'], c['lsurf'])", "c['rind'] = d['rind'] c['lind'] = d['lind'] c['rxyz'] = d['rxyz'] c['lxyz']", "cl_array.zeros(q, g['ft_shape'], dtype=np.complex64) g['ft_rsurf'] = cl_array.zeros_like(g['ft_rcore']) g['ft_lsurf'] = cl_array.zeros_like(g['ft_rcore']) g['ft_clashvol']", "d['rsurf'] = rsurface(self.receptor.coor, radii, shape, self.voxelspacing) d['rcore'] = rsurface(self.receptor.coor, vdw_radii,", "self._interaction_radius = radius @property def voxelspacing(self): return self._voxelspacing @voxelspacing.setter def", "g['fifj'] = cl_array.to_device(q, float32array(d['fifj'])) g['rind'] = cl_array.to_device(q, d['rind'].astype(np.int32)) g['lind'] =", "longest_distance) from powerfit.solutions import Solutions from saxstools.saxs_curve import scattering_curve, create_fifj_lookup_table", "d['nrot'] c['shape'] = d['shape'] c['max_clash'] = d['max_clash'] 
c['min_interaction'] = d['min_interaction']", "g['q'], g['base_Iq'], g['rind'], g['rxyz'], g['lind'], g['rot_lxyz'], g['origin'], g['voxelspacing'], g['fifj'], g['targetIq'],", "# initial calculations c['ft_rcore'] = rfftn(c['rcore']) c['ft_rsurf'] = rfftn(c['rsurf']) c['rotmat']", "0: raise ValueError(\"Interaction radius should be bigger than zero\") self._interaction_radius", "origin of the receptor map # and make a grid", "r_pos = (r_sel.center - origin)/voxelspacing l_pos = (l_sel.center - lcenter)/voxelspacing", "size of the fixed protein in # each dimension and", "xyz1 d['lxyz'] = xyz2 - self.ligand.center d['chi2'] = np.zeros(d['rcore'].shape, dtype=np.float64)", "= pyclfft.RFFTn(q.context, d['shape']) g['k'].irfftn = pyclfft.iRFFTn(q.context, d['shape']) g['k'].rfftn(q, g['rcore'], g['ft_rcore'])", "float32array(d['rsurf'].array)) g['im_lsurf'] = cl.image_from_array(q.context, float32array(d['lsurf'].array)) g['sampler'] = cl.Sampler(q.context, False, cl.addressing_mode.CLAMP,", "g['min_interaction'] = d['min_interaction'] def _gpu_search(self): d = self.data g =", "from __future__ import print_function, absolute_import, division from sys import stdout", "if _stdout.isatty(): print() d = self.data ind = d['best_chi2'] >", "g['origin'] = np.zeros(4, dtype=np.float32) g['origin'][:3] = d['origin'].astype(np.float32) g['voxelspacing'] = np.float32(self.voxelspacing)", "_stdout.write('\\r{:d}/{:d} ({:.2%}, ETA: {:d}s) '\\ .format(m, total, pdone, int(t/pdone -", "2.5 self.max_clash = 100 self.min_interaction = 300 self.coarse_grain = True", "calculations c['ft_rcore'] = rfftn(c['rcore']) c['ft_rsurf'] = rfftn(c['rsurf']) c['rotmat'] = np.asarray(self.rotations,", "4), dtype=np.float32) g_lxyz[:, :3] = d['lxyz'][:] g['rxyz'] = cl_array.to_device(q, g_rxyz)", "d['lsurf'].array c['lsurf'] = np.zeros_like(c['rcore']) c['clashvol'] = np.zeros_like(c['rcore']) c['intervol'] = np.zeros_like(c['rcore'])", "requirements are met for a search\") if self.weights is None:", "initial calculations c['ft_rcore'] = rfftn(c['rcore']) c['ft_rsurf'] = rfftn(c['rsurf']) c['rotmat'] =", "arrays c['q'] = d['q'] c['targetIq'] = d['targetIq'] c['sq'] = d['sq']", "dimension and the longest diameter is the scanning chain dimensions1", "c['lsurf']) c['ft_lsurf'] = rfftn(c['lsurf']).conj() c['clashvol'] = irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape'])", "of rotation matrices.\") self._rotations = rotmat @property def weights(self): return", "shape, voxelspacing): dimensions = [x*voxelspacing for x in shape] origin", "from disvis.kernels import Kernels from disvis import pyclfft except ImportError:", "{} g = self.gpu_data d = self.data q = self.queue", "self.voxelspacing) # keep track of some data for later calculations", "shape is the size of the fixed protein in #", "d['fifj'], d['rind'], d['lind'] = create_fifj_lookup_table(d['q'], e1, e2, bpr=self.beads_per_residue) d['rxyz'] =", "as _time import numpy as np try: import pyfftw pyfftw.interfaces.cache.enable()", "np.zeros_like(c['targetIq']) c['tmplxyz'] = np.zeros_like(c['lxyz']) def _cpu_search(self): d = self.data c", "time0 = _time() for n in xrange(g['nrot']): k.rotate_image3d(q, g['sampler'], g['im_lsurf'],", "= self.cpu_data d = self.data c['rcore'] = d['rcore'].array c['rsurf'] =", "g['k'] time0 = _time() for n in xrange(g['nrot']): k.rotate_image3d(q, g['sampler'],", "= d['rxyz'] c['lxyz'] = d['lxyz'] c['chi2'] = d['chi2'] c['best_chi2'] =", "c['weights'] = np.asarray(self.weights, dtype=np.float64) c['nrot'] = d['nrot'] c['shape'] = d['shape']", 
"= ligand.duplicate() @property def rotations(self): return self._rotations @rotations.setter def rotations(self,", "np.zeros(c['ft_shape'], dtype=np.complex128) # initial calculations c['ft_rcore'] = rfftn(c['rcore']) c['ft_rsurf'] =", "self.cpu_data time0 = _time() for n in xrange(c['rotmat'].shape[0]): # rotate", "= longest_distance(points2) grid_shape = np.asarray(((dimensions1 + dimension2)/voxelspacing) + 10, dtype=np.int32)[::-1]", "self.data ind = d['best_chi2'] > 0 d['best_chi2'][ind] -= d['best_chi2'][ind].min() best_chi2", "met for a search\") if self.weights is None: self.weights =", "for n in range(nrestraints): r_sel, l_sel, mindis, maxdis = restraints[n]", "import calc_chi2 from saxstools.kernels import Kernels as saxs_Kernels try: import", "dimensions1 = points1.ptp(axis=0) dimension2 = longest_distance(points2) grid_shape = np.asarray(((dimensions1 +", "c['fifj'] = d['fifj'] c['rind'] = d['rind'] c['lind'] = d['lind'] c['rxyz']", "({:.2%}, ETA: {:d}s) '\\ .format(m, total, pdone, int(t/pdone - t)))", "return Solutions(best_chi2, self.rotations, d['rot_ind']) def _cpu_init(self): self.cpu_data = {} c", "= d['nrot'] c['shape'] = d['shape'] c['max_clash'] = d['max_clash'] c['min_interaction'] =", "scattering_curve, create_fifj_lookup_table from saxstools.helpers import coarse_grain from saxstools.libsaxstools import calc_chi2", "= c['chi2'][ind] c['rot_ind'][ind] = n if _stdout.isatty(): self._print_progress(n, c['nrot'], time0)", "float32array(d['sq'])) g['base_Iq'] = cl_array.to_device(q, float32array(d['base_Iq'])) g['fifj'] = cl_array.to_device(q, float32array(d['fifj'])) g['rind']", "origin) rsurf = dilate_points(points, radius, rsurf) return rsurf def volume_origin(points,", "interaction volume cannot be smaller than 1\") self._min_interaction = min_interaction", "rsurf) return rsurf def volume_origin(points, dimensions): center = points.mean(axis=0) origin", "= {} c = self.cpu_data d = self.data c['rcore'] =", "g['ft_rsurf'], g['ft_intervol']) k.irfftn(q, g['ft_intervol'], g['intervol']) k.touch(q, g['clashvol'], g['max_clash'], g['intervol'], g['min_interaction'],", "def __init__(self): # parameters to be defined self._receptor = None", "d['best_chi2'] = g['best_chi2'].get() d['rot_ind'] = g['rot_ind'].get() def rsurface(points, radius, shape,", "complex arrays g['ft_shape'] = list(d['shape']) g['ft_shape'][0] = d['shape'][0]//2 + 1", "origin def grid_restraints(restraints, voxelspacing, origin, lcenter): nrestraints = len(restraints) g_restraints", "origin)/voxelspacing l_pos = (l_sel.center - lcenter)/voxelspacing g_restraints[n, 0:3] = r_pos", "3:6] = l_pos g_restraints[n, 6] = mindis/voxelspacing g_restraints[n, 7] =", "self._ligand = ligand.duplicate() @property def rotations(self): return self._rotations @rotations.setter def", "d['shape'][-1]//2 + 1 c['ft_lsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128) c['ft_rcore'] = np.zeros(c['ft_shape'],", "g['ft_rcore']) g['k'].rfftn(q, g['rsurf'], g['ft_rsurf']) g['nrot'] = d['nrot'] g['max_clash'] = d['max_clash']", "dtype=np.float64) d['best_chi2'] = np.zeros_like(d['chi2']) def search(self): self._initialize() if self.queue is", "= self.gpu_data d = self.data q = self.queue g['rcore'] =", "np.zeros((d['rxyz'].shape[0], 4), dtype=np.float32) g_rxyz[:, :3] = d['rxyz'][:] g_lxyz = np.zeros((d['lxyz'].shape[0],", "radii = vdw_radii + self.interaction_radius d['rsurf'] = rsurface(self.receptor.coor, radii, shape,", "(rotate_image3d, dilate_points_add, longest_distance) from powerfit.solutions import Solutions from 
saxstools.saxs_curve import", "time as _time import numpy as np try: import pyfftw", "the minimal grid shape is the size of the fixed", "d in zip(center, dimensions)] return origin def grid_restraints(restraints, voxelspacing, origin,", "d['rind'], d['lind'] = create_fifj_lookup_table(d['q'], e1, e2, bpr=self.beads_per_residue) d['rxyz'] = xyz1", "c['lsurf'] = np.zeros_like(c['rcore']) c['clashvol'] = np.zeros_like(c['rcore']) c['intervol'] = np.zeros_like(c['rcore']) c['interspace']", "1 pdone = m/total t = _time() - time0 _stdout.write('\\r{:d}/{:d}", "cl.image_from_array(q.context, float32array(d['lsurf'].array)) g['sampler'] = cl.Sampler(q.context, False, cl.addressing_mode.CLAMP, cl.filter_mode.LINEAR) g['lsurf'] =", "g['clashvol']) k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rsurf'], g['ft_intervol']) k.irfftn(q, g['ft_intervol'], g['intervol']) k.touch(q, g['clashvol'],", "cl_array.zeros(q, d['shape'], dtype=np.int32) # complex arrays g['ft_shape'] = list(d['shape']) g['ft_shape'][0]", "if self.coarse_grain: e1, xyz1 = coarse_grain(self.receptor, bpr=self.beads_per_residue) e2, xyz2 =", "diameter is the scanning chain dimensions1 = points1.ptp(axis=0) dimension2 =", "dilate_points((self.ligand.coor - self.ligand.center \\ + self.receptor.center), radii, volume.zeros_like(d['rcore'])) d['im_center'] =", "pdone = m/total t = _time() - time0 _stdout.write('\\r{:d}/{:d} ({:.2%},", "from disvis import volume from disvis.points import dilate_points from disvis.libdisvis", "def receptor(self): return self._receptor @receptor.setter def receptor(self, receptor): self._receptor =", "values self.rotations = [[[1, 0, 0], [0, 1, 0], [0,", "d['rot_ind'] = c['rot_ind'] def _print_progress(self, n, total, time0): m =", "= g['k'] time0 = _time() for n in xrange(g['nrot']): k.rotate_image3d(q,", "= self.max_clash/self.voxelspacing**3 d['min_interaction'] = self.min_interaction/self.voxelspacing**3 # SAXS data d['q'] =", "g['rot_ind'], n) if _stdout.isatty(): self._print_progress(n, g['nrot'], time0) self.queue.finish() d['best_chi2'] =", "parameters to be defined self._receptor = None self._ligand = None", "g['rind'], g['rxyz'], g['lind'], g['rot_lxyz'], g['origin'], g['voxelspacing'], g['fifj'], g['targetIq'], g['sq'], g['chi2'])", "weights): self._weights = weights @property def interaction_radius(self): return self._interaction_radius @interaction_radius.setter", "int(np.linalg.norm(self.ligand.coor - \\ self.ligand.center, axis=1).max() + \\ self.interaction_radius + 1.5)/self.voxelspacing", "+ 1 g['ft_rcore'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64) g['ft_rsurf'] = cl_array.zeros_like(g['ft_rcore'])", "self.weights is None: self.weights = np.ones(self.rotations.shape[0], dtype=np.float64) if len(self.weights) !=", "parameters with standard values self.rotations = [[[1, 0, 0], [0,", "+ 0.9 @property def min_interaction(self): return self._min_interaction @min_interaction.setter def min_interaction(self,", "be smaller than 1\") self._min_interaction = min_interaction + 0.9 @property", "d['q'] = self._q d['targetIq'] = self._Iq d['sq'] = self._sq if", "self._q, self._Iq, self._sq @saxsdata.setter def saxsdata(self, saxsdata): self._q, self._Iq, self._sq", "self.rotations[n], g['rot_lxyz']) k.fill(q, g['chi2'], 0) g['saxs_k'].calc_chi2(q, g['interspace'], g['q'], g['base_Iq'], g['rind'],", "scattering_curve(self._q, e1, xyz1, bpr=self.beads_per_residue) d['base_Iq'] += scattering_curve(self._q, e2, xyz2, bpr=self.beads_per_residue)", "d['chi2'] c['best_chi2'] = d['best_chi2'] 
c['rot_ind'] = np.zeros(d['shape'], dtype=np.int32) c['Iq'] =", "l_pos = (l_sel.center - lcenter)/voxelspacing g_restraints[n, 0:3] = r_pos g_restraints[n,", "__init__(self): # parameters to be defined self._receptor = None self._ligand", "is the size of the fixed protein in # each", "image rotate_image3d(c['im_lsurf'], c['vlength'], np.linalg.inv(c['rotmat'][n]), d['im_center'], c['lsurf']) c['ft_lsurf'] = rfftn(c['lsurf']).conj() c['clashvol']", "g_rxyz) g['lxyz'] = cl_array.to_device(q, g_lxyz) g['rot_lxyz'] = cl_array.zeros_like(g['lxyz']) g['chi2'] =", "g_lxyz = np.zeros((d['lxyz'].shape[0], 4), dtype=np.float32) g_lxyz[:, :3] = d['lxyz'][:] g['rxyz']", "pyopencl as cl import pyopencl.array as cl_array import disvis.pyclfft from", "np.asarray(self.rotations, dtype=np.float64) c['weights'] = np.asarray(self.weights, dtype=np.float64) c['nrot'] = d['nrot'] c['shape']", "cl_array.zeros(q, d['shape'], dtype=np.int32) g['origin'] = np.zeros(4, dtype=np.float32) g['origin'][:3] = d['origin'].astype(np.float32)", "c['nrot'] = d['nrot'] c['shape'] = d['shape'] c['max_clash'] = d['max_clash'] c['min_interaction']", "cl.filter_mode.LINEAR) g['lsurf'] = cl_array.zeros_like(g['rcore']) g['clashvol'] = cl_array.zeros_like(g['rcore']) g['intervol'] = cl_array.zeros_like(g['rcore'])", "g = self.gpu_data d = self.data q = self.queue g['rcore']", "d['shape'] c['max_clash'] = d['max_clash'] c['min_interaction'] = d['min_interaction'] c['vlength'] = int(np.linalg.norm(self.ligand.coor", "= rfftn(c['rsurf']) c['rotmat'] = np.asarray(self.rotations, dtype=np.float64) c['weights'] = np.asarray(self.weights, dtype=np.float64)", "cl.addressing_mode.CLAMP, cl.filter_mode.LINEAR) g['lsurf'] = cl_array.zeros_like(g['rcore']) g['clashvol'] = cl_array.zeros_like(g['rcore']) g['intervol'] =", "= d['lind'] c['rxyz'] = d['rxyz'] c['lxyz'] = d['lxyz'] c['chi2'] =", "np.zeros_like(c['rcore'], dtype=np.int64) # complex arrays c['ft_shape'] = list(d['shape']) c['ft_shape'][-1] =", "except ImportError: pass class FullSAXS(object): def __init__(self): # parameters to", "def max_clash(self): return self._max_clash @max_clash.setter def max_clash(self, max_clash): if max_clash", "xyz2 = self.ligand.elements, self.ligand.coor d['base_Iq'] = scattering_curve(self._q, e1, xyz1, bpr=self.beads_per_residue)", "= cl_array.zeros(q, g['ft_shape'], dtype=np.complex64) g['ft_rsurf'] = cl_array.zeros_like(g['ft_rcore']) g['ft_lsurf'] = cl_array.zeros_like(g['ft_rcore'])", "map # and make a grid of the ligand radii", "self.queue.finish() d['best_chi2'] = g['best_chi2'].get() d['rot_ind'] = g['rot_ind'].get() def rsurface(points, radius,", "- \\ self.ligand.center, axis=1).max() + \\ self.interaction_radius + 1.5)/self.voxelspacing c['origin']", "return self._data @property def saxsdata(self): return self._q, self._Iq, self._sq @saxsdata.setter", "mindis, maxdis = restraints[n] r_pos = (r_sel.center - origin)/voxelspacing l_pos", "g['lind'] = cl_array.to_device(q, d['lind'].astype(np.int32)) g_rxyz = np.zeros((d['rxyz'].shape[0], 4), dtype=np.float32) g_rxyz[:,", "_initialize(self): # check if requirements are set if any(x is", "self._Iq, self._sq @saxsdata.setter def saxsdata(self, saxsdata): self._q, self._Iq, self._sq =", "any(x is None for x in (self.receptor, self.ligand)): raise ValueError(\"Not", "g['best_chi2'].get() d['rot_ind'] = g['rot_ind'].get() def rsurface(points, radius, shape, voxelspacing): dimensions", "rsurface(self.receptor.coor, radii, shape, self.voxelspacing) d['rcore'] = rsurface(self.receptor.coor, vdw_radii, 
shape, self.voxelspacing)", "n in xrange(g['nrot']): k.rotate_image3d(q, g['sampler'], g['im_lsurf'], self.rotations[n], g['lsurf'], d['im_center']) k.rfftn(q,", "grid_shape = np.asarray(((dimensions1 + dimension2)/voxelspacing) + 10, dtype=np.int32)[::-1] return grid_shape", "ValueError(\"\") d = self.data # determine size for grid shape", "c['interspace'] = np.zeros_like(c['rcore'], dtype=np.int64) # complex arrays c['ft_shape'] = list(d['shape'])", "as np try: import pyfftw pyfftw.interfaces.cache.enable() pyfftw.interfaces.cache.set_keepalive_time(10) rfftn = pyfftw.interfaces.numpy_fft.rfftn", "4), dtype=np.float32) g_rxyz[:, :3] = d['rxyz'][:] g_lxyz = np.zeros((d['lxyz'].shape[0], 4),", "= cl_array.to_device(q, d['lind'].astype(np.int32)) g_rxyz = np.zeros((d['rxyz'].shape[0], 4), dtype=np.float32) g_rxyz[:, :3]", "irfftn = pyfftw.interfaces.numpy_fft.irfftn except ImportError: from numpy.fft import rfftn, irfftn", "= self.data g = self.gpu_data q = self.queue k =", "g['ft_intervol'] = cl_array.zeros_like(g['ft_rcore']) # allocate SAXS arrays g['q'] = cl_array.to_device(q,", "m = n + 1 pdone = m/total t =", "np.zeros_like(d['chi2']) def search(self): self._initialize() if self.queue is None: self._cpu_init() self._cpu_search()", "= None self._ligand = None # parameters with standard values", "= n if _stdout.isatty(): self._print_progress(n, c['nrot'], time0) d['best_chi2'] = c['best_chi2']", "d['im_center'] = np.asarray((self.receptor.center - d['rcore'].origin)/self.voxelspacing, dtype=np.float64) d['max_clash'] = self.max_clash/self.voxelspacing**3 d['min_interaction']", "to be defined self._receptor = None self._ligand = None #", "= cl_array.to_device(q, float32array(d['targetIq'])) g['sq'] = cl_array.to_device(q, float32array(d['sq'])) g['base_Iq'] = cl_array.to_device(q,", "g_rxyz[:, :3] = d['rxyz'][:] g_lxyz = np.zeros((d['lxyz'].shape[0], 4), dtype=np.float32) g_lxyz[:,", "= cl_array.zeros_like(g['ft_rcore']) g['ft_clashvol'] = cl_array.zeros_like(g['ft_rcore']) g['ft_intervol'] = cl_array.zeros_like(g['ft_rcore']) # allocate", "= rotmat @property def weights(self): return self._weights @weights.setter def weights(self,", "radius, rsurf) return rsurf def volume_origin(points, dimensions): center = points.mean(axis=0)", "CPU or GPU self._queue = None # unchangeable self._data =", "c['vlength'] = int(np.linalg.norm(self.ligand.coor - \\ self.ligand.center, axis=1).max() + \\ self.interaction_radius", "raise ValueError(\"Maximum allowed clashing volume cannot be negative\") self._max_clash =", "np try: import pyfftw pyfftw.interfaces.cache.enable() pyfftw.interfaces.cache.set_keepalive_time(10) rfftn = pyfftw.interfaces.numpy_fft.rfftn irfftn", "g['ft_lsurf'], g['ft_rcore'], g['ft_clashvol']) k.irfftn(q, g['ft_clashvol'], g['clashvol']) k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rsurf'], g['ft_intervol'])", "in xrange(g['nrot']): k.rotate_image3d(q, g['sampler'], g['im_lsurf'], self.rotations[n], g['lsurf'], d['im_center']) k.rfftn(q, g['lsurf'],", "matrices.\") self._rotations = rotmat @property def weights(self): return self._weights @weights.setter", "radii, volume.zeros_like(d['rcore'])) d['im_center'] = np.asarray((self.receptor.center - d['rcore'].origin)/self.voxelspacing, dtype=np.float64) d['max_clash'] =", "a search\") if self.weights is None: self.weights = np.ones(self.rotations.shape[0], dtype=np.float64)", "s=c['shape']) c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape']) np.logical_and(c['clashvol'] < c['max_clash'],", "def receptor(self, receptor): 
from __future__ import absolute_import, division

from sys import stdout as _stdout
from time import time as _time

import numpy as np
try:
    import pyfftw
    pyfftw.interfaces.cache.enable()
    pyfftw.interfaces.cache.set_keepalive_time(10)
    rfftn = pyfftw.interfaces.numpy_fft.rfftn
    irfftn = pyfftw.interfaces.numpy_fft.irfftn
except ImportError:
    from numpy.fft import rfftn, irfftn

from disvis import volume
from disvis.points import dilate_points
from disvis.libdisvis import (rotate_image3d, dilate_points_add, longest_distance)
from powerfit.solutions import Solutions
from saxstools.saxs_curve import scattering_curve, create_fifj_lookup_table
from saxstools.helpers import coarse_grain
from saxstools.libsaxstools import calc_chi2
from saxstools.kernels import Kernels as saxs_Kernels

try:
    import pyopencl as cl
    import pyopencl.array as cl_array
    import disvis.pyclfft
    from disvis.kernels import Kernels
    from disvis import pyclfft
except ImportError:
    pass


class FullSAXS(object):

    def __init__(self):
        # parameters to be defined
        self._receptor = None
        self._ligand = None

        # parameters with standard values
        self.rotations = [[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]
        self.weights = None
        self.voxelspacing = 1.0
        self.interaction_radius = 2.5
        self.max_clash = 100
        self.min_interaction = 300
        self.coarse_grain = True
        self.beads_per_residue = 2

        # CPU or GPU
        self._queue = None

        # data storage
        self._data = {}
        self._q = None
        self._Iq = None
        self._sq = None

    @property
    def receptor(self):
        return self._receptor

    @receptor.setter
    def receptor(self, receptor):
        self._receptor = receptor.duplicate()

    @property
    def ligand(self):
        return self._ligand

    @ligand.setter
    def ligand(self, ligand):
        self._ligand = ligand.duplicate()

    @property
    def rotations(self):
        return self._rotations

    @rotations.setter
    def rotations(self, rotations):
        rotmat = np.asarray(rotations, dtype=np.float64)
        if rotmat.ndim != 3:
            raise ValueError("Input should be a list of rotation matrices.")
        self._rotations = rotmat

    @property
    def weights(self):
        return self._weights

    @weights.setter
    def weights(self, weights):
        self._weights = weights

    @property
    def interaction_radius(self):
        return self._interaction_radius

    @interaction_radius.setter
    def interaction_radius(self, radius):
        if radius <= 0:
            raise ValueError("Interaction radius should be bigger than zero")
        self._interaction_radius = radius

    @property
    def voxelspacing(self):
        return self._voxelspacing

    @voxelspacing.setter
    def voxelspacing(self, voxelspacing):
        self._voxelspacing = voxelspacing

    @property
    def max_clash(self):
        return self._max_clash

    @max_clash.setter
    def max_clash(self, max_clash):
        if max_clash < 0:
            raise ValueError("Maximum allowed clashing volume cannot be negative")
        self._max_clash = max_clash + 0.9

    @property
    def min_interaction(self):
        return self._min_interaction

    @min_interaction.setter
    def min_interaction(self, min_interaction):
        if min_interaction < 1:
            raise ValueError("Minimum required interaction volume cannot be smaller than 1")
        self._min_interaction = min_interaction + 0.9

    @property
    def queue(self):
        return self._queue

    @queue.setter
    def queue(self, queue):
        self._queue = queue

    @property
    def data(self):
        return self._data

    @property
    def saxsdata(self):
        return self._q, self._Iq, self._sq

    @saxsdata.setter
    def saxsdata(self, saxsdata):
        self._q, self._Iq, self._sq = saxsdata

    def _initialize(self):
        # check if requirements are set
        if any(x is None for x in (self.receptor, self.ligand)):
            raise ValueError("Not all requirements are met for a search")

        if self.weights is None:
            self.weights = np.ones(self.rotations.shape[0], dtype=np.float64)

        if len(self.weights) != len(self.rotations):
            raise ValueError("")

        d = self.data

        # determine size for grid
        shape = grid_shape(self.receptor.coor, self.ligand.coor, self.voxelspacing)

        # calculate the interaction surface and core of the receptor
        vdw_radii = self.receptor.vdw_radius
        radii = vdw_radii + self.interaction_radius
        d['rsurf'] = rsurface(self.receptor.coor, radii, shape, self.voxelspacing)
        d['rcore'] = rsurface(self.receptor.coor, vdw_radii, shape, self.voxelspacing)

        d['origin'] = np.asarray(d['rcore'].origin, dtype=np.float64)
        d['shape'] = d['rcore'].shape
        d['start'] = d['rcore'].start
        d['nrot'] = self.rotations.shape[0]

        # set ligand center to the origin of the receptor map and make a
        # grid of the ligand
        radii = self.ligand.vdw_radius
        d['lsurf'] = dilate_points((self.ligand.coor - self.ligand.center \
            + self.receptor.center), radii, volume.zeros_like(d['rcore']))
        d['im_center'] = np.asarray((self.receptor.center - d['rcore'].origin)/self.voxelspacing, dtype=np.float64)

        d['max_clash'] = self.max_clash/self.voxelspacing**3
        d['min_interaction'] = self.min_interaction/self.voxelspacing**3

        # SAXS data
        d['q'] = self._q
        d['targetIq'] = self._Iq
        d['sq'] = self._sq

        if self.coarse_grain:
            e1, xyz1 = coarse_grain(self.receptor, bpr=self.beads_per_residue)
            e2, xyz2 = coarse_grain(self.ligand, bpr=self.beads_per_residue)
        else:
            e1, xyz1 = self.receptor.elements, self.receptor.coor
            e2, xyz2 = self.ligand.elements, self.ligand.coor

        d['base_Iq'] = scattering_curve(self._q, e1, xyz1, bpr=self.beads_per_residue)
        d['base_Iq'] += scattering_curve(self._q, e2, xyz2, bpr=self.beads_per_residue)

        d['fifj'], d['rind'], d['lind'] = create_fifj_lookup_table(d['q'], e1, e2, bpr=self.beads_per_residue)

        d['rxyz'] = xyz1
        d['lxyz'] = xyz2 - self.ligand.center

        d['chi2'] = np.zeros(d['rcore'].shape, dtype=np.float64)
        d['best_chi2'] = np.zeros_like(d['chi2'])

    def search(self):
        self._initialize()
        if self.queue is None:
            self._cpu_init()
            self._cpu_search()
        else:
            self._gpu_init()
            self._gpu_search()

        if _stdout.isatty():
            print()

        d = self.data
        ind = d['best_chi2'] > 0
        # shift the populated scores so the lowest retained value is zero
        d['best_chi2'][ind] -= d['best_chi2'][ind].min()

        best_chi2 = volume.Volume(d['best_chi2'], voxelspacing=self.voxelspacing, origin=d['origin'])
        return Solutions(best_chi2, self.rotations, d['rot_ind'])

    def _cpu_init(self):
        self.cpu_data = {}
        c = self.cpu_data
        d = self.data

        # real arrays
        c['rcore'] = d['rcore'].array
        c['rsurf'] = d['rsurf'].array
        c['im_lsurf'] = d['lsurf'].array
        c['lsurf'] = np.zeros_like(c['rcore'])
        c['clashvol'] = np.zeros_like(c['rcore'])
        c['intervol'] = np.zeros_like(c['rcore'])
        c['interspace'] = np.zeros_like(c['rcore'], dtype=np.int64)

        # complex arrays
        c['ft_shape'] = list(d['shape'])
        c['ft_shape'][-1] = d['shape'][-1]//2 + 1
        c['ft_lsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128)
        c['ft_rcore'] = np.zeros(c['ft_shape'], dtype=np.complex128)
        c['ft_rsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128)

        # initial calculations
        c['ft_rcore'] = rfftn(c['rcore'])
        c['ft_rsurf'] = rfftn(c['rsurf'])

        c['rotmat'] = np.asarray(self.rotations, dtype=np.float64)
        c['weights'] = np.asarray(self.weights, dtype=np.float64)
        c['nrot'] = d['nrot']
        c['shape'] = d['shape']
        c['max_clash'] = d['max_clash']
        c['min_interaction'] = d['min_interaction']
        c['vlength'] = int(np.linalg.norm(self.ligand.coor - self.ligand.center, axis=1).max() + \
            self.interaction_radius + 1.5)/self.voxelspacing
        c['origin'] = d['origin']

        # SAXS arrays
        c['q'] = d['q']
        c['targetIq'] = d['targetIq']
        c['sq'] = d['sq']
        c['base_Iq'] = d['base_Iq']
        c['fifj'] = d['fifj']
        c['rind'] = d['rind']
        c['lind'] = d['lind']
        c['rxyz'] = d['rxyz']
        c['lxyz'] = d['lxyz']
        c['chi2'] = d['chi2']
        c['best_chi2'] = d['best_chi2']
        c['rot_ind'] = np.zeros(d['shape'], dtype=np.int32)
        c['Iq'] = np.zeros_like(c['targetIq'])
        c['tmplxyz'] = np.zeros_like(c['lxyz'])

    def _cpu_search(self):
        d = self.data
        c = self.cpu_data

        time0 = _time()
        for n in xrange(c['rotmat'].shape[0]):

            # rotate ligand image
            rotate_image3d(c['im_lsurf'], c['vlength'], np.linalg.inv(c['rotmat'][n]), d['im_center'], c['lsurf'])

            c['ft_lsurf'] = rfftn(c['lsurf']).conj()
            c['clashvol'] = irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape'])
            c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape'])

            np.logical_and(c['clashvol'] < c['max_clash'],
                           c['intervol'] > c['min_interaction'], c['interspace'])

            print('Number of complexes to analyze: ', c['interspace'].sum())

            c['chi2'].fill(0)
            calc_chi2(c['interspace'], c['q'], c['base_Iq'], c['rind'], c['rxyz'], c['lind'],
                      (np.mat(c['rotmat'][n])*np.mat(c['lxyz']).T).T,
                      c['origin'], self.voxelspacing, c['fifj'], c['targetIq'], c['sq'], c['chi2'])

            ind = c['chi2'] > c['best_chi2']
            c['best_chi2'][ind] = c['chi2'][ind]
            c['rot_ind'][ind] = n

            if _stdout.isatty():
                self._print_progress(n, c['nrot'], time0)

        d['best_chi2'] = c['best_chi2']
        d['rot_ind'] = c['rot_ind']

    def _print_progress(self, n, total, time0):
        m = n + 1
        pdone = m/total
        t = _time() - time0
        _stdout.write('\r{:d}/{:d} ({:.2%}, ETA: {:d}s)    '\
            .format(m, total, pdone, int(t/pdone - t)))
        _stdout.flush()

    def _gpu_init(self):
        self.gpu_data = {}
        g = self.gpu_data
        d = self.data
        q = self.queue

        g['rcore'] = cl_array.to_device(q, float32array(d['rcore'].array))
        g['rsurf'] = cl_array.to_device(q, float32array(d['rsurf'].array))
        g['im_lsurf'] = cl.image_from_array(q.context, float32array(d['lsurf'].array))

        g['sampler'] = cl.Sampler(q.context, False, cl.addressing_mode.CLAMP, cl.filter_mode.LINEAR)

        g['lsurf'] = cl_array.zeros_like(g['rcore'])
        g['clashvol'] = cl_array.zeros_like(g['rcore'])
        g['intervol'] = cl_array.zeros_like(g['rcore'])
        g['interspace'] = cl_array.zeros(q, d['shape'], dtype=np.int32)

        # complex arrays
        g['ft_shape'] = list(d['shape'])
        g['ft_shape'][0] = d['shape'][0]//2 + 1
        g['ft_rcore'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64)
        g['ft_rsurf'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64)
        g['ft_lsurf'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64)
        g['ft_clashvol'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64)
        g['ft_intervol'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64)

        # SAXS arrays
        g['q'] = cl_array.to_device(q, float32array(d['q']))
        g['targetIq'] = cl_array.to_device(q, float32array(d['targetIq']))
        g['sq'] = cl_array.to_device(q, float32array(d['sq']))
        g['base_Iq'] = cl_array.to_device(q, float32array(d['base_Iq']))
        g['fifj'] = cl_array.to_device(q, float32array(d['fifj']))
        g['rind'] = cl_array.to_device(q, d['rind'].astype(np.int32))
        g['lind'] = cl_array.to_device(q, d['lind'].astype(np.int32))

        g_rxyz = np.zeros((d['rxyz'].shape[0], 4), dtype=np.float32)
        g_rxyz[:, :3] = d['rxyz'][:]
        g_lxyz = np.zeros((d['lxyz'].shape[0], 4), dtype=np.float32)
        g_lxyz[:, :3] = d['lxyz'][:]
        g['rxyz'] = cl_array.to_device(q, g_rxyz)
        g['lxyz'] = cl_array.to_device(q, g_lxyz)
        g['rot_lxyz'] = cl_array.zeros_like(g['lxyz'])

        g['chi2'] = cl_array.to_device(q, d['chi2'].astype(np.float32))
        g['best_chi2'] = cl_array.to_device(q, d['best_chi2'].astype(np.float32))
        g['rot_ind'] = cl_array.zeros(q, d['shape'], dtype=np.int32)

        g['origin'] = np.zeros(4, dtype=np.float32)
        g['origin'][:3] = d['origin'].astype(np.float32)
        g['voxelspacing'] = np.float32(self.voxelspacing)

        # kernels
        g['k'] = Kernels(q.context)
        g['saxs_k'] = saxs_Kernels(q.context)
        g['k'].rfftn = pyclfft.RFFTn(q.context, d['shape'])
        g['k'].irfftn = pyclfft.iRFFTn(q.context, d['shape'])

        g['k'].rfftn(q, g['rcore'], g['ft_rcore'])
        g['k'].rfftn(q, g['rsurf'], g['ft_rsurf'])

        g['nrot'] = d['nrot']
        g['max_clash'] = d['max_clash']
        g['min_interaction'] = d['min_interaction']

    def _gpu_search(self):
        d = self.data
        g = self.gpu_data
        q = self.queue
        k = g['k']

        time0 = _time()
        for n in xrange(g['nrot']):

            k.rotate_image3d(q, g['sampler'], g['im_lsurf'], self.rotations[n], g['lsurf'], d['im_center'])

            k.rfftn(q, g['lsurf'], g['ft_lsurf'])
            k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rcore'], g['ft_clashvol'])
            k.irfftn(q, g['ft_clashvol'], g['clashvol'])

            k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rsurf'], g['ft_intervol'])
            k.irfftn(q, g['ft_intervol'], g['intervol'])

            k.touch(q, g['clashvol'], g['max_clash'], g['intervol'], g['min_interaction'], g['interspace'])

            g['saxs_k'].rotate_points(q, g['lxyz'], self.rotations[n], g['rot_lxyz'])
            k.fill(q, g['chi2'], 0)
            g['saxs_k'].calc_chi2(q, g['interspace'], g['q'], g['base_Iq'], g['rind'], g['rxyz'],
                                  g['lind'], g['rot_lxyz'], g['origin'], g['voxelspacing'],
                                  g['fifj'], g['targetIq'], g['sq'], g['chi2'])

            g['saxs_k'].take_best(q, g['chi2'], g['best_chi2'], g['rot_ind'], n)

            if _stdout.isatty():
                self._print_progress(n, g['nrot'], time0)

        self.queue.finish()
        d['best_chi2'] = g['best_chi2'].get()
        d['rot_ind'] = g['rot_ind'].get()


def rsurface(points, radius, shape, voxelspacing):
    dimensions = [x*voxelspacing for x in shape]
    origin = volume_origin(points, dimensions)
    rsurf = volume.zeros(shape, voxelspacing, origin)
    rsurf = dilate_points(points, radius, rsurf)
    return rsurf


def volume_origin(points, dimensions):
    center = points.mean(axis=0)
    origin = [(c - d/2.0) for c, d in zip(center, dimensions)]
    return origin


def grid_restraints(restraints, voxelspacing, origin, lcenter):
    nrestraints = len(restraints)
    g_restraints = np.zeros((nrestraints, 8), dtype=np.float64)
    for n in range(nrestraints):
        r_sel, l_sel, mindis, maxdis = restraints[n]
        r_pos = (r_sel.center - origin)/voxelspacing
        l_pos = (l_sel.center - lcenter)/voxelspacing
        g_restraints[n, 0:3] = r_pos
        g_restraints[n, 3:6] = l_pos
        g_restraints[n, 6] = mindis/voxelspacing
        g_restraints[n, 7] = maxdis/voxelspacing
    return g_restraints


def grid_shape(points1, points2, voxelspacing):
    shape = min_grid_shape(points1, points2, voxelspacing)
    shape = [volume.radix235(x) for x in shape]
    return shape


def min_grid_shape(points1, points2, voxelspacing):
    # the minimal grid shape is the size of the fixed protein in
    # each dimension plus the longest diameter of the scanning chain
    dimensions1 = points1.ptp(axis=0)
    dimension2 = longest_distance(points2)
    grid_shape = np.asarray(((dimensions1 + dimension2)/voxelspacing) + 10, dtype=np.int32)[::-1]
    return grid_shape


def float32array(array_like):
    return np.asarray(array_like, dtype=np.float32)
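For orientation, a minimal driving sketch follows. The `load_pdb` and `load_saxs_profile` helpers are hypothetical stand-ins (the real loaders are not part of this module); only the `FullSAXS` attribute and method names are taken from the code above.

# Hedged usage sketch: load_pdb and load_saxs_profile are hypothetical.
def run_search(receptor_file, ligand_file, saxs_file):
    receptor = load_pdb(receptor_file)        # hypothetical: needs .coor, .center, .vdw_radius, .elements, .duplicate()
    ligand = load_pdb(ligand_file)            # hypothetical loader
    q, Iq, sq = load_saxs_profile(saxs_file)  # hypothetical: experimental SAXS curve arrays

    fs = FullSAXS()
    fs.receptor = receptor          # stored via .duplicate()
    fs.ligand = ligand
    fs.saxsdata = (q, Iq, sq)       # unpacked into _q, _Iq, _sq
    fs.voxelspacing = 2.0           # coarser grid for a quicker scan
    fs.queue = None                 # None selects the CPU path; a pyopencl CommandQueue selects the GPU path

    # search() returns Solutions(best_chi2_volume, rotations, rot_ind)
    return fs.search()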
[ "config_file_name = \"config.json\" json_file = open(config_file_name) config_data = json.load(json_file) size_list", "in range(0, num_obs): # random center center = [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]),", "python3 import os import sys import time sys.path.append(os.getcwd()+'/lib') import random", "ObsInfo import ObsInfo def generate_random_obs(num_obs: int, size_list: list, config_data): \"\"\"", "(num_obs > 0.5): for i in range(0, num_obs): # random", "field from ObsInfo import ObsInfo def generate_random_obs(num_obs: int, size_list: list,", "size_list: list, config_data): \"\"\" config_file_name = \"config.json\" json_file = open(config_file_name)", "import random from dataclasses import dataclass, field from ObsInfo import", "= open(config_file_name) config_data = json.load(json_file) size_list = [length, width, height]", "sys.path.append(os.getcwd()+'/lib') import random from dataclasses import dataclass, field from ObsInfo", "json_file = open(config_file_name) config_data = json.load(json_file) size_list = [length, width,", "config_data = json.load(json_file) size_list = [length, width, height] \"\"\" ObsList", "for i in range(0, num_obs): # random center center =", "from ObsInfo import ObsInfo def generate_random_obs(num_obs: int, size_list: list, config_data):", "range(0, num_obs): # random center center = [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\", "from dataclasses import dataclass, field from ObsInfo import ObsInfo def", "list, config_data): \"\"\" config_file_name = \"config.json\" json_file = open(config_file_name) config_data", "= [] if (num_obs > 0.5): for i in range(0,", "[length, width, height] \"\"\" ObsList = [] if (num_obs >", "center = [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][1])]", "import os import sys import time sys.path.append(os.getcwd()+'/lib') import random from", "= \"config.json\" json_file = open(config_file_name) config_data = json.load(json_file) size_list =", "ObsList = [] if (num_obs > 0.5): for i in", "dataclass, field from ObsInfo import ObsInfo def generate_random_obs(num_obs: int, size_list:", "= json.load(json_file) size_list = [length, width, height] \"\"\" ObsList =", "import time sys.path.append(os.getcwd()+'/lib') import random from dataclasses import dataclass, field", "# random center center = [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]),", "open(config_file_name) config_data = json.load(json_file) size_list = [length, width, height] \"\"\"", "[] if (num_obs > 0.5): for i in range(0, num_obs):", "int, size_list: list, config_data): \"\"\" config_file_name = \"config.json\" json_file =", "time sys.path.append(os.getcwd()+'/lib') import random from dataclasses import dataclass, field from", "config_data): \"\"\" config_file_name = \"config.json\" json_file = open(config_file_name) config_data =", "\"\"\" config_file_name = \"config.json\" json_file = open(config_file_name) config_data = 
json.load(json_file)", "\"config.json\" json_file = open(config_file_name) config_data = json.load(json_file) size_list = [length,", "width, height] \"\"\" ObsList = [] if (num_obs > 0.5):", "if (num_obs > 0.5): for i in range(0, num_obs): #", "i in range(0, num_obs): # random center center = [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0],", "= [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][1])] ObsList.append(", "ObsInfo def generate_random_obs(num_obs: int, size_list: list, config_data): \"\"\" config_file_name =", "random from dataclasses import dataclass, field from ObsInfo import ObsInfo", "dataclasses import dataclass, field from ObsInfo import ObsInfo def generate_random_obs(num_obs:", "0.5): for i in range(0, num_obs): # random center center", "random center center = [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\", "[random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][1])] ObsList.append( ObsInfo(center,", "sys import time sys.path.append(os.getcwd()+'/lib') import random from dataclasses import dataclass,", "\"\"\" ObsList = [] if (num_obs > 0.5): for i", "os import sys import time sys.path.append(os.getcwd()+'/lib') import random from dataclasses", "> 0.5): for i in range(0, num_obs): # random center", "def generate_random_obs(num_obs: int, size_list: list, config_data): \"\"\" config_file_name = \"config.json\"", "config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][1])] ObsList.append( ObsInfo(center, size_list)", "height] \"\"\" ObsList = [] if (num_obs > 0.5): for", "import sys import time sys.path.append(os.getcwd()+'/lib') import random from dataclasses import", "random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][1])] ObsList.append( ObsInfo(center, size_list) ) return", "json.load(json_file) size_list = [length, width, height] \"\"\" ObsList = []", "config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][1])] ObsList.append( ObsInfo(center, size_list) ) return ObsList", "num_obs): # random center center = [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0],", "size_list = [length, width, height] \"\"\" ObsList = [] if", "#!/usr/bin/env 
python3 import os import sys import time sys.path.append(os.getcwd()+'/lib') import", "import dataclass, field from ObsInfo import ObsInfo def generate_random_obs(num_obs: int,", "= [length, width, height] \"\"\" ObsList = [] if (num_obs", "generate_random_obs(num_obs: int, size_list: list, config_data): \"\"\" config_file_name = \"config.json\" json_file", "\\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][1])] ObsList.append( ObsInfo(center, size_list) )", "center center = [random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_X\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][0], config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Y\"][1]), \\ random.uniform(config_data[\"LAB_SPACE_LIMIT\"][\"LIMIT_Z\"][0],", "import ObsInfo def generate_random_obs(num_obs: int, size_list: list, config_data): \"\"\" config_file_name" ]
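A short usage sketch, assuming the "config.json" layout hinted at in the docstring (a LAB_SPACE_LIMIT object with LIMIT_X/LIMIT_Y/LIMIT_Z min-max pairs); the concrete sizes and count below are illustrative.

# Hedged usage sketch: the config.json contents are an assumption.
import json

with open("config.json") as json_file:
    config_data = json.load(json_file)

# five boxes of 0.2 x 0.2 x 0.5 at random centers inside the lab space
obs_list = generate_random_obs(5, [0.2, 0.2, 0.5], config_data)
print(len(obs_list))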
[ "is free software: you can redistribute it and/or modify #", "TG-UserBot - A modular Telegram UserBot script for Python. #", "info:', 'invite_users': 'Add users:', 'pin_messages': 'Pin messages:' } ChatAdminRights =", "'post_messages': 'Post messages:', 'edit_messages': 'Edit messages:', 'delete_messages': 'Delete messages:', 'ban_users':", "'Post messages:', 'edit_messages': 'Edit messages:', 'delete_messages': 'Delete messages:', 'ban_users': 'Ban", "kicked = full_chat.kicked_count if hasattr(full_chat, 'banned_count'): banned = full_chat.banned_count if", "ID:** `{chat.full_chat.id}``\" info = await get_entity_info(chat) text += await unparse_info(client,", "from telethon.tl import types from ..utils.client import UserBotClient from ..utils.helpers", "= { 'until_date': 'Banned until:', 'view_messages': 'Read messages:', 'send_messages': 'Send", "'Add new admins:' } async def parse_admin_rights(AdminRights: types.ChatAdminRights) -> str:", "resolve_channel(client: UserBotClient, channel: types.ChannelFull) -> str: text = '' default_banned_rights", "async def unparse_info(client: UserBotClient, creator: int, admins: int, bots: int,", "WITHOUT ANY WARRANTY; without even the implied warranty of #", "int, int, int, int, int]: creator, admins, bots, participants, kicked,", "await unparse_rights(\"Admin rights:\", parsed) text += f\"\\n{unparsed}\" if default_banned_rights: parsed", "modular Telegram UserBot script for Python. # Copyright (C) 2019", "if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed = await unparse_rights(\"Admin", "c.megagroup: channel_type = \"Megagroup\" admin_rights = c.admin_rights banned_rights = c.banned_rights", "option) any later version. # # TG-UserBot is distributed in", "l in rights.split('\\n'): splat = l.split(':') text += f\"\\n **{splat[0]}:**", "under the terms of the GNU General Public License as", "c = await client.get_entity(creator) text += f\"\\n**Creator:** {await get_chat_link(c)}\" if", "'bot_info'): bots = len(full_chat.bot_info) else: if hasattr(full_chat, 'bot_info'): bots =", "if users: text += f\"\\n**Participants:** {users}\" if admins: text +=", "parsed = await parse_banned_rights(banned_rights) unparsed = await unparse_rights(\"Banned rights:\", parsed)", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "'participants'): admins, participants = 0, 0 for p in full_chat.participants.participants:", "0 for p in full_chat.participants.participants: if isinstance(p, types.ChatParticipantCreator): creator =", "isinstance(p, types.ChatParticipantCreator): creator = p.user_id if isinstance(p, types.ChatParticipant): participants +=", "or # (at your option) any later version. # #", "License # along with TG-UserBot. If not, see <https://www.gnu.org/licenses/>. from", "arg: Union[types.ChatFull, types.ChannelFull] ) -> Tuple[int, int, int, int, int,", "until:', 'view_messages': 'Read messages:', 'send_messages': 'Send messages:', 'send_media': 'Send media:',", "'send_media': 'Send media:', 'send_stickers': 'Send stickers:', 'send_gifs': 'Send GIFs:', 'send_games':", "from ..utils.helpers import get_chat_link ChatBannedRights = { 'until_date': 'Banned until:',", "types.ChannelFull] ) -> Tuple[int, int, int, int, int, int]: creator,", "text = f\"\\n**Chat ID:** `{chat.full_chat.id}``\" info = await get_entity_info(chat) text", "await parse_banned_rights(banned_rights) unparsed = await unparse_rights(\"Banned rights:\", parsed) text +=", "# GNU General Public License for more details. 
# #", "return text async def unparse_rights(title: str, rights: str) -> str:", "text = '' default_banned_rights = None banned_rights = None admin_rights", "attr == \"until_date\": text.append(f'{string} {right.ctime()} (UTC)') else: text.append(f'{string} {right}') return", "if hasattr(full_chat, 'admins_count'): admins = full_chat.admins_count if hasattr(full_chat, 'kicked_count'): kicked", "# TG-UserBot - A modular Telegram UserBot script for Python.", "- A modular Telegram UserBot script for Python. # Copyright", "'invite_users': 'Invite users:', 'pin_messages': 'Pin messages:', 'add_admins': 'Add new admins:'", "get_entity_info(chat) text += await unparse_info(client, *info) admin_rights = None default_banned_rights", "admins:' } async def parse_admin_rights(AdminRights: types.ChatAdminRights) -> str: text =", "Foundation, either version 3 of the License, or # (at", "f\"\\n{unparsed}\" return text async def resolve_chat(client: UserBotClient, chat: types.ChatFull) ->", "admins += 1 return creator, admins, bots, participants, kicked, banned", "= c.default_banned_rights break text += f\"\\n**{channel_type} ID:** `{channel.full_chat.id}`\" info =", "# (at your option) any later version. # # TG-UserBot", "-> str: text = '' if creator: c = await", "f\"\\n{unparsed}\" if default_banned_rights: parsed = await parse_banned_rights(default_banned_rights) unparsed = await", "c.admin_rights default_banned_rights = c.default_banned_rights break if admin_rights: parsed = await", "parsed = await parse_admin_rights(admin_rights) unparsed = await unparse_rights(\"Admin rights:\", parsed)", "'Send games:', 'send_inline': 'Send inline messages:', 'embed_links': 'Send embed links:',", "UserBotClient, chat: types.ChatFull) -> str: text = f\"\\n**Chat ID:** `{chat.full_chat.id}``\"", "ChatBannedRights = { 'until_date': 'Banned until:', 'view_messages': 'Read messages:', 'send_messages':", "unparse_rights(\"Admin rights:\", parsed) text += f\"\\n{unparsed}\" if default_banned_rights: parsed =", "'Banned until:', 'view_messages': 'Read messages:', 'send_messages': 'Send messages:', 'send_media': 'Send", "General Public License for more details. # # You should", "isinstance(full_chat, types.ChannelFull): if hasattr(full_chat, 'participants_count'): participants = full_chat.participants_count if hasattr(full_chat,", "def unparse_rights(title: str, rights: str) -> str: text = f\"**{title}**\"", "for l in rights.split('\\n'): splat = l.split(':') text += f\"\\n", "getattr(AdminRights, attr, False) if right: text.append(f'{string} {right}') return '\\n'.join(text) async", "A PARTICULAR PURPOSE. See the # GNU General Public License", "the Free Software Foundation, either version 3 of the License,", "== chat.full_chat.id: admin_rights = c.admin_rights default_banned_rights = c.default_banned_rights break if", "kicked, banned = (None, None, None, None, None, None) full_chat", "= \"Megagroup\" admin_rights = c.admin_rights banned_rights = c.banned_rights default_banned_rights =", "even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "types.ChannelFull) -> str: text = '' default_banned_rights = None banned_rights", "free software: you can redistribute it and/or modify # it", "or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "+= f\"\\n{unparsed}\" if banned_rights: parsed = await parse_banned_rights(banned_rights) unparsed =", "get_chat_link(c)}\" if users: text += f\"\\n**Participants:** {users}\" if admins: text", "**{splat[0]}:** `{':'.join(splat[1:])}`\" return text async def resolve_channel(client: UserBotClient, channel: types.ChannelFull)", "*info) if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed = await", "f\"\\n**Kicked:** {kicked}\" if banned: text += f\"\\n**Banned:** {banned}\" return text", "of the GNU General Public License as published by #", "admins: text += f\"\\n**Admins:** {admins}\" if bots: text += f\"\\n**Bots:**", "..utils.client import UserBotClient from ..utils.helpers import get_chat_link ChatBannedRights = {", "in ChatBannedRights.items(): right = getattr(BannedRights, attr, False) if right: if", "return text async def resolve_chat(client: UserBotClient, chat: types.ChatFull) -> str:", "'\\n'.join(text) async def parse_banned_rights(BannedRights: types.ChatBannedRights) -> str: text = []", "your option) any later version. # # TG-UserBot is distributed", "+= await unparse_info(client, *info) if admin_rights: parsed = await parse_admin_rights(admin_rights)", "unparse_rights(title: str, rights: str) -> str: text = f\"**{title}**\" for", "if c.id == channel.full_chat.id: if c.megagroup: channel_type = \"Megagroup\" admin_rights", "if hasattr(full_chat, 'banned_count'): banned = full_chat.banned_count if hasattr(full_chat, 'bot_info'): bots", "string in ChatBannedRights.items(): right = getattr(BannedRights, attr, False) if right:", "creator: int, admins: int, bots: int, users: int, kicked: int,", "with TG-UserBot. If not, see <https://www.gnu.org/licenses/>. 
from typing import Tuple,", "can redistribute it and/or modify # it under the terms", "'bot_info'): bots = len(full_chat.bot_info) if hasattr(full_chat, 'participants'): admins, participants =", "by # the Free Software Foundation, either version 3 of", "= f\"\\n**Chat ID:** `{chat.full_chat.id}``\" info = await get_entity_info(chat) text +=", "async def unparse_rights(title: str, rights: str) -> str: text =", "admins = full_chat.admins_count if hasattr(full_chat, 'kicked_count'): kicked = full_chat.kicked_count if", "None, None) full_chat = arg.full_chat if isinstance(full_chat, types.ChannelFull): if hasattr(full_chat,", "banned = (None, None, None, None, None, None) full_chat =", "if isinstance(full_chat, types.ChannelFull): if hasattr(full_chat, 'participants_count'): participants = full_chat.participants_count if", "len(full_chat.bot_info) if hasattr(full_chat, 'participants'): admins, participants = 0, 0 for", "admin_rights = c.admin_rights banned_rights = c.banned_rights default_banned_rights = c.default_banned_rights break", "c.banned_rights default_banned_rights = c.default_banned_rights break text += f\"\\n**{channel_type} ID:** `{channel.full_chat.id}`\"", "bots, participants, kicked, banned = (None, None, None, None, None,", "text += f\"\\n{unparsed}\" return text async def resolve_chat(client: UserBotClient, chat:", "2019 Kandarp <https://github.com/kandnub> # # TG-UserBot is free software: you", "int, int]: creator, admins, bots, participants, kicked, banned = (None,", "'invite_users': 'Add users:', 'pin_messages': 'Pin messages:' } ChatAdminRights = {", "p in full_chat.participants.participants: if isinstance(p, types.ChatParticipantCreator): creator = p.user_id if", "'pin_messages': 'Pin messages:' } ChatAdminRights = { 'change_info': 'Change chat", "= await get_entity_info(chat) text += await unparse_info(client, *info) admin_rights =", "channel_type = \"Megagroup\" admin_rights = c.admin_rights banned_rights = c.banned_rights default_banned_rights", "License for more details. # # You should have received", "from typing import Tuple, Union from telethon.tl import types from", "if attr == \"until_date\": text.append(f'{string} {right.ctime()} (UTC)') else: text.append(f'{string} {right}')", "links:', 'send_polls': 'Send polls:', 'change_info': 'Change info:', 'invite_users': 'Add users:',", "[] for attr, string in ChatBannedRights.items(): right = getattr(BannedRights, attr,", "c.id == channel.full_chat.id: if c.megagroup: channel_type = \"Megagroup\" admin_rights =", "None default_banned_rights = None for c in chat.chats: if c.id", "None, None, None) full_chat = arg.full_chat if isinstance(full_chat, types.ChannelFull): if", "If not, see <https://www.gnu.org/licenses/>. from typing import Tuple, Union from", "attr, string in ChatBannedRights.items(): right = getattr(BannedRights, attr, False) if", "chat info:', 'post_messages': 'Post messages:', 'edit_messages': 'Edit messages:', 'delete_messages': 'Delete", "creator: c = await client.get_entity(creator) text += f\"\\n**Creator:** {await get_chat_link(c)}\"", "the License, or # (at your option) any later version.", "= await client.get_entity(creator) text += f\"\\n**Creator:** {await get_chat_link(c)}\" if users:", "async def resolve_chat(client: UserBotClient, chat: types.ChatFull) -> str: text =", "text async def unparse_rights(title: str, rights: str) -> str: text", "PARTICULAR PURPOSE. 
See the # GNU General Public License for", "import Tuple, Union from telethon.tl import types from ..utils.client import", "messages:', 'send_media': 'Send media:', 'send_stickers': 'Send stickers:', 'send_gifs': 'Send GIFs:',", "Union[types.ChatFull, types.ChannelFull] ) -> Tuple[int, int, int, int, int, int]:", "'Invite users:', 'pin_messages': 'Pin messages:', 'add_admins': 'Add new admins:' }", "p.user_id if isinstance(p, types.ChatParticipant): participants += 1 if isinstance(p, types.ChatParticipantAdmin):", "if right: text.append(f'{string} {right}') return '\\n'.join(text) async def parse_banned_rights(BannedRights: types.ChatBannedRights)", "= [] for attr, string in ChatBannedRights.items(): right = getattr(BannedRights,", "text.append(f'{string} {right}') return '\\n'.join(text) async def get_entity_info( arg: Union[types.ChatFull, types.ChannelFull]", "modify # it under the terms of the GNU General", "terms of the GNU General Public License as published by", "{ 'until_date': 'Banned until:', 'view_messages': 'Read messages:', 'send_messages': 'Send messages:',", "f\"\\n**Participants:** {users}\" if admins: text += f\"\\n**Admins:** {admins}\" if bots:", "details. # # You should have received a copy of", "if c.megagroup: channel_type = \"Megagroup\" admin_rights = c.admin_rights banned_rights =", "admin_rights = None channel_type = \"Channel\" for c in channel.chats:", "c.default_banned_rights break text += f\"\\n**{channel_type} ID:** `{channel.full_chat.id}`\" info = await", "= len(full_chat.bot_info) if hasattr(full_chat, 'participants'): admins, participants = 0, 0", "parsed) text += f\"\\n{unparsed}\" if default_banned_rights: parsed = await parse_banned_rights(default_banned_rights)", "l.split(':') text += f\"\\n **{splat[0]}:** `{':'.join(splat[1:])}`\" return text async def", "users:', 'invite_users': 'Invite users:', 'pin_messages': 'Pin messages:', 'add_admins': 'Add new", "published by # the Free Software Foundation, either version 3", "text = [] for attr, string in ChatAdminRights.items(): right =", "rights:\", parsed) text += f\"\\n{unparsed}\" if default_banned_rights: parsed = await", "if banned_rights: parsed = await parse_banned_rights(banned_rights) unparsed = await unparse_rights(\"Banned", "media:', 'send_stickers': 'Send stickers:', 'send_gifs': 'Send GIFs:', 'send_games': 'Send games:',", "info:', 'post_messages': 'Post messages:', 'edit_messages': 'Edit messages:', 'delete_messages': 'Delete messages:',", "'ban_users': 'Ban users:', 'invite_users': 'Invite users:', 'pin_messages': 'Pin messages:', 'add_admins':", "return '\\n'.join(text) async def get_entity_info( arg: Union[types.ChatFull, types.ChannelFull] ) ->", "None, None, None, None) full_chat = arg.full_chat if isinstance(full_chat, types.ChannelFull):", "isinstance(p, types.ChatParticipant): participants += 1 if isinstance(p, types.ChatParticipantAdmin): admins +=", "text += f\"\\n**Banned:** {banned}\" return text async def unparse_rights(title: str,", "+= f\"\\n**Participants:** {users}\" if admins: text += f\"\\n**Admins:** {admins}\" if", "parsed) text += f\"\\n{unparsed}\" if banned_rights: parsed = await parse_banned_rights(banned_rights)", "if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) else: if hasattr(full_chat, 'bot_info'):", "admins, bots, participants, kicked, banned = (None, None, None, None,", "'edit_messages': 'Edit messages:', 'delete_messages': 'Delete messages:', 'ban_users': 'Ban users:', 'invite_users':", "banned: int) -> str: text = '' if creator: c", 
"types.ChatFull) -> str: text = f\"\\n**Chat ID:** `{chat.full_chat.id}``\" info =", "if hasattr(full_chat, 'participants_count'): participants = full_chat.participants_count if hasattr(full_chat, 'admins_count'): admins", "f\"\\n**Bots:** {bots}\" if kicked: text += f\"\\n**Kicked:** {kicked}\" if banned:", "'Send messages:', 'send_media': 'Send media:', 'send_stickers': 'Send stickers:', 'send_gifs': 'Send", "messages:' } ChatAdminRights = { 'change_info': 'Change chat info:', 'post_messages':", "UserBotClient, creator: int, admins: int, bots: int, users: int, kicked:", "received a copy of the GNU General Public License #", "import UserBotClient from ..utils.helpers import get_chat_link ChatBannedRights = { 'until_date':", "str: text = f\"\\n**Chat ID:** `{chat.full_chat.id}``\" info = await get_entity_info(chat)", "admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed = await unparse_rights(\"Admin rights:\",", "unparse_rights(\"Default banned rights:\", parsed) text += f\"\\n{unparsed}\" return text async", "License as published by # the Free Software Foundation, either", "See the # GNU General Public License for more details.", "str: text = [] for attr, string in ChatBannedRights.items(): right", "(C) 2019 Kandarp <https://github.com/kandnub> # # TG-UserBot is free software:", "text async def resolve_channel(client: UserBotClient, channel: types.ChannelFull) -> str: text", "'send_games': 'Send games:', 'send_inline': 'Send inline messages:', 'embed_links': 'Send embed", "int, admins: int, bots: int, users: int, kicked: int, banned:", "default_banned_rights = None banned_rights = None admin_rights = None channel_type", "parsed) text += f\"\\n{unparsed}\" return text async def resolve_chat(client: UserBotClient,", "= await unparse_rights(\"Default banned rights:\", parsed) text += f\"\\n{unparsed}\" return", "ID:** `{channel.full_chat.id}`\" info = await get_entity_info(channel) text += await unparse_info(client,", "splat = l.split(':') text += f\"\\n **{splat[0]}:** `{':'.join(splat[1:])}`\" return text", "False) if right: text.append(f'{string} {right}') return '\\n'.join(text) async def parse_banned_rights(BannedRights:", "A modular Telegram UserBot script for Python. # Copyright (C)", "that it will be useful, # but WITHOUT ANY WARRANTY;", "messages:', 'edit_messages': 'Edit messages:', 'delete_messages': 'Delete messages:', 'ban_users': 'Ban users:',", "messages:', 'delete_messages': 'Delete messages:', 'ban_users': 'Ban users:', 'invite_users': 'Invite users:',", "if bots: text += f\"\\n**Bots:** {bots}\" if kicked: text +=", "f\"**{title}**\" for l in rights.split('\\n'): splat = l.split(':') text +=", "either version 3 of the License, or # (at your", "text += f\"\\n**Participants:** {users}\" if admins: text += f\"\\n**Admins:** {admins}\"", "<filename>userbot/helper_funcs/misc.py # TG-UserBot - A modular Telegram UserBot script for", "text += f\"\\n{unparsed}\" if banned_rights: parsed = await parse_banned_rights(banned_rights) unparsed", "{kicked}\" if banned: text += f\"\\n**Banned:** {banned}\" return text async", "in chat.chats: if c.id == chat.full_chat.id: admin_rights = c.admin_rights default_banned_rights", "more details. # # You should have received a copy", "the GNU General Public License # along with TG-UserBot. 
If", "full_chat.kicked_count if hasattr(full_chat, 'banned_count'): banned = full_chat.banned_count if hasattr(full_chat, 'bot_info'):", "kicked: int, banned: int) -> str: text = '' if", "right = getattr(AdminRights, attr, False) if right: text.append(f'{string} {right}') return", "if kicked: text += f\"\\n**Kicked:** {kicked}\" if banned: text +=", "participants, kicked, banned = (None, None, None, None, None, None)", "text += f\"\\n**Admins:** {admins}\" if bots: text += f\"\\n**Bots:** {bots}\"", "be useful, # but WITHOUT ANY WARRANTY; without even the", "= getattr(AdminRights, attr, False) if right: text.append(f'{string} {right}') return '\\n'.join(text)", "TG-UserBot is free software: you can redistribute it and/or modify", "async def get_entity_info( arg: Union[types.ChatFull, types.ChannelFull] ) -> Tuple[int, int,", "text += f\"\\n**{channel_type} ID:** `{channel.full_chat.id}`\" info = await get_entity_info(channel) text", "+= f\"\\n{unparsed}\" if default_banned_rights: parsed = await parse_banned_rights(default_banned_rights) unparsed =", "str: text = '' if creator: c = await client.get_entity(creator)", "types.ChatParticipantAdmin): admins += 1 return creator, admins, bots, participants, kicked,", "= '' default_banned_rights = None banned_rights = None admin_rights =", "channel.full_chat.id: if c.megagroup: channel_type = \"Megagroup\" admin_rights = c.admin_rights banned_rights", "if hasattr(full_chat, 'kicked_count'): kicked = full_chat.kicked_count if hasattr(full_chat, 'banned_count'): banned", "'Change info:', 'invite_users': 'Add users:', 'pin_messages': 'Pin messages:' } ChatAdminRights", "unparsed = await unparse_rights(\"Banned rights:\", parsed) text += f\"\\n{unparsed}\" if", "{admins}\" if bots: text += f\"\\n**Bots:** {bots}\" if kicked: text", "if hasattr(full_chat, 'participants'): admins, participants = 0, 0 for p", "= (None, None, None, None, None, None) full_chat = arg.full_chat", "= { 'change_info': 'Change chat info:', 'post_messages': 'Post messages:', 'edit_messages':", "import types from ..utils.client import UserBotClient from ..utils.helpers import get_chat_link", "= getattr(BannedRights, attr, False) if right: if attr == \"until_date\":", "await unparse_info(client, *info) if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed", "{right}') return '\\n'.join(text) async def parse_banned_rights(BannedRights: types.ChatBannedRights) -> str: text", "UserBotClient, channel: types.ChannelFull) -> str: text = '' default_banned_rights =", "{await get_chat_link(c)}\" if users: text += f\"\\n**Participants:** {users}\" if admins:", "inline messages:', 'embed_links': 'Send embed links:', 'send_polls': 'Send polls:', 'change_info':", "int) -> str: text = '' if creator: c =", "attr, string in ChatAdminRights.items(): right = getattr(AdminRights, attr, False) if", "# Copyright (C) 2019 Kandarp <https://github.com/kandnub> # # TG-UserBot is", "None, None, None, None, None) full_chat = arg.full_chat if isinstance(full_chat,", "types.ChatParticipantCreator): creator = p.user_id if isinstance(p, types.ChatParticipant): participants += 1", "right = getattr(BannedRights, attr, False) if right: if attr ==", "# but WITHOUT ANY WARRANTY; without even the implied warranty", "'kicked_count'): kicked = full_chat.kicked_count if hasattr(full_chat, 'banned_count'): banned = full_chat.banned_count", "c.admin_rights banned_rights = c.banned_rights default_banned_rights = c.default_banned_rights break text +=", "Free Software Foundation, either version 3 of the 
License, or", "-> str: text = f\"\\n**Chat ID:** `{chat.full_chat.id}``\" info = await", "'Send GIFs:', 'send_games': 'Send games:', 'send_inline': 'Send inline messages:', 'embed_links':", "and/or modify # it under the terms of the GNU", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "arg.full_chat if isinstance(full_chat, types.ChannelFull): if hasattr(full_chat, 'participants_count'): participants = full_chat.participants_count", "participants += 1 if isinstance(p, types.ChatParticipantAdmin): admins += 1 return", "# # TG-UserBot is distributed in the hope that it", "import get_chat_link ChatBannedRights = { 'until_date': 'Banned until:', 'view_messages': 'Read", "'participants_count'): participants = full_chat.participants_count if hasattr(full_chat, 'admins_count'): admins = full_chat.admins_count", "channel_type = \"Channel\" for c in channel.chats: if c.id ==", "'banned_count'): banned = full_chat.banned_count if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info)", "it and/or modify # it under the terms of the", "== \"until_date\": text.append(f'{string} {right.ctime()} (UTC)') else: text.append(f'{string} {right}') return '\\n'.join(text)", "= None channel_type = \"Channel\" for c in channel.chats: if", "parse_banned_rights(banned_rights) unparsed = await unparse_rights(\"Banned rights:\", parsed) text += f\"\\n{unparsed}\"", "= await parse_banned_rights(default_banned_rights) unparsed = await unparse_rights(\"Default banned rights:\", parsed)", "'send_inline': 'Send inline messages:', 'embed_links': 'Send embed links:', 'send_polls': 'Send", "it will be useful, # but WITHOUT ANY WARRANTY; without", "0, 0 for p in full_chat.participants.participants: if isinstance(p, types.ChatParticipantCreator): creator", "+= f\"\\n**Kicked:** {kicked}\" if banned: text += f\"\\n**Banned:** {banned}\" return", "break if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed = await", "'Pin messages:', 'add_admins': 'Add new admins:' } async def parse_admin_rights(AdminRights:", "{right}') return '\\n'.join(text) async def get_entity_info( arg: Union[types.ChatFull, types.ChannelFull] )", "users: int, kicked: int, banned: int) -> str: text =", "if c.id == chat.full_chat.id: admin_rights = c.admin_rights default_banned_rights = c.default_banned_rights", "General Public License # along with TG-UserBot. If not, see", "# it under the terms of the GNU General Public", "hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) else: if hasattr(full_chat, 'bot_info'): bots", "creator = p.user_id if isinstance(p, types.ChatParticipant): participants += 1 if", "def unparse_info(client: UserBotClient, creator: int, admins: int, bots: int, users:", "= None admin_rights = None channel_type = \"Channel\" for c", "banned = full_chat.banned_count if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) else:", "of the License, or # (at your option) any later", "\"Megagroup\" admin_rights = c.admin_rights banned_rights = c.banned_rights default_banned_rights = c.default_banned_rights", "'Ban users:', 'invite_users': 'Invite users:', 'pin_messages': 'Pin messages:', 'add_admins': 'Add", "banned_rights: parsed = await parse_banned_rights(banned_rights) unparsed = await unparse_rights(\"Banned rights:\",", "# along with TG-UserBot. If not, see <https://www.gnu.org/licenses/>. 
from typing", "hope that it will be useful, # but WITHOUT ANY", "int, users: int, kicked: int, banned: int) -> str: text", "it under the terms of the GNU General Public License", "text = f\"**{title}**\" for l in rights.split('\\n'): splat = l.split(':')", "'pin_messages': 'Pin messages:', 'add_admins': 'Add new admins:' } async def", "{users}\" if admins: text += f\"\\n**Admins:** {admins}\" if bots: text", "in rights.split('\\n'): splat = l.split(':') text += f\"\\n **{splat[0]}:** `{':'.join(splat[1:])}`\"", "+= f\"\\n **{splat[0]}:** `{':'.join(splat[1:])}`\" return text async def resolve_channel(client: UserBotClient,", "= await unparse_rights(\"Admin rights:\", parsed) text += f\"\\n{unparsed}\" if default_banned_rights:", "the GNU General Public License as published by # the", "{ 'change_info': 'Change chat info:', 'post_messages': 'Post messages:', 'edit_messages': 'Edit", "-> str: text = [] for attr, string in ChatBannedRights.items():", "text.append(f'{string} {right.ctime()} (UTC)') else: text.append(f'{string} {right}') return '\\n'.join(text) async def", "embed links:', 'send_polls': 'Send polls:', 'change_info': 'Change info:', 'invite_users': 'Add", "= await parse_banned_rights(banned_rights) unparsed = await unparse_rights(\"Banned rights:\", parsed) text", "text += f\"\\n{unparsed}\" if default_banned_rights: parsed = await parse_banned_rights(default_banned_rights) unparsed", "await unparse_info(client, *info) admin_rights = None default_banned_rights = None for", "Kandarp <https://github.com/kandnub> # # TG-UserBot is free software: you can", "FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General", "f\"\\n**{channel_type} ID:** `{channel.full_chat.id}`\" info = await get_entity_info(channel) text += await", "= arg.full_chat if isinstance(full_chat, types.ChannelFull): if hasattr(full_chat, 'participants_count'): participants =", "General Public License as published by # the Free Software", "Telegram UserBot script for Python. # Copyright (C) 2019 Kandarp", "= c.admin_rights banned_rights = c.banned_rights default_banned_rights = c.default_banned_rights break text", "= c.default_banned_rights break if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed", "str: text = [] for attr, string in ChatAdminRights.items(): right", "<https://github.com/kandnub> # # TG-UserBot is free software: you can redistribute", "will be useful, # but WITHOUT ANY WARRANTY; without even", "default_banned_rights = None for c in chat.chats: if c.id ==", "{banned}\" return text async def unparse_rights(title: str, rights: str) ->", "text += await unparse_info(client, *info) if admin_rights: parsed = await", "banned: text += f\"\\n**Banned:** {banned}\" return text async def unparse_rights(title:", "return text async def resolve_channel(client: UserBotClient, channel: types.ChannelFull) -> str:", "default_banned_rights: parsed = await parse_banned_rights(default_banned_rights) unparsed = await unparse_rights(\"Default banned", "parse_admin_rights(admin_rights) unparsed = await unparse_rights(\"Admin rights:\", parsed) text += f\"\\n{unparsed}\"", "if isinstance(p, types.ChatParticipantAdmin): admins += 1 return creator, admins, bots,", "parse_admin_rights(AdminRights: types.ChatAdminRights) -> str: text = [] for attr, string", "<https://www.gnu.org/licenses/>. from typing import Tuple, Union from telethon.tl import types", "async def resolve_channel(client: UserBotClient, channel: types.ChannelFull) -> str: text =", "Public License # along with TG-UserBot. 
If not, see <https://www.gnu.org/licenses/>.", "PURPOSE. See the # GNU General Public License for more", "-> str: text = [] for attr, string in ChatAdminRights.items():", "= await unparse_rights(\"Banned rights:\", parsed) text += f\"\\n{unparsed}\" if default_banned_rights:", "None) full_chat = arg.full_chat if isinstance(full_chat, types.ChannelFull): if hasattr(full_chat, 'participants_count'):", "text = '' if creator: c = await client.get_entity(creator) text", "`{chat.full_chat.id}``\" info = await get_entity_info(chat) text += await unparse_info(client, *info)", "c in channel.chats: if c.id == channel.full_chat.id: if c.megagroup: channel_type", "FOR A PARTICULAR PURPOSE. See the # GNU General Public", "f\"\\n{unparsed}\" if banned_rights: parsed = await parse_banned_rights(banned_rights) unparsed = await", "the # GNU General Public License for more details. #", "participants = 0, 0 for p in full_chat.participants.participants: if isinstance(p,", "users: text += f\"\\n**Participants:** {users}\" if admins: text += f\"\\n**Admins:**", "any later version. # # TG-UserBot is distributed in the", "await unparse_rights(\"Banned rights:\", parsed) text += f\"\\n{unparsed}\" if default_banned_rights: parsed", "= 0, 0 for p in full_chat.participants.participants: if isinstance(p, types.ChatParticipantCreator):", "for attr, string in ChatBannedRights.items(): right = getattr(BannedRights, attr, False)", "along with TG-UserBot. If not, see <https://www.gnu.org/licenses/>. from typing import", "(UTC)') else: text.append(f'{string} {right}') return '\\n'.join(text) async def get_entity_info( arg:", "= \"Channel\" for c in channel.chats: if c.id == channel.full_chat.id:", "'\\n'.join(text) async def get_entity_info( arg: Union[types.ChatFull, types.ChannelFull] ) -> Tuple[int,", "resolve_chat(client: UserBotClient, chat: types.ChatFull) -> str: text = f\"\\n**Chat ID:**", "polls:', 'change_info': 'Change info:', 'invite_users': 'Add users:', 'pin_messages': 'Pin messages:'", "= full_chat.banned_count if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) else: if", "for more details. # # You should have received a", "redistribute it and/or modify # it under the terms of", "'Delete messages:', 'ban_users': 'Ban users:', 'invite_users': 'Invite users:', 'pin_messages': 'Pin", "version. 
# # TG-UserBot is distributed in the hope that", "telethon.tl import types from ..utils.client import UserBotClient from ..utils.helpers import", "of the GNU General Public License # along with TG-UserBot.", "unparse_info(client, *info) if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed =", "text = [] for attr, string in ChatBannedRights.items(): right =", "return '\\n'.join(text) async def parse_banned_rights(BannedRights: types.ChatBannedRights) -> str: text =", "bots: int, users: int, kicked: int, banned: int) -> str:", "text async def resolve_chat(client: UserBotClient, chat: types.ChatFull) -> str: text", "int]: creator, admins, bots, participants, kicked, banned = (None, None,", "f\"\\n**Creator:** {await get_chat_link(c)}\" if users: text += f\"\\n**Participants:** {users}\" if", "full_chat = arg.full_chat if isinstance(full_chat, types.ChannelFull): if hasattr(full_chat, 'participants_count'): participants", "ChatAdminRights = { 'change_info': 'Change chat info:', 'post_messages': 'Post messages:',", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "types.ChatParticipant): participants += 1 if isinstance(p, types.ChatParticipantAdmin): admins += 1", "unparsed = await unparse_rights(\"Default banned rights:\", parsed) text += f\"\\n{unparsed}\"", "def resolve_channel(client: UserBotClient, channel: types.ChannelFull) -> str: text = ''", "'send_stickers': 'Send stickers:', 'send_gifs': 'Send GIFs:', 'send_games': 'Send games:', 'send_inline':", "the hope that it will be useful, # but WITHOUT", "messages:', 'send_messages': 'Send messages:', 'send_media': 'Send media:', 'send_stickers': 'Send stickers:',", "str, rights: str) -> str: text = f\"**{title}**\" for l", "chat.full_chat.id: admin_rights = c.admin_rights default_banned_rights = c.default_banned_rights break if admin_rights:", "types from ..utils.client import UserBotClient from ..utils.helpers import get_chat_link ChatBannedRights", "software: you can redistribute it and/or modify # it under", "info = await get_entity_info(channel) text += await unparse_info(client, *info) if", "{right.ctime()} (UTC)') else: text.append(f'{string} {right}') return '\\n'.join(text) async def get_entity_info(", "[] for attr, string in ChatAdminRights.items(): right = getattr(AdminRights, attr,", "rights.split('\\n'): splat = l.split(':') text += f\"\\n **{splat[0]}:** `{':'.join(splat[1:])}`\" return", "'send_gifs': 'Send GIFs:', 'send_games': 'Send games:', 'send_inline': 'Send inline messages:',", "= p.user_id if isinstance(p, types.ChatParticipant): participants += 1 if isinstance(p,", "= None banned_rights = None admin_rights = None channel_type =", "= c.banned_rights default_banned_rights = c.default_banned_rights break text += f\"\\n**{channel_type} ID:**", "await get_entity_info(channel) text += await unparse_info(client, *info) if admin_rights: parsed", "'Send embed links:', 'send_polls': 'Send polls:', 'change_info': 'Change info:', 'invite_users':", "string in ChatAdminRights.items(): right = getattr(AdminRights, attr, False) if right:", "== channel.full_chat.id: if c.megagroup: channel_type = \"Megagroup\" admin_rights = c.admin_rights", "get_entity_info(channel) text += await unparse_info(client, *info) if admin_rights: parsed =", "int, banned: int) -> str: text = '' if creator:", "await client.get_entity(creator) text += f\"\\n**Creator:** {await get_chat_link(c)}\" if users: text", "UserBot script for Python. 
# Copyright (C) 2019 Kandarp <https://github.com/kandnub>", "copy of the GNU General Public License # along with", "if creator: c = await client.get_entity(creator) text += f\"\\n**Creator:** {await", "# TG-UserBot is distributed in the hope that it will", "rights: str) -> str: text = f\"**{title}**\" for l in", "= full_chat.admins_count if hasattr(full_chat, 'kicked_count'): kicked = full_chat.kicked_count if hasattr(full_chat,", "'Send inline messages:', 'embed_links': 'Send embed links:', 'send_polls': 'Send polls:',", "in ChatAdminRights.items(): right = getattr(AdminRights, attr, False) if right: text.append(f'{string}", "Python. # Copyright (C) 2019 Kandarp <https://github.com/kandnub> # # TG-UserBot", "..utils.helpers import get_chat_link ChatBannedRights = { 'until_date': 'Banned until:', 'view_messages':", "} ChatAdminRights = { 'change_info': 'Change chat info:', 'post_messages': 'Post", "def resolve_chat(client: UserBotClient, chat: types.ChatFull) -> str: text = f\"\\n**Chat", "right: if attr == \"until_date\": text.append(f'{string} {right.ctime()} (UTC)') else: text.append(f'{string}", "distributed in the hope that it will be useful, #", "License, or # (at your option) any later version. #", "{bots}\" if kicked: text += f\"\\n**Kicked:** {kicked}\" if banned: text", "'send_messages': 'Send messages:', 'send_media': 'Send media:', 'send_stickers': 'Send stickers:', 'send_gifs':", "`{':'.join(splat[1:])}`\" return text async def resolve_channel(client: UserBotClient, channel: types.ChannelFull) ->", "'change_info': 'Change info:', 'invite_users': 'Add users:', 'pin_messages': 'Pin messages:' }", "admins, participants = 0, 0 for p in full_chat.participants.participants: if", "str) -> str: text = f\"**{title}**\" for l in rights.split('\\n'):", "unparse_rights(\"Banned rights:\", parsed) text += f\"\\n{unparsed}\" if default_banned_rights: parsed =", "channel.chats: if c.id == channel.full_chat.id: if c.megagroup: channel_type = \"Megagroup\"", "not, see <https://www.gnu.org/licenses/>. from typing import Tuple, Union from telethon.tl", "bots = len(full_chat.bot_info) if hasattr(full_chat, 'participants'): admins, participants = 0,", "You should have received a copy of the GNU General", "+= f\"\\n**Bots:** {bots}\" if kicked: text += f\"\\n**Kicked:** {kicked}\" if", "'delete_messages': 'Delete messages:', 'ban_users': 'Ban users:', 'invite_users': 'Invite users:', 'pin_messages':", "+= 1 return creator, admins, bots, participants, kicked, banned async", "+= f\"\\n{unparsed}\" return text async def resolve_chat(client: UserBotClient, chat: types.ChatFull)", "ChatAdminRights.items(): right = getattr(AdminRights, attr, False) if right: text.append(f'{string} {right}')", "= None for c in chat.chats: if c.id == chat.full_chat.id:", "useful, # but WITHOUT ANY WARRANTY; without even the implied", "int, bots: int, users: int, kicked: int, banned: int) ->", "channel: types.ChannelFull) -> str: text = '' default_banned_rights = None", "-> str: text = '' default_banned_rights = None banned_rights =", "later version. 
# # TG-UserBot is distributed in the hope", "messages:', 'ban_users': 'Ban users:', 'invite_users': 'Invite users:', 'pin_messages': 'Pin messages:',", "await unparse_rights(\"Admin rights:\", parsed) text += f\"\\n{unparsed}\" if banned_rights: parsed", "f\"\\n**Admins:** {admins}\" if bots: text += f\"\\n**Bots:** {bots}\" if kicked:", "unparse_info(client: UserBotClient, creator: int, admins: int, bots: int, users: int,", "banned rights:\", parsed) text += f\"\\n{unparsed}\" return text async def", "you can redistribute it and/or modify # it under the", "'until_date': 'Banned until:', 'view_messages': 'Read messages:', 'send_messages': 'Send messages:', 'send_media':", "isinstance(p, types.ChatParticipantAdmin): admins += 1 return creator, admins, bots, participants,", "'' default_banned_rights = None banned_rights = None admin_rights = None", "for Python. # Copyright (C) 2019 Kandarp <https://github.com/kandnub> # #", "script for Python. # Copyright (C) 2019 Kandarp <https://github.com/kandnub> #", "# You should have received a copy of the GNU", "(at your option) any later version. # # TG-UserBot is", "GNU General Public License # along with TG-UserBot. If not,", "else: if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) if hasattr(full_chat, 'participants'):", "Copyright (C) 2019 Kandarp <https://github.com/kandnub> # # TG-UserBot is free", "= '' if creator: c = await client.get_entity(creator) text +=", "hasattr(full_chat, 'participants_count'): participants = full_chat.participants_count if hasattr(full_chat, 'admins_count'): admins =", "str: text = f\"**{title}**\" for l in rights.split('\\n'): splat =", "should have received a copy of the GNU General Public", "GIFs:', 'send_games': 'Send games:', 'send_inline': 'Send inline messages:', 'embed_links': 'Send", "None admin_rights = None channel_type = \"Channel\" for c in", "get_entity_info( arg: Union[types.ChatFull, types.ChannelFull] ) -> Tuple[int, int, int, int,", "kicked: text += f\"\\n**Kicked:** {kicked}\" if banned: text += f\"\\n**Banned:**", "kicked, banned async def unparse_info(client: UserBotClient, creator: int, admins: int,", "getattr(BannedRights, attr, False) if right: if attr == \"until_date\": text.append(f'{string}", "c in chat.chats: if c.id == chat.full_chat.id: admin_rights = c.admin_rights", "if isinstance(p, types.ChatParticipantCreator): creator = p.user_id if isinstance(p, types.ChatParticipant): participants", "attr, False) if right: text.append(f'{string} {right}') return '\\n'.join(text) async def", "default_banned_rights = c.default_banned_rights break text += f\"\\n**{channel_type} ID:** `{channel.full_chat.id}`\" info", "text += await unparse_info(client, *info) admin_rights = None default_banned_rights =", "await get_entity_info(chat) text += await unparse_info(client, *info) admin_rights = None", "TG-UserBot is distributed in the hope that it will be", ") -> Tuple[int, int, int, int, int, int]: creator, admins,", "text.append(f'{string} {right}') return '\\n'.join(text) async def parse_banned_rights(BannedRights: types.ChatBannedRights) -> str:", "'Change chat info:', 'post_messages': 'Post messages:', 'edit_messages': 'Edit messages:', 'delete_messages':", "+= f\"\\n**Admins:** {admins}\" if bots: text += f\"\\n**Bots:** {bots}\" if", "for c in chat.chats: if c.id == chat.full_chat.id: admin_rights =", "parse_banned_rights(default_banned_rights) unparsed = await unparse_rights(\"Default banned rights:\", parsed) text +=", "TG-UserBot. If not, see <https://www.gnu.org/licenses/>. 
from typing import Tuple, Union", "GNU General Public License as published by # the Free", "info = await get_entity_info(chat) text += await unparse_info(client, *info) admin_rights", "types.ChannelFull): if hasattr(full_chat, 'participants_count'): participants = full_chat.participants_count if hasattr(full_chat, 'admins_count'):", "if default_banned_rights: parsed = await parse_banned_rights(default_banned_rights) unparsed = await unparse_rights(\"Default", "'Send media:', 'send_stickers': 'Send stickers:', 'send_gifs': 'Send GIFs:', 'send_games': 'Send", "hasattr(full_chat, 'admins_count'): admins = full_chat.admins_count if hasattr(full_chat, 'kicked_count'): kicked =", "-> str: text = f\"**{title}**\" for l in rights.split('\\n'): splat", "get_chat_link ChatBannedRights = { 'until_date': 'Banned until:', 'view_messages': 'Read messages:',", "unparse_rights(\"Admin rights:\", parsed) text += f\"\\n{unparsed}\" if banned_rights: parsed =", "Tuple[int, int, int, int, int, int]: creator, admins, bots, participants,", "# the Free Software Foundation, either version 3 of the", "right: text.append(f'{string} {right}') return '\\n'.join(text) async def parse_banned_rights(BannedRights: types.ChatBannedRights) ->", "stickers:', 'send_gifs': 'Send GIFs:', 'send_games': 'Send games:', 'send_inline': 'Send inline", "from ..utils.client import UserBotClient from ..utils.helpers import get_chat_link ChatBannedRights =", "Tuple, Union from telethon.tl import types from ..utils.client import UserBotClient", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "= c.admin_rights default_banned_rights = c.default_banned_rights break if admin_rights: parsed =", "async def parse_banned_rights(BannedRights: types.ChatBannedRights) -> str: text = [] for", "= full_chat.participants_count if hasattr(full_chat, 'admins_count'): admins = full_chat.admins_count if hasattr(full_chat,", "hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) if hasattr(full_chat, 'participants'): admins, participants", "'view_messages': 'Read messages:', 'send_messages': 'Send messages:', 'send_media': 'Send media:', 'send_stickers':", "= await get_entity_info(channel) text += await unparse_info(client, *info) if admin_rights:", "if admins: text += f\"\\n**Admins:** {admins}\" if bots: text +=", "= full_chat.kicked_count if hasattr(full_chat, 'banned_count'): banned = full_chat.banned_count if hasattr(full_chat,", "await parse_admin_rights(admin_rights) unparsed = await unparse_rights(\"Admin rights:\", parsed) text +=", "UserBotClient from ..utils.helpers import get_chat_link ChatBannedRights = { 'until_date': 'Banned", "(None, None, None, None, None, None) full_chat = arg.full_chat if", "hasattr(full_chat, 'participants'): admins, participants = 0, 0 for p in", "def parse_banned_rights(BannedRights: types.ChatBannedRights) -> str: text = [] for attr,", "1 return creator, admins, bots, participants, kicked, banned async def", "text += f\"\\n**Creator:** {await get_chat_link(c)}\" if users: text += f\"\\n**Participants:**", "# TG-UserBot is free software: you can redistribute it and/or", "+= f\"\\n**Banned:** {banned}\" return text async def unparse_rights(title: str, rights:", "but WITHOUT ANY WARRANTY; without even the implied warranty of", "rights:\", parsed) text += f\"\\n{unparsed}\" return text async def resolve_chat(client:", "= None default_banned_rights = None for c in chat.chats: if", "text += f\"\\n **{splat[0]}:** `{':'.join(splat[1:])}`\" return text async def resolve_channel(client:", "'Pin 
messages:' } ChatAdminRights = { 'change_info': 'Change chat info:',", "'' if creator: c = await client.get_entity(creator) text += f\"\\n**Creator:**", "f\"\\n**Banned:** {banned}\" return text async def unparse_rights(title: str, rights: str)", "default_banned_rights = c.default_banned_rights break if admin_rights: parsed = await parse_admin_rights(admin_rights)", "Public License for more details. # # You should have", "typing import Tuple, Union from telethon.tl import types from ..utils.client", "`{channel.full_chat.id}`\" info = await get_entity_info(channel) text += await unparse_info(client, *info)", "users:', 'pin_messages': 'Pin messages:' } ChatAdminRights = { 'change_info': 'Change", "Union from telethon.tl import types from ..utils.client import UserBotClient from", "full_chat.admins_count if hasattr(full_chat, 'kicked_count'): kicked = full_chat.kicked_count if hasattr(full_chat, 'banned_count'):", "the terms of the GNU General Public License as published", "creator, admins, bots, participants, kicked, banned = (None, None, None,", "chat: types.ChatFull) -> str: text = f\"\\n**Chat ID:** `{chat.full_chat.id}``\" info", "users:', 'pin_messages': 'Pin messages:', 'add_admins': 'Add new admins:' } async", "'Send stickers:', 'send_gifs': 'Send GIFs:', 'send_games': 'Send games:', 'send_inline': 'Send", "await parse_banned_rights(default_banned_rights) unparsed = await unparse_rights(\"Default banned rights:\", parsed) text", "= [] for attr, string in ChatAdminRights.items(): right = getattr(AdminRights,", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #", "'change_info': 'Change chat info:', 'post_messages': 'Post messages:', 'edit_messages': 'Edit messages:',", "full_chat.participants.participants: if isinstance(p, types.ChatParticipantCreator): creator = p.user_id if isinstance(p, types.ChatParticipant):", "Software Foundation, either version 3 of the License, or #", "unparse_info(client, *info) admin_rights = None default_banned_rights = None for c", "parsed = await parse_banned_rights(default_banned_rights) unparsed = await unparse_rights(\"Default banned rights:\",", "+= await unparse_info(client, *info) admin_rights = None default_banned_rights = None", "messages:', 'add_admins': 'Add new admins:' } async def parse_admin_rights(AdminRights: types.ChatAdminRights)", "client.get_entity(creator) text += f\"\\n**Creator:** {await get_chat_link(c)}\" if users: text +=", "None for c in chat.chats: if c.id == chat.full_chat.id: admin_rights", "\"Channel\" for c in channel.chats: if c.id == channel.full_chat.id: if", "in full_chat.participants.participants: if isinstance(p, types.ChatParticipantCreator): creator = p.user_id if isinstance(p,", "def parse_admin_rights(AdminRights: types.ChatAdminRights) -> str: text = [] for attr,", "as published by # the Free Software Foundation, either version", "await unparse_rights(\"Default banned rights:\", parsed) text += f\"\\n{unparsed}\" return text", "bots = len(full_chat.bot_info) else: if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info)", "version 3 of the License, or # (at your option)", "+= 1 if isinstance(p, types.ChatParticipantAdmin): admins += 1 return creator,", "int, int, int]: creator, admins, bots, participants, kicked, banned =", "banned async def unparse_info(client: UserBotClient, creator: int, admins: int, bots:", "rights:\", parsed) text += f\"\\n{unparsed}\" if banned_rights: parsed = await", "participants, kicked, banned async def unparse_info(client: UserBotClient, creator: int, admins:", 
"f\"\\n**Chat ID:** `{chat.full_chat.id}``\" info = await get_entity_info(chat) text += await", "ChatBannedRights.items(): right = getattr(BannedRights, attr, False) if right: if attr", "participants = full_chat.participants_count if hasattr(full_chat, 'admins_count'): admins = full_chat.admins_count if", "'send_polls': 'Send polls:', 'change_info': 'Change info:', 'invite_users': 'Add users:', 'pin_messages':", "def get_entity_info( arg: Union[types.ChatFull, types.ChannelFull] ) -> Tuple[int, int, int,", "-> Tuple[int, int, int, int, int, int]: creator, admins, bots,", "admins, bots, participants, kicked, banned async def unparse_info(client: UserBotClient, creator:", "None channel_type = \"Channel\" for c in channel.chats: if c.id", "else: text.append(f'{string} {right}') return '\\n'.join(text) async def get_entity_info( arg: Union[types.ChatFull,", "a copy of the GNU General Public License # along", "int, kicked: int, banned: int) -> str: text = ''", "full_chat.banned_count if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) else: if hasattr(full_chat,", "attr, False) if right: if attr == \"until_date\": text.append(f'{string} {right.ctime()}", "if banned: text += f\"\\n**Banned:** {banned}\" return text async def", "*info) admin_rights = None default_banned_rights = None for c in", "Public License as published by # the Free Software Foundation,", "for attr, string in ChatAdminRights.items(): right = getattr(AdminRights, attr, False)", "if right: if attr == \"until_date\": text.append(f'{string} {right.ctime()} (UTC)') else:", "parse_banned_rights(BannedRights: types.ChatBannedRights) -> str: text = [] for attr, string", "if isinstance(p, types.ChatParticipant): participants += 1 if isinstance(p, types.ChatParticipantAdmin): admins", "unparsed = await unparse_rights(\"Admin rights:\", parsed) text += f\"\\n{unparsed}\" if", "= len(full_chat.bot_info) else: if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) if", "len(full_chat.bot_info) else: if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) if hasattr(full_chat,", "banned_rights = c.banned_rights default_banned_rights = c.default_banned_rights break text += f\"\\n**{channel_type}", "admin_rights = None default_banned_rights = None for c in chat.chats:", "# # TG-UserBot is free software: you can redistribute it", "hasattr(full_chat, 'banned_count'): banned = full_chat.banned_count if hasattr(full_chat, 'bot_info'): bots =", "if hasattr(full_chat, 'bot_info'): bots = len(full_chat.bot_info) if hasattr(full_chat, 'participants'): admins,", "admins: int, bots: int, users: int, kicked: int, banned: int)", "banned_rights = None admin_rights = None channel_type = \"Channel\" for", "have received a copy of the GNU General Public License", "creator, admins, bots, participants, kicked, banned async def unparse_info(client: UserBotClient,", "for p in full_chat.participants.participants: if isinstance(p, types.ChatParticipantCreator): creator = p.user_id", "for c in channel.chats: if c.id == channel.full_chat.id: if c.megagroup:", "f\"\\n **{splat[0]}:** `{':'.join(splat[1:])}`\" return text async def resolve_channel(client: UserBotClient, channel:", "'Read messages:', 'send_messages': 'Send messages:', 'send_media': 'Send media:', 'send_stickers': 'Send", "in the hope that it will be useful, # but", "= l.split(':') text += f\"\\n **{splat[0]}:** `{':'.join(splat[1:])}`\" return text async", "\"until_date\": text.append(f'{string} {right.ctime()} (UTC)') else: text.append(f'{string} {right}') return 
'\\n'.join(text) async", "str: text = '' default_banned_rights = None banned_rights = None", "False) if right: if attr == \"until_date\": text.append(f'{string} {right.ctime()} (UTC)')", "'Add users:', 'pin_messages': 'Pin messages:' } ChatAdminRights = { 'change_info':", "see <https://www.gnu.org/licenses/>. from typing import Tuple, Union from telethon.tl import", "c.id == chat.full_chat.id: admin_rights = c.admin_rights default_banned_rights = c.default_banned_rights break", "= await parse_admin_rights(admin_rights) unparsed = await unparse_rights(\"Admin rights:\", parsed) text", "1 if isinstance(p, types.ChatParticipantAdmin): admins += 1 return creator, admins,", "'embed_links': 'Send embed links:', 'send_polls': 'Send polls:', 'change_info': 'Change info:',", "c.default_banned_rights break if admin_rights: parsed = await parse_admin_rights(admin_rights) unparsed =", "types.ChatBannedRights) -> str: text = [] for attr, string in", "hasattr(full_chat, 'kicked_count'): kicked = full_chat.kicked_count if hasattr(full_chat, 'banned_count'): banned =", "= f\"**{title}**\" for l in rights.split('\\n'): splat = l.split(':') text", "'add_admins': 'Add new admins:' } async def parse_admin_rights(AdminRights: types.ChatAdminRights) ->", "types.ChatAdminRights) -> str: text = [] for attr, string in", "messages:', 'embed_links': 'Send embed links:', 'send_polls': 'Send polls:', 'change_info': 'Change", "chat.chats: if c.id == chat.full_chat.id: admin_rights = c.admin_rights default_banned_rights =", "bots: text += f\"\\n**Bots:** {bots}\" if kicked: text += f\"\\n**Kicked:**", "} async def parse_admin_rights(AdminRights: types.ChatAdminRights) -> str: text = []", "async def parse_admin_rights(AdminRights: types.ChatAdminRights) -> str: text = [] for", "'Edit messages:', 'delete_messages': 'Delete messages:', 'ban_users': 'Ban users:', 'invite_users': 'Invite", "'admins_count'): admins = full_chat.admins_count if hasattr(full_chat, 'kicked_count'): kicked = full_chat.kicked_count", "in channel.chats: if c.id == channel.full_chat.id: if c.megagroup: channel_type =", "+= f\"\\n**{channel_type} ID:** `{channel.full_chat.id}`\" info = await get_entity_info(channel) text +=", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or", "bots, participants, kicked, banned async def unparse_info(client: UserBotClient, creator: int,", "+= f\"\\n**Creator:** {await get_chat_link(c)}\" if users: text += f\"\\n**Participants:** {users}\"", "admin_rights = c.admin_rights default_banned_rights = c.default_banned_rights break if admin_rights: parsed", "'Send polls:', 'change_info': 'Change info:', 'invite_users': 'Add users:', 'pin_messages': 'Pin", "break text += f\"\\n**{channel_type} ID:** `{channel.full_chat.id}`\" info = await get_entity_info(channel)", "GNU General Public License for more details. 
# # You", "games:', 'send_inline': 'Send inline messages:', 'embed_links': 'Send embed links:', 'send_polls':", "return creator, admins, bots, participants, kicked, banned async def unparse_info(client:", "full_chat.participants_count if hasattr(full_chat, 'admins_count'): admins = full_chat.admins_count if hasattr(full_chat, 'kicked_count'):", "= await unparse_rights(\"Admin rights:\", parsed) text += f\"\\n{unparsed}\" if banned_rights:", "is distributed in the hope that it will be useful,", "3 of the License, or # (at your option) any", "text += f\"\\n**Bots:** {bots}\" if kicked: text += f\"\\n**Kicked:** {kicked}\"", "text += f\"\\n**Kicked:** {kicked}\" if banned: text += f\"\\n**Banned:** {banned}\"", "# # You should have received a copy of the", "None banned_rights = None admin_rights = None channel_type = \"Channel\"", "new admins:' } async def parse_admin_rights(AdminRights: types.ChatAdminRights) -> str: text", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "int, int, int, int]: creator, admins, bots, participants, kicked, banned", "without even the implied warranty of # MERCHANTABILITY or FITNESS" ]
[ "as np def get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700, complex_n=True): ntxt =", "fpn) if complex_n: fpk = ntxt[idxmin:idxmax, 2].squeeze() k = np.interp(np.linspace(lambda_min,", "1]]), np.array([ntxt[idxmin, 2]])) else: indicies = np.array([ntxt[idxmin, 1]]) else: xp", "n = [] for path in path_list: n.append(get_n_from_txt(path, points, lambda_min=lambda_min,", "np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpn) if complex_n: fpk = ntxt[idxmin:idxmax,", "lambda_max - lambda_min + 1 idxmin = np.argmin(np.abs(ntxt[:, 0] -", "ntxt[idxmin:idxmax, 1] n = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpn) if", "path in path_list: n.append(get_n_from_txt(path, points, lambda_min=lambda_min, lambda_max=lambda_max, complex_n=complex_n)) return np.vstack((n))", "idxmax = np.argmin(np.abs(ntxt[:, 0] - lambda_max)) if idxmax == idxmin:", "points), xp, fpn) if complex_n: fpk = ntxt[idxmin:idxmax, 2].squeeze() k", "if np.min(np.abs(ntxt[:, 0] - lambda_min)) > 25 or np.min(np.abs(ntxt[:, 0]", "+ filepath) if points is None: points = lambda_max -", "= [] for path in path_list: n.append(get_n_from_txt(path, points, lambda_min=lambda_min, lambda_max=lambda_max,", "- lambda_max)) > 25: print('No measurement data for refractive indicies", "+ 1 idxmin = np.argmin(np.abs(ntxt[:, 0] - lambda_min)) idxmax =", "if complex_n: indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]])) else: indicies", "or np.min(np.abs(ntxt[:, 0] - lambda_max)) > 25: print('No measurement data", "return indicies def get_N(path_list, lambda_min, lambda_max, points=None, complex_n=False): n =", "lambda_max)) > 25: print('No measurement data for refractive indicies are", "np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpk) indicies = np.vectorize(complex)(n, k) else:", "1]]) else: xp = ntxt[idxmin:idxmax, 0] fpn = ntxt[idxmin:idxmax, 1]", "indicies def get_N(path_list, lambda_min, lambda_max, points=None, complex_n=False): n = []", "if complex_n: fpk = ntxt[idxmin:idxmax, 2].squeeze() k = np.interp(np.linspace(lambda_min, lambda_max,", "np.min(np.abs(ntxt[:, 0] - lambda_max)) > 25: print('No measurement data for", "lambda_max=700, complex_n=True): ntxt = np.loadtxt(filepath) if np.min(np.abs(ntxt[:, 0] - lambda_min))", "k = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpk) indicies = np.vectorize(complex)(n,", "None: points = lambda_max - lambda_min + 1 idxmin =", "indicies = np.vectorize(complex)(n, k) else: indicies = n return indicies", "= np.vectorize(complex)(n, k) else: indicies = n return indicies def", "lambda_max, points), xp, fpn) if complex_n: fpk = ntxt[idxmin:idxmax, 2].squeeze()", "get_N(path_list, lambda_min, lambda_max, points=None, complex_n=False): n = [] for path", "points=None, complex_n=False): n = [] for path in path_list: n.append(get_n_from_txt(path,", "ntxt[idxmin:idxmax, 0] fpn = ntxt[idxmin:idxmax, 1] n = np.interp(np.linspace(lambda_min, lambda_max,", "for refractive indicies are available within 25 nm in \\n'", "0] - lambda_min)) > 25 or np.min(np.abs(ntxt[:, 0] - lambda_max))", "idxmax == idxmin: if complex_n: indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin,", "complex_n=False): n = [] for path in path_list: n.append(get_n_from_txt(path, points,", "= ntxt[idxmin:idxmax, 0] fpn = ntxt[idxmin:idxmax, 1] n = np.interp(np.linspace(lambda_min,", "lambda_min)) > 25 or np.min(np.abs(ntxt[:, 0] - lambda_max)) > 25:", "np.argmin(np.abs(ntxt[:, 0] - lambda_max)) if 
idxmax == idxmin: if complex_n:", "25 or np.min(np.abs(ntxt[:, 0] - lambda_max)) > 25: print('No measurement", "<filename>gym-multilayerthinfilm/utils.py import numpy as np def get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700,", "are available within 25 nm in \\n' + filepath) if", "np.loadtxt(filepath) if np.min(np.abs(ntxt[:, 0] - lambda_min)) > 25 or np.min(np.abs(ntxt[:,", "= n return indicies def get_N(path_list, lambda_min, lambda_max, points=None, complex_n=False):", "np def get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700, complex_n=True): ntxt = np.loadtxt(filepath)", "lambda_min)) idxmax = np.argmin(np.abs(ntxt[:, 0] - lambda_max)) if idxmax ==", "1] n = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpn) if complex_n:", "lambda_min + 1 idxmin = np.argmin(np.abs(ntxt[:, 0] - lambda_min)) idxmax", "- lambda_min)) > 25 or np.min(np.abs(ntxt[:, 0] - lambda_max)) >", "2].squeeze() k = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpk) indicies =", "ntxt = np.loadtxt(filepath) if np.min(np.abs(ntxt[:, 0] - lambda_min)) > 25", "def get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700, complex_n=True): ntxt = np.loadtxt(filepath) if", "points is None: points = lambda_max - lambda_min + 1", "xp, fpk) indicies = np.vectorize(complex)(n, k) else: indicies = n", "lambda_max, points=None, complex_n=False): n = [] for path in path_list:", "def get_N(path_list, lambda_min, lambda_max, points=None, complex_n=False): n = [] for", "k) else: indicies = n return indicies def get_N(path_list, lambda_min,", "points), xp, fpk) indicies = np.vectorize(complex)(n, k) else: indicies =", "nm in \\n' + filepath) if points is None: points", "indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]])) else: indicies = np.array([ntxt[idxmin,", "n = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpn) if complex_n: fpk", "0] fpn = ntxt[idxmin:idxmax, 1] n = np.interp(np.linspace(lambda_min, lambda_max, points),", "is None: points = lambda_max - lambda_min + 1 idxmin", "= np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]])) else: indicies = np.array([ntxt[idxmin, 1]])", "lambda_min, lambda_max, points=None, complex_n=False): n = [] for path in", "> 25: print('No measurement data for refractive indicies are available", "np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]])) else: indicies = np.array([ntxt[idxmin, 1]]) else:", "np.array([ntxt[idxmin, 2]])) else: indicies = np.array([ntxt[idxmin, 1]]) else: xp =", "complex_n: fpk = ntxt[idxmin:idxmax, 2].squeeze() k = np.interp(np.linspace(lambda_min, lambda_max, points),", "= ntxt[idxmin:idxmax, 2].squeeze() k = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpk)", "0] - lambda_min)) idxmax = np.argmin(np.abs(ntxt[:, 0] - lambda_max)) if", "np.array([ntxt[idxmin, 1]]) else: xp = ntxt[idxmin:idxmax, 0] fpn = ntxt[idxmin:idxmax,", "points = lambda_max - lambda_min + 1 idxmin = np.argmin(np.abs(ntxt[:,", "0] - lambda_max)) > 25: print('No measurement data for refractive", "measurement data for refractive indicies are available within 25 nm", "in \\n' + filepath) if points is None: points =", "fpn = ntxt[idxmin:idxmax, 1] n = np.interp(np.linspace(lambda_min, lambda_max, points), xp,", "- lambda_max)) if idxmax == idxmin: if complex_n: indicies =", "= np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpk) indicies = np.vectorize(complex)(n, k)", "indicies = 
np.array([ntxt[idxmin, 1]]) else: xp = ntxt[idxmin:idxmax, 0] fpn", "indicies are available within 25 nm in \\n' + filepath)", "= np.argmin(np.abs(ntxt[:, 0] - lambda_max)) if idxmax == idxmin: if", "= np.array([ntxt[idxmin, 1]]) else: xp = ntxt[idxmin:idxmax, 0] fpn =", "refractive indicies are available within 25 nm in \\n' +", "n return indicies def get_N(path_list, lambda_min, lambda_max, points=None, complex_n=False): n", "> 25 or np.min(np.abs(ntxt[:, 0] - lambda_max)) > 25: print('No", "else: xp = ntxt[idxmin:idxmax, 0] fpn = ntxt[idxmin:idxmax, 1] n", "= np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpn) if complex_n: fpk =", "0] - lambda_max)) if idxmax == idxmin: if complex_n: indicies", "== idxmin: if complex_n: indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]]))", "if points is None: points = lambda_max - lambda_min +", "2]])) else: indicies = np.array([ntxt[idxmin, 1]]) else: xp = ntxt[idxmin:idxmax,", "data for refractive indicies are available within 25 nm in", "- lambda_min)) idxmax = np.argmin(np.abs(ntxt[:, 0] - lambda_max)) if idxmax", "ntxt[idxmin:idxmax, 2].squeeze() k = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpk) indicies", "numpy as np def get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700, complex_n=True): ntxt", "lambda_min=400, lambda_max=700, complex_n=True): ntxt = np.loadtxt(filepath) if np.min(np.abs(ntxt[:, 0] -", "np.argmin(np.abs(ntxt[:, 0] - lambda_min)) idxmax = np.argmin(np.abs(ntxt[:, 0] - lambda_max))", "xp, fpn) if complex_n: fpk = ntxt[idxmin:idxmax, 2].squeeze() k =", "- lambda_min + 1 idxmin = np.argmin(np.abs(ntxt[:, 0] - lambda_min))", "25 nm in \\n' + filepath) if points is None:", "import numpy as np def get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700, complex_n=True):", "if idxmax == idxmin: if complex_n: indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]),", "= np.argmin(np.abs(ntxt[:, 0] - lambda_min)) idxmax = np.argmin(np.abs(ntxt[:, 0] -", "idxmin = np.argmin(np.abs(ntxt[:, 0] - lambda_min)) idxmax = np.argmin(np.abs(ntxt[:, 0]", "25: print('No measurement data for refractive indicies are available within", "xp = ntxt[idxmin:idxmax, 0] fpn = ntxt[idxmin:idxmax, 1] n =", "within 25 nm in \\n' + filepath) if points is", "\\n' + filepath) if points is None: points = lambda_max", "complex_n=True): ntxt = np.loadtxt(filepath) if np.min(np.abs(ntxt[:, 0] - lambda_min)) >", "print('No measurement data for refractive indicies are available within 25", "fpk = ntxt[idxmin:idxmax, 2].squeeze() k = np.interp(np.linspace(lambda_min, lambda_max, points), xp,", "= ntxt[idxmin:idxmax, 1] n = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpn)", "lambda_max, points), xp, fpk) indicies = np.vectorize(complex)(n, k) else: indicies", "filepath) if points is None: points = lambda_max - lambda_min", "available within 25 nm in \\n' + filepath) if points", "[] for path in path_list: n.append(get_n_from_txt(path, points, lambda_min=lambda_min, lambda_max=lambda_max, complex_n=complex_n))", "fpk) indicies = np.vectorize(complex)(n, k) else: indicies = n return", "np.vectorize(complex)(n, k) else: indicies = n return indicies def get_N(path_list,", "1 idxmin = np.argmin(np.abs(ntxt[:, 0] - lambda_min)) idxmax = np.argmin(np.abs(ntxt[:,", "points=None, lambda_min=400, lambda_max=700, complex_n=True): ntxt = np.loadtxt(filepath) if np.min(np.abs(ntxt[:, 0]", "lambda_max)) if idxmax == idxmin: if complex_n: indicies = 
np.vectorize(complex)(np.array([ntxt[idxmin,", "else: indicies = np.array([ntxt[idxmin, 1]]) else: xp = ntxt[idxmin:idxmax, 0]", "= np.loadtxt(filepath) if np.min(np.abs(ntxt[:, 0] - lambda_min)) > 25 or", "for path in path_list: n.append(get_n_from_txt(path, points, lambda_min=lambda_min, lambda_max=lambda_max, complex_n=complex_n)) return", "idxmin: if complex_n: indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]])) else:", "indicies = n return indicies def get_N(path_list, lambda_min, lambda_max, points=None,", "get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700, complex_n=True): ntxt = np.loadtxt(filepath) if np.min(np.abs(ntxt[:,", "= lambda_max - lambda_min + 1 idxmin = np.argmin(np.abs(ntxt[:, 0]", "else: indicies = n return indicies def get_N(path_list, lambda_min, lambda_max,", "complex_n: indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]])) else: indicies =", "np.min(np.abs(ntxt[:, 0] - lambda_min)) > 25 or np.min(np.abs(ntxt[:, 0] -" ]
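A quick self-contained check of the interpolation; the table values and the
file name here are made up for illustration.

import numpy as np

# Three columns: wavelength (nm), n, k - a tiny synthetic dispersion table.
table = np.column_stack([np.arange(400, 701, 50),
                         np.linspace(1.50, 1.60, 7),
                         np.zeros(7)])
np.savetxt("synthetic_n.txt", table)

n = get_n_from_txt("synthetic_n.txt", lambda_min=400, lambda_max=700)
print(n.shape, n[0])  # 301 complex samples, one per nanometre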
[ "\"pyrocco\" __version__ = \"0.1.0\" __author__ = \"<NAME>\" __author_email__ = \"<EMAIL>\"", "= \"<EMAIL>\" __description__ = \"A Python CLI to add the", "__package_name__ = \"pyrocco\" __version__ = \"0.1.0\" __author__ = \"<NAME>\" __author_email__", "= \"pyrocco\" __version__ = \"0.1.0\" __author__ = \"<NAME>\" __author_email__ =", "Python CLI to add the Party Parrot to a custom", "Party Parrot to a custom background image.\" __url__ = \"https://github.com/joaopalmeiro/pyrocco\"", "__author_email__ = \"<EMAIL>\" __description__ = \"A Python CLI to add", "__description__ = \"A Python CLI to add the Party Parrot", "the Party Parrot to a custom background image.\" __url__ =", "= \"A Python CLI to add the Party Parrot to", "\"0.1.0\" __author__ = \"<NAME>\" __author_email__ = \"<EMAIL>\" __description__ = \"A", "\"A Python CLI to add the Party Parrot to a", "CLI to add the Party Parrot to a custom background", "__author__ = \"<NAME>\" __author_email__ = \"<EMAIL>\" __description__ = \"A Python", "add the Party Parrot to a custom background image.\" __url__", "\"<EMAIL>\" __description__ = \"A Python CLI to add the Party", "= \"<NAME>\" __author_email__ = \"<EMAIL>\" __description__ = \"A Python CLI", "\"<NAME>\" __author_email__ = \"<EMAIL>\" __description__ = \"A Python CLI to", "__version__ = \"0.1.0\" __author__ = \"<NAME>\" __author_email__ = \"<EMAIL>\" __description__", "to add the Party Parrot to a custom background image.\"", "= \"0.1.0\" __author__ = \"<NAME>\" __author_email__ = \"<EMAIL>\" __description__ =" ]
[ "self.visited.append(self.pointer) incremento = 1 if (program[self.pointer][0] == \"acc\"): self.accum +=", "True self.visited.append(self.pointer) incremento = 1 if (program[self.pointer][0] == \"acc\"): self.accum", "\"acc\"): self.accum += program[self.pointer][1] if (program[self.pointer][0] == \"jmp\"): incremento =", "(program[self.pointer][0] == \"acc\"): self.accum += program[self.pointer][1] if (program[self.pointer][0] == \"jmp\"):", "if (program[self.pointer][0] == \"jmp\"): incremento = program[self.pointer][1] self.pointer += incremento", "incremento = 1 if (program[self.pointer][0] == \"acc\"): self.accum += program[self.pointer][1]", "incremento = program[self.pointer][1] self.pointer += incremento return True def getVisited(self):", "0 self.visited = [] def run(self,program): salir = False while", "(self.pointer in self.visited): return False if (self.pointer >= len(program)): return", "= [] def run(self,program): salir = False while (salir ==", "run(self,program): salir = False while (salir == False): if (self.pointer", "if (self.pointer >= len(program)): return True self.visited.append(self.pointer) incremento = 1", "(program[self.pointer][0] == \"jmp\"): incremento = program[self.pointer][1] self.pointer += incremento return", "<reponame>ingjrs01/adventofcode<gh_stars>0 class Machine(): def __init__(self): self.pointer = 0 self.accum =", "+= incremento return True def getVisited(self): return self.visited def getAccum(self):", "return True def getVisited(self): return self.visited def getAccum(self): return self.accum", "(self.pointer >= len(program)): return True self.visited.append(self.pointer) incremento = 1 if", "class Machine(): def __init__(self): self.pointer = 0 self.accum = 0", ">= len(program)): return True self.visited.append(self.pointer) incremento = 1 if (program[self.pointer][0]", "program[self.pointer][1] if (program[self.pointer][0] == \"jmp\"): incremento = program[self.pointer][1] self.pointer +=", "1 if (program[self.pointer][0] == \"acc\"): self.accum += program[self.pointer][1] if (program[self.pointer][0]", "= program[self.pointer][1] self.pointer += incremento return True def getVisited(self): return", "def run(self,program): salir = False while (salir == False): if", "self.visited = [] def run(self,program): salir = False while (salir", "def __init__(self): self.pointer = 0 self.accum = 0 self.visited =", "= 0 self.visited = [] def run(self,program): salir = False", "\"jmp\"): incremento = program[self.pointer][1] self.pointer += incremento return True def", "self.pointer += incremento return True def getVisited(self): return self.visited def", "return False if (self.pointer >= len(program)): return True self.visited.append(self.pointer) incremento", "[] def run(self,program): salir = False while (salir == False):", "= 0 self.accum = 0 self.visited = [] def run(self,program):", "= 1 if (program[self.pointer][0] == \"acc\"): self.accum += program[self.pointer][1] if", "if (program[self.pointer][0] == \"acc\"): self.accum += program[self.pointer][1] if (program[self.pointer][0] ==", "== \"jmp\"): incremento = program[self.pointer][1] self.pointer += incremento return True", "(salir == False): if (self.pointer in self.visited): return False if", "0 self.accum = 0 self.visited = [] def run(self,program): salir", "self.accum += program[self.pointer][1] if (program[self.pointer][0] == \"jmp\"): incremento = program[self.pointer][1]", "incremento return True def getVisited(self): return self.visited def getAccum(self): return", "self.pointer = 0 
self.accum = 0 self.visited = [] def", "== False): if (self.pointer in self.visited): return False if (self.pointer", "== \"acc\"): self.accum += program[self.pointer][1] if (program[self.pointer][0] == \"jmp\"): incremento", "+= program[self.pointer][1] if (program[self.pointer][0] == \"jmp\"): incremento = program[self.pointer][1] self.pointer", "= False while (salir == False): if (self.pointer in self.visited):", "len(program)): return True self.visited.append(self.pointer) incremento = 1 if (program[self.pointer][0] ==", "if (self.pointer in self.visited): return False if (self.pointer >= len(program)):", "return True self.visited.append(self.pointer) incremento = 1 if (program[self.pointer][0] == \"acc\"):", "False while (salir == False): if (self.pointer in self.visited): return", "self.accum = 0 self.visited = [] def run(self,program): salir =", "program[self.pointer][1] self.pointer += incremento return True def getVisited(self): return self.visited", "while (salir == False): if (self.pointer in self.visited): return False", "False): if (self.pointer in self.visited): return False if (self.pointer >=", "Machine(): def __init__(self): self.pointer = 0 self.accum = 0 self.visited", "False if (self.pointer >= len(program)): return True self.visited.append(self.pointer) incremento =", "salir = False while (salir == False): if (self.pointer in", "__init__(self): self.pointer = 0 self.accum = 0 self.visited = []", "self.visited): return False if (self.pointer >= len(program)): return True self.visited.append(self.pointer)", "in self.visited): return False if (self.pointer >= len(program)): return True" ]
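A usage sketch with the sample program from Advent of Code 2020, day 8;
this driver code is hypothetical and not part of the repository file.

program = [("nop", 0), ("acc", 1), ("jmp", 4), ("acc", 3), ("jmp", -3),
           ("acc", -99), ("acc", 1), ("jmp", -4), ("acc", 6)]

machine = Machine()
halted = machine.run(program)
print(halted, machine.getAccum())  # False 5: the loop is caught with acc == 5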
[ "of decimals Default value is: 0 Output (float) Rounded number", "-2) print('\\nRounding with -2 decimals') print('original number: {}, rounded: {},", "rounded: {}' .format(number, number_rounded, type(number_rounded))) # Rounding with -2 decimals", "{}, rounded: {}, type of rounded: {}' .format(number, number_rounded, type(number_rounded)))", "decimals value, the return number will be the nearest integer", "of rounded: {}' .format(number, number_rounded, type(number_rounded))) # Rounding with default", "Rounded number (int) Whether using the default decimals value, the", "rounded: {}, type of rounded: {}' .format(number, number_rounded, type(number_rounded))) #", "a number -------------- Input (float) A floating point number (int)", "0) print('\\nRounding with 0 decimals') print('original number: {}, rounded: {},", "type(number_rounded))) # Rounding with 0 decimals number_rounded = round(number, 0)", "0 decimals') print('original number: {}, rounded: {}, type of rounded:", "{}' .format(number, number_rounded, type(number_rounded))) # Rounding with 0 decimals number_rounded", "nearest integer \"\"\" number = 103.14159 # Rounding with 2", "integer (!) number_rounded = round(number) print('\\nRounding with default') print('original number:", "print('\\nRounding with 0 decimals') print('original number: {}, rounded: {}, type", "with -2 decimals') print('original number: {}, rounded: {}, type of", "rounded: {}' .format(number, number_rounded, type(number_rounded))) # Rounding with default #", "-------------- Input (float) A floating point number (int) Number of", "return number will be the nearest integer \"\"\" number =", "point number (int) Number of decimals Default value is: 0", "default') print('original number: {}, rounded: {}, type of rounded: {}'", "(int) Number of decimals Default value is: 0 Output (float)", "the default decimals value, the return number will be the", "decimals number_rounded = round(number, -2) print('\\nRounding with -2 decimals') print('original", "number (int) Number of decimals Default value is: 0 Output", "number_rounded = round(number) print('\\nRounding with default') print('original number: {}, rounded:", "\"\"\" number = 103.14159 # Rounding with 2 decimals number_rounded", "value is: 0 Output (float) Rounded number (int) Whether using", "default # Result will be integer (!) number_rounded = round(number)", "number (int) Whether using the default decimals value, the return", "# Rounding with -2 decimals number_rounded = round(number, -2) print('\\nRounding", "Output (float) Rounded number (int) Whether using the default decimals", "= round(number, 0) print('\\nRounding with 0 decimals') print('original number: {},", "Rounding with 0 decimals number_rounded = round(number, 0) print('\\nRounding with", "2 decimals') print('original number: {}, rounded: {}, type of rounded:", "= 103.14159 # Rounding with 2 decimals number_rounded = round(number,", "Round a number -------------- Input (float) A floating point number", "103.14159 # Rounding with 2 decimals number_rounded = round(number, 2)", "round(number, 2) print('Rounding with 2 decimals') print('original number: {}, rounded:", "be the nearest integer \"\"\" number = 103.14159 # Rounding", "number will be the nearest integer \"\"\" number = 103.14159", "print('\\nRounding with default') print('original number: {}, rounded: {}, type of", "will be integer (!) 
number_rounded = round(number) print('\\nRounding with default')", "number_rounded, type(number_rounded))) # Rounding with -2 decimals number_rounded = round(number,", "decimals number_rounded = round(number, 2) print('Rounding with 2 decimals') print('original", "Number of decimals Default value is: 0 Output (float) Rounded", "the nearest integer \"\"\" number = 103.14159 # Rounding with", "integer \"\"\" number = 103.14159 # Rounding with 2 decimals", ".format(number, number_rounded, type(number_rounded))) # Rounding with 0 decimals number_rounded =", "be integer (!) number_rounded = round(number) print('\\nRounding with default') print('original", "will be the nearest integer \"\"\" number = 103.14159 #", "with default') print('original number: {}, rounded: {}, type of rounded:", "of rounded: {}' .format(number, number_rounded, type(number_rounded))) # Rounding with 0", "Default value is: 0 Output (float) Rounded number (int) Whether", "decimals number_rounded = round(number, 0) print('\\nRounding with 0 decimals') print('original", "with 0 decimals number_rounded = round(number, 0) print('\\nRounding with 0", "Rounding with 2 decimals number_rounded = round(number, 2) print('Rounding with", "{}' .format(number, number_rounded, type(number_rounded))) # Rounding with default # Result", "Whether using the default decimals value, the return number will", "type of rounded: {}' .format(number, number_rounded, type(number_rounded))) # Rounding with", "\"\"\" Round a number -------------- Input (float) A floating point", "number: {}, rounded: {}, type of rounded: {}' .format(number, number_rounded,", "Rounding with -2 decimals number_rounded = round(number, -2) print('\\nRounding with", ".format(number, number_rounded, type(number_rounded))) # Rounding with -2 decimals number_rounded =", "Result will be integer (!) number_rounded = round(number) print('\\nRounding with", "default decimals value, the return number will be the nearest", "the return number will be the nearest integer \"\"\" number", "floating point number (int) Number of decimals Default value is:", "using the default decimals value, the return number will be", "with 2 decimals') print('original number: {}, rounded: {}, type of", "-2 decimals') print('original number: {}, rounded: {}, type of rounded:", "with 0 decimals') print('original number: {}, rounded: {}, type of", "<filename>010-round.py \"\"\" Round a number -------------- Input (float) A floating", "type(number_rounded))) # Rounding with -2 decimals number_rounded = round(number, -2)", "(!) number_rounded = round(number) print('\\nRounding with default') print('original number: {},", "# Rounding with 0 decimals number_rounded = round(number, 0) print('\\nRounding", "2) print('Rounding with 2 decimals') print('original number: {}, rounded: {},", "print('original number: {}, rounded: {}, type of rounded: {}' .format(number,", "with default # Result will be integer (!) 
number_rounded =", "# Rounding with default # Result will be integer (!)", "A floating point number (int) Number of decimals Default value", "0 Output (float) Rounded number (int) Whether using the default", "= round(number) print('\\nRounding with default') print('original number: {}, rounded: {},", "number_rounded = round(number, -2) print('\\nRounding with -2 decimals') print('original number:", "{}' .format(number, number_rounded, type(number_rounded))) # Rounding with -2 decimals number_rounded", "(float) A floating point number (int) Number of decimals Default", "of rounded: {}' .format(number, number_rounded, type(number_rounded))) # Rounding with -2", "value, the return number will be the nearest integer \"\"\"", "2 decimals number_rounded = round(number, 2) print('Rounding with 2 decimals')", "round(number, -2) print('\\nRounding with -2 decimals') print('original number: {}, rounded:", "rounded: {}' .format(number, number_rounded, type(number_rounded))) # Rounding with 0 decimals", "number_rounded = round(number, 0) print('\\nRounding with 0 decimals') print('original number:", "number_rounded = round(number, 2) print('Rounding with 2 decimals') print('original number:", "number -------------- Input (float) A floating point number (int) Number", "decimals') print('original number: {}, rounded: {}, type of rounded: {}'", "Rounding with default # Result will be integer (!) number_rounded", ".format(number, number_rounded, type(number_rounded))) # Rounding with default # Result will", "with 2 decimals number_rounded = round(number, 2) print('Rounding with 2", "= round(number, 2) print('Rounding with 2 decimals') print('original number: {},", "{}, type of rounded: {}' .format(number, number_rounded, type(number_rounded))) # Rounding", "is: 0 Output (float) Rounded number (int) Whether using the", "decimals Default value is: 0 Output (float) Rounded number (int)", "-2 decimals number_rounded = round(number, -2) print('\\nRounding with -2 decimals')", "# Rounding with 2 decimals number_rounded = round(number, 2) print('Rounding", "with -2 decimals number_rounded = round(number, -2) print('\\nRounding with -2", "(float) Rounded number (int) Whether using the default decimals value,", "print('\\nRounding with -2 decimals') print('original number: {}, rounded: {}, type", "round(number, 0) print('\\nRounding with 0 decimals') print('original number: {}, rounded:", "type(number_rounded))) # Rounding with default # Result will be integer", "# Result will be integer (!) number_rounded = round(number) print('\\nRounding", "number = 103.14159 # Rounding with 2 decimals number_rounded =", "(int) Whether using the default decimals value, the return number", "print('Rounding with 2 decimals') print('original number: {}, rounded: {}, type", "number_rounded, type(number_rounded))) # Rounding with default # Result will be", "number_rounded, type(number_rounded))) # Rounding with 0 decimals number_rounded = round(number,", "0 decimals number_rounded = round(number, 0) print('\\nRounding with 0 decimals')", "Input (float) A floating point number (int) Number of decimals", "round(number) print('\\nRounding with default') print('original number: {}, rounded: {}, type", "= round(number, -2) print('\\nRounding with -2 decimals') print('original number: {}," ]
[ "= 0 def step(self): id, text = self.reader.read_no_block() print(id,text) if", "0 def step(self): id, text = self.reader.read_no_block() print(id,text) if id:", "time.sleep(1) def main(): try: player = TagPlayer() while 1: player.step()", "if text != self._current: stripped_text = text.strip() print(\"Read text: \\\"{}\\\"\".format(stripped_text))", "= self.reader.read_no_block() print(id,text) if id: self._failed = 0 if text", "play.stop() time.sleep(1) def main(): try: player = TagPlayer() while 1:", "SimpleMFRC522() self._failed = 0 def step(self): id, text = self.reader.read_no_block()", "mfrc522 import SimpleMFRC522 import play import time class TagPlayer(object): def", "import play import time class TagPlayer(object): def __init__(self): self._current =", "from mfrc522 import SimpleMFRC522 import play import time class TagPlayer(object):", "__init__(self): self._current = None self.reader = SimpleMFRC522() self._failed = 0", "id, text = self.reader.read_no_block() print(id,text) if id: self._failed = 0", "0 if text != self._current: stripped_text = text.strip() print(\"Read text:", "\\\"{}\\\"\".format(stripped_text)) play.play(stripped_text) self._current = text elif self._current: self._failed += 1", "self._current: self._failed += 1 if self._failed > 2: self._current =", "<filename>service.py #!/usr/bin/env python3 import RPi.GPIO as GPIO from mfrc522 import", "def __init__(self): self._current = None self.reader = SimpleMFRC522() self._failed =", "#!/usr/bin/env python3 import RPi.GPIO as GPIO from mfrc522 import SimpleMFRC522", "= None self.reader = SimpleMFRC522() self._failed = 0 def step(self):", "import RPi.GPIO as GPIO from mfrc522 import SimpleMFRC522 import play", "step(self): id, text = self.reader.read_no_block() print(id,text) if id: self._failed =", "!= self._current: stripped_text = text.strip() print(\"Read text: \\\"{}\\\"\".format(stripped_text)) play.play(stripped_text) self._current", "self.reader.read_no_block() print(id,text) if id: self._failed = 0 if text !=", "print(\"Read text: \\\"{}\\\"\".format(stripped_text)) play.play(stripped_text) self._current = text elif self._current: self._failed", "import SimpleMFRC522 import play import time class TagPlayer(object): def __init__(self):", "elif self._current: self._failed += 1 if self._failed > 2: self._current", "+= 1 if self._failed > 2: self._current = None print(\"Stopping\")", "stripped_text = text.strip() print(\"Read text: \\\"{}\\\"\".format(stripped_text)) play.play(stripped_text) self._current = text", "self._failed = 0 def step(self): id, text = self.reader.read_no_block() print(id,text)", "RPi.GPIO as GPIO from mfrc522 import SimpleMFRC522 import play import", "import time class TagPlayer(object): def __init__(self): self._current = None self.reader", "self._current = text elif self._current: self._failed += 1 if self._failed", "as GPIO from mfrc522 import SimpleMFRC522 import play import time", "play import time class TagPlayer(object): def __init__(self): self._current = None", "self._current: stripped_text = text.strip() print(\"Read text: \\\"{}\\\"\".format(stripped_text)) play.play(stripped_text) self._current =", "= SimpleMFRC522() self._failed = 0 def step(self): id, text =", "text.strip() print(\"Read text: \\\"{}\\\"\".format(stripped_text)) play.play(stripped_text) self._current = text elif self._current:", "2: self._current = None print(\"Stopping\") play.stop() time.sleep(1) def main(): try:", "self.reader = SimpleMFRC522() self._failed = 0 def step(self): id, text", "if id: 
self._failed = 0 if text != self._current: stripped_text", "= text.strip() print(\"Read text: \\\"{}\\\"\".format(stripped_text)) play.play(stripped_text) self._current = text elif", "SimpleMFRC522 import play import time class TagPlayer(object): def __init__(self): self._current", "def step(self): id, text = self.reader.read_no_block() print(id,text) if id: self._failed", "TagPlayer() while 1: player.step() finally: GPIO.cleanup() if __name__ == \"__main__\":", "text = self.reader.read_no_block() print(id,text) if id: self._failed = 0 if", "text != self._current: stripped_text = text.strip() print(\"Read text: \\\"{}\\\"\".format(stripped_text)) play.play(stripped_text)", "self._failed = 0 if text != self._current: stripped_text = text.strip()", "TagPlayer(object): def __init__(self): self._current = None self.reader = SimpleMFRC522() self._failed", "= None print(\"Stopping\") play.stop() time.sleep(1) def main(): try: player =", "main(): try: player = TagPlayer() while 1: player.step() finally: GPIO.cleanup()", "= TagPlayer() while 1: player.step() finally: GPIO.cleanup() if __name__ ==", "try: player = TagPlayer() while 1: player.step() finally: GPIO.cleanup() if", "1 if self._failed > 2: self._current = None print(\"Stopping\") play.stop()", "print(\"Stopping\") play.stop() time.sleep(1) def main(): try: player = TagPlayer() while", "id: self._failed = 0 if text != self._current: stripped_text =", "self._failed += 1 if self._failed > 2: self._current = None", "None print(\"Stopping\") play.stop() time.sleep(1) def main(): try: player = TagPlayer()", "if self._failed > 2: self._current = None print(\"Stopping\") play.stop() time.sleep(1)", "None self.reader = SimpleMFRC522() self._failed = 0 def step(self): id,", "class TagPlayer(object): def __init__(self): self._current = None self.reader = SimpleMFRC522()", "> 2: self._current = None print(\"Stopping\") play.stop() time.sleep(1) def main():", "play.play(stripped_text) self._current = text elif self._current: self._failed += 1 if", "while 1: player.step() finally: GPIO.cleanup() if __name__ == \"__main__\": main()", "player = TagPlayer() while 1: player.step() finally: GPIO.cleanup() if __name__", "self._current = None self.reader = SimpleMFRC522() self._failed = 0 def", "print(id,text) if id: self._failed = 0 if text != self._current:", "= text elif self._current: self._failed += 1 if self._failed >", "def main(): try: player = TagPlayer() while 1: player.step() finally:", "text: \\\"{}\\\"\".format(stripped_text)) play.play(stripped_text) self._current = text elif self._current: self._failed +=", "self._current = None print(\"Stopping\") play.stop() time.sleep(1) def main(): try: player", "time class TagPlayer(object): def __init__(self): self._current = None self.reader =", "text elif self._current: self._failed += 1 if self._failed > 2:", "python3 import RPi.GPIO as GPIO from mfrc522 import SimpleMFRC522 import", "GPIO from mfrc522 import SimpleMFRC522 import play import time class", "= 0 if text != self._current: stripped_text = text.strip() print(\"Read", "self._failed > 2: self._current = None print(\"Stopping\") play.stop() time.sleep(1) def" ]
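# The `play` module imported by service.py is project-local and not shown
# here. A minimal sketch of what it might look like -- the mpg123 player and
# the /home/pi/music/<text>.mp3 naming scheme are illustrative assumptions,
# not taken from the original project:
import subprocess

_process = None


def play(name):
    """Stop any current track, then play the file named after the tag text."""
    global _process
    stop()
    _process = subprocess.Popen(['mpg123', '-q', '/home/pi/music/{}.mp3'.format(name)])


def stop():
    """Terminate the current player process, if any."""
    global _process
    if _process is not None:
        _process.terminate()
        _process = None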
[ "type: Final SHARED_CONFIG_FILES = ['setup.cfg', ] # type: Final USER_CONFIG_FILES", "# type: Final SHARED_CONFIG_FILES = ['setup.cfg', ] # type: Final", "Final PYTHON2_VERSION = (2, 7) # type: Final PYTHON3_VERSION =", "must include all reporters defined in mypy.report. This is defined", "# type: Final USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini', ] # type:", "type: Final PYTHON3_VERSION = (3, 6) # type: Final PYTHON3_VERSION_MIN", "# type: Final # This must include all reporters defined", "os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config')) CONFIG_FILES = [CONFIG_FILE, ] + SHARED_CONFIG_FILES + USER_CONFIG_FILES", "available without importing mypy.report -- this speeds # up startup.", "Final if os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config')) CONFIG_FILES = [CONFIG_FILE, ]", "This is defined here # to make reporter names available", "# type: Final PYTHON3_VERSION_MIN = (3, 4) # type: Final", "here # to make reporter names available without importing mypy.report", "['~/.config/mypy/config', '~/.mypy.ini', ] # type: Final if os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'],", "PYTHON3_VERSION_MIN = (3, 4) # type: Final CACHE_DIR = '.mypy_cache'", "USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config')) CONFIG_FILES = [CONFIG_FILE, ] + SHARED_CONFIG_FILES +", "'.mypy_cache' # type: Final CONFIG_FILE = 'mypy.ini' # type: Final", "# type: Final CACHE_DIR = '.mypy_cache' # type: Final CONFIG_FILE", "(3, 4) # type: Final CACHE_DIR = '.mypy_cache' # type:", "['setup.cfg', ] # type: Final USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini', ]", "7) # type: Final PYTHON3_VERSION = (3, 6) # type:", "# up startup. REPORTER_NAMES = ['linecount', 'any-exprs', 'linecoverage', 'memory-xml', 'cobertura-xml',", "Final PYTHON3_VERSION_MIN = (3, 4) # type: Final CACHE_DIR =", "type: Final PYTHON3_VERSION_MIN = (3, 4) # type: Final CACHE_DIR", "= ['setup.cfg', ] # type: Final USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini',", "type: Final CACHE_DIR = '.mypy_cache' # type: Final CONFIG_FILE =", "typing_extensions import Final PYTHON2_VERSION = (2, 7) # type: Final", "Final USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini', ] # type: Final if", "'mypy.ini' # type: Final SHARED_CONFIG_FILES = ['setup.cfg', ] # type:", "= (2, 7) # type: Final PYTHON3_VERSION = (3, 6)", "4) # type: Final CACHE_DIR = '.mypy_cache' # type: Final", "without importing mypy.report -- this speeds # up startup. REPORTER_NAMES", "speeds # up startup. REPORTER_NAMES = ['linecount', 'any-exprs', 'linecoverage', 'memory-xml',", "= [CONFIG_FILE, ] + SHARED_CONFIG_FILES + USER_CONFIG_FILES # type: Final", "Final CACHE_DIR = '.mypy_cache' # type: Final CONFIG_FILE = 'mypy.ini'", "'mypy/config')) CONFIG_FILES = [CONFIG_FILE, ] + SHARED_CONFIG_FILES + USER_CONFIG_FILES #", "['linecount', 'any-exprs', 'linecoverage', 'memory-xml', 'cobertura-xml', 'xml', 'xslt-html', 'xslt-txt', 'html', 'txt']", "= False if MYPY: from typing_extensions import Final PYTHON2_VERSION =", "# type: Final PYTHON3_VERSION = (3, 6) # type: Final", "defined in mypy.report. 
This is defined here # to make", "USER_CONFIG_FILES # type: Final # This must include all reporters", "'any-exprs', 'linecoverage', 'memory-xml', 'cobertura-xml', 'xml', 'xslt-html', 'xslt-txt', 'html', 'txt'] #", "MYPY = False if MYPY: from typing_extensions import Final PYTHON2_VERSION", "Final PYTHON3_VERSION = (3, 6) # type: Final PYTHON3_VERSION_MIN =", "CACHE_DIR = '.mypy_cache' # type: Final CONFIG_FILE = 'mypy.ini' #", "include all reporters defined in mypy.report. This is defined here", "type: Final if os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config')) CONFIG_FILES = [CONFIG_FILE,", "False if MYPY: from typing_extensions import Final PYTHON2_VERSION = (2,", "] # type: Final USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini', ] #", "reporter names available without importing mypy.report -- this speeds #", "os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config')) CONFIG_FILES = [CONFIG_FILE, ] + SHARED_CONFIG_FILES", "startup. REPORTER_NAMES = ['linecount', 'any-exprs', 'linecoverage', 'memory-xml', 'cobertura-xml', 'xml', 'xslt-html',", "PYTHON2_VERSION = (2, 7) # type: Final PYTHON3_VERSION = (3,", "# type: Final if os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config')) CONFIG_FILES =", "PYTHON3_VERSION = (3, 6) # type: Final PYTHON3_VERSION_MIN = (3,", "import os MYPY = False if MYPY: from typing_extensions import", "Final SHARED_CONFIG_FILES = ['setup.cfg', ] # type: Final USER_CONFIG_FILES =", "up startup. REPORTER_NAMES = ['linecount', 'any-exprs', 'linecoverage', 'memory-xml', 'cobertura-xml', 'xml',", "[CONFIG_FILE, ] + SHARED_CONFIG_FILES + USER_CONFIG_FILES # type: Final #", "<gh_stars>0 import os MYPY = False if MYPY: from typing_extensions", "is defined here # to make reporter names available without", "= ['~/.config/mypy/config', '~/.mypy.ini', ] # type: Final if os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0,", "all reporters defined in mypy.report. This is defined here #", "in mypy.report. This is defined here # to make reporter", "CONFIG_FILE = 'mypy.ini' # type: Final SHARED_CONFIG_FILES = ['setup.cfg', ]", "= 'mypy.ini' # type: Final SHARED_CONFIG_FILES = ['setup.cfg', ] #", "= (3, 6) # type: Final PYTHON3_VERSION_MIN = (3, 4)", "6) # type: Final PYTHON3_VERSION_MIN = (3, 4) # type:", "REPORTER_NAMES = ['linecount', 'any-exprs', 'linecoverage', 'memory-xml', 'cobertura-xml', 'xml', 'xslt-html', 'xslt-txt',", "names available without importing mypy.report -- this speeds # up", "(2, 7) # type: Final PYTHON3_VERSION = (3, 6) #", "SHARED_CONFIG_FILES = ['setup.cfg', ] # type: Final USER_CONFIG_FILES = ['~/.config/mypy/config',", "type: Final USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini', ] # type: Final", "os MYPY = False if MYPY: from typing_extensions import Final", "CONFIG_FILES = [CONFIG_FILE, ] + SHARED_CONFIG_FILES + USER_CONFIG_FILES # type:", "-- this speeds # up startup. 
REPORTER_NAMES = ['linecount', 'any-exprs',", "'~/.mypy.ini', ] # type: Final if os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config'))", "Final CONFIG_FILE = 'mypy.ini' # type: Final SHARED_CONFIG_FILES = ['setup.cfg',", "from typing_extensions import Final PYTHON2_VERSION = (2, 7) # type:", "type: Final # This must include all reporters defined in", "+ SHARED_CONFIG_FILES + USER_CONFIG_FILES # type: Final # This must", "mypy.report -- this speeds # up startup. REPORTER_NAMES = ['linecount',", "# type: Final CONFIG_FILE = 'mypy.ini' # type: Final SHARED_CONFIG_FILES", "= (3, 4) # type: Final CACHE_DIR = '.mypy_cache' #", "if os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config')) CONFIG_FILES = [CONFIG_FILE, ] +", "reporters defined in mypy.report. This is defined here # to", "MYPY: from typing_extensions import Final PYTHON2_VERSION = (2, 7) #", "# This must include all reporters defined in mypy.report. This", "to make reporter names available without importing mypy.report -- this", "if MYPY: from typing_extensions import Final PYTHON2_VERSION = (2, 7)", "'linecoverage', 'memory-xml', 'cobertura-xml', 'xml', 'xslt-html', 'xslt-txt', 'html', 'txt'] # type:", "= ['linecount', 'any-exprs', 'linecoverage', 'memory-xml', 'cobertura-xml', 'xml', 'xslt-html', 'xslt-txt', 'html',", "defined here # to make reporter names available without importing", "(3, 6) # type: Final PYTHON3_VERSION_MIN = (3, 4) #", "Final # This must include all reporters defined in mypy.report.", "] # type: Final if os.environ.get('XDG_CONFIG_HOME'): USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config')) CONFIG_FILES", "import Final PYTHON2_VERSION = (2, 7) # type: Final PYTHON3_VERSION", "type: Final CONFIG_FILE = 'mypy.ini' # type: Final SHARED_CONFIG_FILES =", "'memory-xml', 'cobertura-xml', 'xml', 'xslt-html', 'xslt-txt', 'html', 'txt'] # type: Final", "this speeds # up startup. REPORTER_NAMES = ['linecount', 'any-exprs', 'linecoverage',", "+ USER_CONFIG_FILES # type: Final # This must include all", "# to make reporter names available without importing mypy.report --", "This must include all reporters defined in mypy.report. This is", "importing mypy.report -- this speeds # up startup. REPORTER_NAMES =", "] + SHARED_CONFIG_FILES + USER_CONFIG_FILES # type: Final # This", "USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini', ] # type: Final if os.environ.get('XDG_CONFIG_HOME'):", "make reporter names available without importing mypy.report -- this speeds", "mypy.report. This is defined here # to make reporter names", "= '.mypy_cache' # type: Final CONFIG_FILE = 'mypy.ini' # type:", "SHARED_CONFIG_FILES + USER_CONFIG_FILES # type: Final # This must include" ]
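# Illustration of how the search order above could be consumed; the helper
# name `find_config_file` is hypothetical, not part of this module:
def find_config_file():
    """Return the first existing config file in CONFIG_FILES order, or None."""
    for path in CONFIG_FILES:
        expanded = os.path.expanduser(path)
        if os.path.isfile(expanded):
            return expanded
    return None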
[ "the values proportionally to made all the matrix/weights positive along", "from ..core import SKCMatrixAndWeightTransformerABC from ..utils import doc_inherit # =============================================================================", "first one (with the negative value) is affected >>> push_negatives(mtx_lt0,", "values this function increment the values proportionally to made all", "valuer are sean >= 0. If the matrix/weights has negative", "* mins return arr - delta class PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment the", "# by row only the first row (with the negative", ".. code-block:: pycon >>> from skcriteria.preprocess import push_negatives >>> mtx", "} X_{ij} < 0\\\\ X_{ij} & \\text{otherwise} \\end{cases} \"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)", "with values axis : :py:class:`int` optional Axis along which to", "\"\"\" # ============================================================================= # IMPORTS # ============================================================================= import numpy as", "# ============================================================================= # IMPORTS # ============================================================================= import numpy as np", "4]]) \"\"\" arr = np.asarray(arr) mins = np.min(arr, axis=axis, keepdims=True)", ">>> push_negatives(mtx_lt0, axis=1) array([[0, 3], [3, 4]]) \"\"\" arr =", "IMPORTS # ============================================================================= import numpy as np from ..core import", "---------- arr: :py:class:`numpy.ndarray` like. A array with values axis :", "np.min(arr, axis=axis, keepdims=True) delta = (mins < 0) * mins", "arr: :py:class:`numpy.ndarray` like. A array with values axis : :py:class:`int`", "\\end{cases} Parameters ---------- arr: :py:class:`numpy.ndarray` like. A array with values", "by 1 to eliminate the negative >>> push_negatives(mtx_lt0) array([[0, 3],", "functionality, an MCDA agnostic function is offered to push negatives", "are sean >= 0. If the matrix/weights has negative values", "3], [3, 4]]) \"\"\" arr = np.asarray(arr) mins = np.min(arr,", "values axis : :py:class:`int` optional Axis along which to operate.", "by row only the first row (with the negative value)", "============================================================================= def push_negatives(arr, axis): r\"\"\"Increment the array until all the", "< 0\\\\ X_{ij} & \\text{otherwise} \\end{cases} Parameters ---------- arr: :py:class:`numpy.ndarray`", "all the array positive along an axis. .. math:: \\overline{X}_{ij}", "made all the matrix/weights positive along an axis. .. math::", "= (mins < 0) * mins return arr - delta", "axis): r\"\"\"Increment the array until all the valuer are sean", "License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright (c) 2016-2021, <NAME>; Luczywo, Nadia", "an axis. .. math:: \\overline{X}_{ij} = \\begin{cases} X_{ij} + min_{X_{ij}}", "a negative value >>> push_negatives(mtx) # array without negatives don't", "r\"\"\"Increment the matrix/weights until all the valuer are sean >=", "matrix/weights until all the valuer are sean >= 0. 
If", "import doc_inherit # ============================================================================= # FUNCTIONS # ============================================================================= def push_negatives(arr,", "push_negatives(mtx_lt0) array([[0, 3], [4, 5]]) # by column only the", "by column only the first one (with the negative value)", "Examples -------- .. code-block:: pycon >>> from skcriteria.preprocess import push_negatives", ">>> push_negatives(mtx) # array without negatives don't be affected array([[1,", "4]] >>> mtx_lt0 = [[-1, 2], [3, 4]] # has", "operate. By default, flattened input is used. Returns ------- :py:class:`numpy.ndarray`", "All rights reserved. # ============================================================================= # DOCS # ============================================================================= \"\"\"Functionalities", "np from ..core import SKCMatrixAndWeightTransformerABC from ..utils import doc_inherit #", "[3, 4]] # has a negative value >>> push_negatives(mtx) #", "If an array has negative values this function increment the", "[3, 4]]) # all the array is incremented by 1", "with all values >= 0. Examples -------- .. code-block:: pycon", "weights): return push_negatives(weights, axis=None) @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix) def _transform_matrix(self, matrix): return push_negatives(matrix,", "arbitrary axis. \"\"\" # ============================================================================= # IMPORTS # ============================================================================= import", "like. A array with values axis : :py:class:`int` optional Axis", ">= 0. If the matrix/weights has negative values this function", "the matrix/weights positive along an axis. .. math:: \\overline{X}_{ij} =", "BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright (c) 2016-2021, <NAME>; Luczywo, Nadia #", "column only the first one (with the negative value) is", "In addition to the main functionality, an MCDA agnostic function", "arr - delta class PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment the matrix/weights until all", "axis. \"\"\" # ============================================================================= # IMPORTS # ============================================================================= import numpy", "#!/usr/bin/env python # -*- coding: utf-8 -*- # License: BSD-3", "proportionally to made all the matrix/weights positive along an axis.", "# ============================================================================= def push_negatives(arr, axis): r\"\"\"Increment the array until all", ": :py:class:`int` optional Axis along which to operate. By default,", ".. math:: \\overline{X}_{ij} = \\begin{cases} X_{ij} + min_{X_{ij}} & \\text{if", "the array until all the valuer are sean >= 0.", "# array without negatives don't be affected array([[1, 2], [3,", "& \\text{otherwise} \\end{cases} \"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def _transform_weights(self, weights): return push_negatives(weights,", "delta = (mins < 0) * mins return arr -", "mins = np.min(arr, axis=axis, keepdims=True) delta = (mins < 0)", "-*- # License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright (c) 2016-2021, <NAME>;", "< 0) * mins return arr - delta class PushNegatives(SKCMatrixAndWeightTransformerABC):", "import numpy as np from ..core import SKCMatrixAndWeightTransformerABC from ..utils", "input is used. 
Returns ------- :py:class:`numpy.ndarray` array with all values", "along an axis. .. math:: \\overline{X}_{ij} = \\begin{cases} X_{ij} +", "0. Examples -------- .. code-block:: pycon >>> from skcriteria.preprocess import", "\\text{if } X_{ij} < 0\\\\ X_{ij} & \\text{otherwise} \\end{cases} Parameters", "+ min_{X_{ij}} & \\text{if } X_{ij} < 0\\\\ X_{ij} &", "all values >= 0. Examples -------- .. code-block:: pycon >>>", "until all the valuer are sean >= 0. If the", "axis=axis, keepdims=True) delta = (mins < 0) * mins return", ">= 0. If an array has negative values this function", "array without negatives don't be affected array([[1, 2], [3, 4]])", "..utils import doc_inherit # ============================================================================= # FUNCTIONS # ============================================================================= def", "-------- .. code-block:: pycon >>> from skcriteria.preprocess import push_negatives >>>", "\\text{if } X_{ij} < 0\\\\ X_{ij} & \\text{otherwise} \\end{cases} \"\"\"", "min_{X_{ij}} & \\text{if } X_{ij} < 0\\\\ X_{ij} & \\text{otherwise}", "from skcriteria.preprocess import push_negatives >>> mtx = [[1, 2], [3,", "don't be affected array([[1, 2], [3, 4]]) # all the", "negative value) is affected >>> push_negatives(mtx_lt0, axis=1) array([[0, 3], [3,", "============================================================================= \"\"\"Functionalities for remove negatives from criteria. In addition to", "X_{ij} & \\text{otherwise} \\end{cases} \"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def _transform_weights(self, weights): return", ">= 0. Examples -------- .. code-block:: pycon >>> from skcriteria.preprocess", "the valuer are sean >= 0. If the matrix/weights has", "4]]) # by row only the first row (with the", "# all the array is incremented by 1 to eliminate", "made all the array positive along an axis. .. math::", "the matrix/weights until all the valuer are sean >= 0.", "push_negatives >>> mtx = [[1, 2], [3, 4]] >>> mtx_lt0", "<NAME>; Luczywo, Nadia # All rights reserved. # ============================================================================= #", "FUNCTIONS # ============================================================================= def push_negatives(arr, axis): r\"\"\"Increment the array until", "pycon >>> from skcriteria.preprocess import push_negatives >>> mtx = [[1,", "============================================================================= # IMPORTS # ============================================================================= import numpy as np from", "(mins < 0) * mins return arr - delta class", "# ============================================================================= import numpy as np from ..core import SKCMatrixAndWeightTransformerABC", "array([[0, 3], [3, 4]]) \"\"\" arr = np.asarray(arr) mins =", "from criteria. In addition to the main functionality, an MCDA", "values >= 0. Examples -------- .. code-block:: pycon >>> from", "mins return arr - delta class PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment the matrix/weights", "all the valuer are sean >= 0. If an array", "an MCDA agnostic function is offered to push negatives values", "..core import SKCMatrixAndWeightTransformerABC from ..utils import doc_inherit # ============================================================================= #", "0\\\\ X_{ij} & \\text{otherwise} \\end{cases} \"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def _transform_weights(self, weights):", "0. 
If the matrix/weights has negative values this function increment", "============================================================================= # FUNCTIONS # ============================================================================= def push_negatives(arr, axis): r\"\"\"Increment the", "0. If an array has negative values this function increment", "def push_negatives(arr, axis): r\"\"\"Increment the array until all the valuer", "array([[0, 3], [4, 5]]) # by column only the first", "= [[1, 2], [3, 4]] >>> mtx_lt0 = [[-1, 2],", "(c) 2016-2021, <NAME>; Luczywo, Nadia # All rights reserved. #", "function increment the values proportionally to made all the array", "Nadia # All rights reserved. # ============================================================================= # DOCS #", "axis. .. math:: \\overline{X}_{ij} = \\begin{cases} X_{ij} + min_{X_{ij}} &", "@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def _transform_weights(self, weights): return push_negatives(weights, axis=None) @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix) def _transform_matrix(self,", "used. Returns ------- :py:class:`numpy.ndarray` array with all values >= 0.", "valuer are sean >= 0. If an array has negative", "[3, 4]]) \"\"\" arr = np.asarray(arr) mins = np.min(arr, axis=axis,", "numpy as np from ..core import SKCMatrixAndWeightTransformerABC from ..utils import", "all the matrix/weights positive along an axis. .. math:: \\overline{X}_{ij}", "value >>> push_negatives(mtx) # array without negatives don't be affected", "Copyright (c) 2016-2021, <NAME>; Luczywo, Nadia # All rights reserved.", "the values proportionally to made all the array positive along", "array until all the valuer are sean >= 0. If", "the negative value) is affected >>> push_negatives(mtx_lt0, axis=0) array([[0, 2],", "row only the first row (with the negative value) is", "Returns ------- :py:class:`numpy.ndarray` array with all values >= 0. Examples", ">>> push_negatives(mtx_lt0, axis=0) array([[0, 2], [4, 4]]) # by row", "push_negatives(mtx_lt0, axis=1) array([[0, 3], [3, 4]]) \"\"\" arr = np.asarray(arr)", "3], [4, 5]]) # by column only the first one", "\\overline{X}_{ij} = \\begin{cases} X_{ij} + min_{X_{ij}} & \\text{if } X_{ij}", "for remove negatives from criteria. In addition to the main", "the first row (with the negative value) is affected >>>", "arr = np.asarray(arr) mins = np.min(arr, axis=axis, keepdims=True) delta =", "np.asarray(arr) mins = np.min(arr, axis=axis, keepdims=True) delta = (mins <", "to the main functionality, an MCDA agnostic function is offered", "an arbitrary axis. \"\"\" # ============================================================================= # IMPORTS # =============================================================================", "- delta class PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment the matrix/weights until all the", "matrix/weights positive along an axis. .. math:: \\overline{X}_{ij} = \\begin{cases}", "\"\"\" arr = np.asarray(arr) mins = np.min(arr, axis=axis, keepdims=True) delta", "criteria. 
In addition to the main functionality, an MCDA agnostic", "push_negatives(mtx) # array without negatives don't be affected array([[1, 2],", "from ..utils import doc_inherit # ============================================================================= # FUNCTIONS # =============================================================================", "mtx = [[1, 2], [3, 4]] >>> mtx_lt0 = [[-1,", "& \\text{if } X_{ij} < 0\\\\ X_{ij} & \\text{otherwise} \\end{cases}", "[[-1, 2], [3, 4]] # has a negative value >>>", "has a negative value >>> push_negatives(mtx) # array without negatives", "# FUNCTIONS # ============================================================================= def push_negatives(arr, axis): r\"\"\"Increment the array", "delta class PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment the matrix/weights until all the valuer", "PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment the matrix/weights until all the valuer are sean", "2016-2021, <NAME>; Luczywo, Nadia # All rights reserved. # =============================================================================", "agnostic function is offered to push negatives values on an", "X_{ij} < 0\\\\ X_{ij} & \\text{otherwise} \\end{cases} \"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def", "without negatives don't be affected array([[1, 2], [3, 4]]) #", "# ============================================================================= \"\"\"Functionalities for remove negatives from criteria. In addition", "be affected array([[1, 2], [3, 4]]) # all the array", "5]]) # by column only the first one (with the", "to made all the matrix/weights positive along an axis. ..", "array([[1, 2], [3, 4]]) # all the array is incremented", "first row (with the negative value) is affected >>> push_negatives(mtx_lt0,", "remove negatives from criteria. In addition to the main functionality,", "r\"\"\"Increment the array until all the valuer are sean >=", "optional Axis along which to operate. By default, flattened input", "0\\\\ X_{ij} & \\text{otherwise} \\end{cases} Parameters ---------- arr: :py:class:`numpy.ndarray` like.", "sean >= 0. If the matrix/weights has negative values this", "negative values this function increment the values proportionally to made", "\\begin{cases} X_{ij} + min_{X_{ij}} & \\text{if } X_{ij} < 0\\\\", "def _transform_weights(self, weights): return push_negatives(weights, axis=None) @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix) def _transform_matrix(self, matrix):", "along which to operate. By default, flattened input is used.", "the first one (with the negative value) is affected >>>", "is offered to push negatives values on an array along", "1 to eliminate the negative >>> push_negatives(mtx_lt0) array([[0, 3], [4,", "array has negative values this function increment the values proportionally", "array([[0, 2], [4, 4]]) # by row only the first", "coding: utf-8 -*- # License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright (c)", "incremented by 1 to eliminate the negative >>> push_negatives(mtx_lt0) array([[0,", "[4, 4]]) # by row only the first row (with", "until all the valuer are sean >= 0. 
If an", "\\end{cases} \"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def _transform_weights(self, weights): return push_negatives(weights, axis=None) @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)", ">>> mtx = [[1, 2], [3, 4]] >>> mtx_lt0 =", "# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright (c) 2016-2021, <NAME>; Luczywo,", "only the first one (with the negative value) is affected", "value) is affected >>> push_negatives(mtx_lt0, axis=0) array([[0, 2], [4, 4]])", "= np.min(arr, axis=axis, keepdims=True) delta = (mins < 0) *", ":py:class:`numpy.ndarray` array with all values >= 0. Examples -------- ..", "2], [3, 4]] >>> mtx_lt0 = [[-1, 2], [3, 4]]", "4]] # has a negative value >>> push_negatives(mtx) # array", "skcriteria.preprocess import push_negatives >>> mtx = [[1, 2], [3, 4]]", "return arr - delta class PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment the matrix/weights until", "an array along an arbitrary axis. \"\"\" # ============================================================================= #", "} X_{ij} < 0\\\\ X_{ij} & \\text{otherwise} \\end{cases} Parameters ----------", "[[1, 2], [3, 4]] >>> mtx_lt0 = [[-1, 2], [3,", "axis=0) array([[0, 2], [4, 4]]) # by row only the", "is incremented by 1 to eliminate the negative >>> push_negatives(mtx_lt0)", "on an array along an arbitrary axis. \"\"\" # =============================================================================", "X_{ij} + min_{X_{ij}} & \\text{if } X_{ij} < 0\\\\ X_{ij}", "eliminate the negative >>> push_negatives(mtx_lt0) array([[0, 3], [4, 5]]) #", "the valuer are sean >= 0. If an array has", "By default, flattened input is used. Returns ------- :py:class:`numpy.ndarray` array", "push negatives values on an array along an arbitrary axis.", "affected >>> push_negatives(mtx_lt0, axis=0) array([[0, 2], [4, 4]]) # by", "\\text{otherwise} \\end{cases} Parameters ---------- arr: :py:class:`numpy.ndarray` like. A array with", "is used. Returns ------- :py:class:`numpy.ndarray` array with all values >=", "python # -*- coding: utf-8 -*- # License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))", "# DOCS # ============================================================================= \"\"\"Functionalities for remove negatives from criteria.", "doc_inherit # ============================================================================= # FUNCTIONS # ============================================================================= def push_negatives(arr, axis):", "to operate. By default, flattened input is used. Returns -------", "offered to push negatives values on an array along an", "is affected >>> push_negatives(mtx_lt0, axis=1) array([[0, 3], [3, 4]]) \"\"\"", "increment the values proportionally to made all the matrix/weights positive", "negative >>> push_negatives(mtx_lt0) array([[0, 3], [4, 5]]) # by column", "push_negatives(arr, axis): r\"\"\"Increment the array until all the valuer are", "the main functionality, an MCDA agnostic function is offered to", "Luczywo, Nadia # All rights reserved. # ============================================================================= # DOCS", "Axis along which to operate. By default, flattened input is", "= [[-1, 2], [3, 4]] # has a negative value", "matrix/weights has negative values this function increment the values proportionally", "array positive along an axis. .. 
math:: \\overline{X}_{ij} = \\begin{cases}", "negatives values on an array along an arbitrary axis. \"\"\"", "Parameters ---------- arr: :py:class:`numpy.ndarray` like. A array with values axis", "code-block:: pycon >>> from skcriteria.preprocess import push_negatives >>> mtx =", "all the valuer are sean >= 0. If the matrix/weights", "# -*- coding: utf-8 -*- # License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) #", "class PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment the matrix/weights until all the valuer are", "the matrix/weights has negative values this function increment the values", "row (with the negative value) is affected >>> push_negatives(mtx_lt0, axis=1)", "If the matrix/weights has negative values this function increment the", "return push_negatives(weights, axis=None) @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix) def _transform_matrix(self, matrix): return push_negatives(matrix, axis=0)", "_transform_weights(self, weights): return push_negatives(weights, axis=None) @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix) def _transform_matrix(self, matrix): return", "(with the negative value) is affected >>> push_negatives(mtx_lt0, axis=1) array([[0,", ">>> from skcriteria.preprocess import push_negatives >>> mtx = [[1, 2],", "reserved. # ============================================================================= # DOCS # ============================================================================= \"\"\"Functionalities for remove", "an array has negative values this function increment the values", "2], [3, 4]] # has a negative value >>> push_negatives(mtx)", "affected array([[1, 2], [3, 4]]) # all the array is", "# ============================================================================= # FUNCTIONS # ============================================================================= def push_negatives(arr, axis): r\"\"\"Increment", "# All rights reserved. # ============================================================================= # DOCS # =============================================================================", "# Copyright (c) 2016-2021, <NAME>; Luczywo, Nadia # All rights", "array with values axis : :py:class:`int` optional Axis along which", "0) * mins return arr - delta class PushNegatives(SKCMatrixAndWeightTransformerABC): r\"\"\"Increment", ":py:class:`numpy.ndarray` like. A array with values axis : :py:class:`int` optional", "main functionality, an MCDA agnostic function is offered to push", "negatives don't be affected array([[1, 2], [3, 4]]) # all", "the negative value) is affected >>> push_negatives(mtx_lt0, axis=1) array([[0, 3],", "\\text{otherwise} \\end{cases} \"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def _transform_weights(self, weights): return push_negatives(weights, axis=None)", "negative value >>> push_negatives(mtx) # array without negatives don't be", "X_{ij} < 0\\\\ X_{ij} & \\text{otherwise} \\end{cases} Parameters ---------- arr:", "& \\text{otherwise} \\end{cases} Parameters ---------- arr: :py:class:`numpy.ndarray` like. 
A array", "has negative values this function increment the values proportionally to", "function increment the values proportionally to made all the matrix/weights", "============================================================================= # DOCS # ============================================================================= \"\"\"Functionalities for remove negatives from", "import SKCMatrixAndWeightTransformerABC from ..utils import doc_inherit # ============================================================================= # FUNCTIONS", "# ============================================================================= # DOCS # ============================================================================= \"\"\"Functionalities for remove negatives", "values on an array along an arbitrary axis. \"\"\" #", "one (with the negative value) is affected >>> push_negatives(mtx_lt0, axis=0)", "default, flattened input is used. Returns ------- :py:class:`numpy.ndarray` array with", "is affected >>> push_negatives(mtx_lt0, axis=0) array([[0, 2], [4, 4]]) #", "utf-8 -*- # License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright (c) 2016-2021,", "increment the values proportionally to made all the array positive", "# IMPORTS # ============================================================================= import numpy as np from ..core", "[4, 5]]) # by column only the first one (with", ">>> mtx_lt0 = [[-1, 2], [3, 4]] # has a", "SKCMatrixAndWeightTransformerABC from ..utils import doc_inherit # ============================================================================= # FUNCTIONS #", "sean >= 0. If an array has negative values this", "this function increment the values proportionally to made all the", "2], [3, 4]]) # all the array is incremented by", "are sean >= 0. If an array has negative values", "MCDA agnostic function is offered to push negatives values on", ">>> push_negatives(mtx_lt0) array([[0, 3], [4, 5]]) # by column only", "< 0\\\\ X_{ij} & \\text{otherwise} \\end{cases} \"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def _transform_weights(self,", "math:: \\overline{X}_{ij} = \\begin{cases} X_{ij} + min_{X_{ij}} & \\text{if }", "only the first row (with the negative value) is affected", "------- :py:class:`numpy.ndarray` array with all values >= 0. Examples --------", "(with the negative value) is affected >>> push_negatives(mtx_lt0, axis=0) array([[0,", "the negative >>> push_negatives(mtx_lt0) array([[0, 3], [4, 5]]) # by", "import push_negatives >>> mtx = [[1, 2], [3, 4]] >>>", "to eliminate the negative >>> push_negatives(mtx_lt0) array([[0, 3], [4, 5]])", "keepdims=True) delta = (mins < 0) * mins return arr", "array is incremented by 1 to eliminate the negative >>>", "negative value) is affected >>> push_negatives(mtx_lt0, axis=0) array([[0, 2], [4,", "= np.asarray(arr) mins = np.min(arr, axis=axis, keepdims=True) delta = (mins", "axis : :py:class:`int` optional Axis along which to operate. By", "all the array is incremented by 1 to eliminate the", "as np from ..core import SKCMatrixAndWeightTransformerABC from ..utils import doc_inherit", "array along an arbitrary axis. \"\"\" # ============================================================================= # IMPORTS", "the array positive along an axis. .. 
math:: \\overline{X}_{ij} =", "# by column only the first one (with the negative", "============================================================================= import numpy as np from ..core import SKCMatrixAndWeightTransformerABC from", "values proportionally to made all the matrix/weights positive along an", "# has a negative value >>> push_negatives(mtx) # array without", "affected >>> push_negatives(mtx_lt0, axis=1) array([[0, 3], [3, 4]]) \"\"\" arr", "positive along an axis. .. math:: \\overline{X}_{ij} = \\begin{cases} X_{ij}", "X_{ij} & \\text{otherwise} \\end{cases} Parameters ---------- arr: :py:class:`numpy.ndarray` like. A", "values proportionally to made all the array positive along an", "2], [4, 4]]) # by row only the first row", "which to operate. By default, flattened input is used. Returns", ":py:class:`int` optional Axis along which to operate. By default, flattened", "[3, 4]] >>> mtx_lt0 = [[-1, 2], [3, 4]] #", "-*- coding: utf-8 -*- # License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright", "axis=1) array([[0, 3], [3, 4]]) \"\"\" arr = np.asarray(arr) mins", "mtx_lt0 = [[-1, 2], [3, 4]] # has a negative", "array with all values >= 0. Examples -------- .. code-block::", "\"\"\" @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights) def _transform_weights(self, weights): return push_negatives(weights, axis=None) @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix) def", "function is offered to push negatives values on an array", "(https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright (c) 2016-2021, <NAME>; Luczywo, Nadia # All", "addition to the main functionality, an MCDA agnostic function is", "4]]) # all the array is incremented by 1 to", "negatives from criteria. In addition to the main functionality, an", "A array with values axis : :py:class:`int` optional Axis along", "rights reserved. # ============================================================================= # DOCS # ============================================================================= \"\"\"Functionalities for", "= \\begin{cases} X_{ij} + min_{X_{ij}} & \\text{if } X_{ij} <", "to push negatives values on an array along an arbitrary", "push_negatives(mtx_lt0, axis=0) array([[0, 2], [4, 4]]) # by row only", "value) is affected >>> push_negatives(mtx_lt0, axis=1) array([[0, 3], [3, 4]])", "along an arbitrary axis. \"\"\" # ============================================================================= # IMPORTS #", "the array is incremented by 1 to eliminate the negative", "flattened input is used. Returns ------- :py:class:`numpy.ndarray` array with all", "\"\"\"Functionalities for remove negatives from criteria. In addition to the", "proportionally to made all the array positive along an axis.", "to made all the array positive along an axis. ..", "DOCS # ============================================================================= \"\"\"Functionalities for remove negatives from criteria. In" ]
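# A standalone check (not part of the module above) of the broadcasting
# trick in push_negatives: keepdims=True keeps the minima shaped (1, n) or
# (m, 1), so the subtraction broadcasts per column or per row.
import numpy as np

arr = np.array([[-1, 2], [3, 4]])
mins = np.min(arr, axis=0, keepdims=True)  # shape (1, 2): [[-1, 2]]
delta = (mins < 0) * mins                  # only negative minima survive: [[-1, 0]]
print(arr - delta)                         # [[0 2] [4 4]] -- same as push_negatives(arr, axis=0)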
[ "'companyInformation' in dictionary: if not isinstance(dictionary['companyInformation'], dict): raise TypeError('value \\'{}\\'", "return self.__company_information @company_information.setter def company_information(self, value: CompanyInformation): self.__company_information = value", "= None @property def billing_address(self) -> Address: \"\"\" | Object", "= None __personal_information = None @property def billing_address(self) -> Address:", "= value def to_dictionary(self): dictionary = super(CustomerToken, self).to_dictionary() if self.billing_address", "self.personal_information is not None: dictionary['personalInformation'] = self.personal_information.to_dictionary() return dictionary def", "a dictionary'.format(dictionary['companyInformation'])) value = CompanyInformation() self.company_information = value.from_dictionary(dictionary['companyInformation']) if 'personalInformation'", "isinstance(dictionary['billingAddress'], dict): raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['billingAddress'])) value", "is not a dictionary'.format(dictionary['personalInformation'])) value = PersonalInformationToken() self.personal_information = value.from_dictionary(dictionary['personalInformation'])", "value = Address() self.billing_address = value.from_dictionary(dictionary['billingAddress']) if 'companyInformation' in dictionary:", "def to_dictionary(self): dictionary = super(CustomerToken, self).to_dictionary() if self.billing_address is not", "return self.__billing_address @billing_address.setter def billing_address(self, value: Address): self.__billing_address = value", "if 'companyInformation' in dictionary: if not isinstance(dictionary['companyInformation'], dict): raise TypeError('value", "= value.from_dictionary(dictionary['billingAddress']) if 'companyInformation' in dictionary: if not isinstance(dictionary['companyInformation'], dict):", "import Address from ingenico.direct.sdk.domain.company_information import CompanyInformation from ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken", "to_dictionary(self): dictionary = super(CustomerToken, self).to_dictionary() if self.billing_address is not None:", "def company_information(self, value: CompanyInformation): self.__company_information = value @property def personal_information(self)", ":class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\" return self.__company_information @company_information.setter def company_information(self, value: CompanyInformation): self.__company_information", "\"\"\" Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\" return self.__personal_information @personal_information.setter def personal_information(self, value:", "dictionary'.format(dictionary['companyInformation'])) value = CompanyInformation() self.company_information = value.from_dictionary(dictionary['companyInformation']) if 'personalInformation' in", "= self.company_information.to_dictionary() if self.personal_information is not None: dictionary['personalInformation'] = self.personal_information.to_dictionary()", "-> CompanyInformation: \"\"\" | Object containing company information Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation`", "from ingenico.direct.sdk.data_object import DataObject from ingenico.direct.sdk.domain.address import Address from ingenico.direct.sdk.domain.company_information", "is not a dictionary'.format(dictionary['billingAddress'])) value = Address() 
self.billing_address = value.from_dictionary(dictionary['billingAddress'])", "@property def personal_information(self) -> PersonalInformationToken: \"\"\" Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\" return", "a dictionary'.format(dictionary['personalInformation'])) value = PersonalInformationToken() self.personal_information = value.from_dictionary(dictionary['personalInformation']) return self", "\"\"\" | Object containing billing address details Type: :class:`ingenico.direct.sdk.domain.address.Address` \"\"\"", "# This class was auto-generated from the API references found", "https://support.direct.ingenico.com/documentation/api/reference/ # from ingenico.direct.sdk.data_object import DataObject from ingenico.direct.sdk.domain.address import Address", "super(CustomerToken, self).from_dictionary(dictionary) if 'billingAddress' in dictionary: if not isinstance(dictionary['billingAddress'], dict):", "-*- coding: utf-8 -*- # # This class was auto-generated", "\"\"\" return self.__billing_address @billing_address.setter def billing_address(self, value: Address): self.__billing_address =", "not isinstance(dictionary['personalInformation'], dict): raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['personalInformation']))", "@personal_information.setter def personal_information(self, value: PersonalInformationToken): self.__personal_information = value def to_dictionary(self):", "self).from_dictionary(dictionary) if 'billingAddress' in dictionary: if not isinstance(dictionary['billingAddress'], dict): raise", "is not None: dictionary['personalInformation'] = self.personal_information.to_dictionary() return dictionary def from_dictionary(self,", "'personalInformation' in dictionary: if not isinstance(dictionary['personalInformation'], dict): raise TypeError('value \\'{}\\'", "\\'{}\\' is not a dictionary'.format(dictionary['personalInformation'])) value = PersonalInformationToken() self.personal_information =", "# -*- coding: utf-8 -*- # # This class was", "return self.__personal_information @personal_information.setter def personal_information(self, value: PersonalInformationToken): self.__personal_information = value", "utf-8 -*- # # This class was auto-generated from the", "personal_information(self, value: PersonalInformationToken): self.__personal_information = value def to_dictionary(self): dictionary =", "= value @property def personal_information(self) -> PersonalInformationToken: \"\"\" Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken`", "company_information(self) -> CompanyInformation: \"\"\" | Object containing company information Type:", "dictionary): super(CustomerToken, self).from_dictionary(dictionary) if 'billingAddress' in dictionary: if not isinstance(dictionary['billingAddress'],", "import DataObject from ingenico.direct.sdk.domain.address import Address from ingenico.direct.sdk.domain.company_information import CompanyInformation", "if self.company_information is not None: dictionary['companyInformation'] = self.company_information.to_dictionary() if self.personal_information", "\"\"\" | Object containing company information Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\" return", "dictionary: if not isinstance(dictionary['personalInformation'], dict): raise TypeError('value \\'{}\\' is not", "not a dictionary'.format(dictionary['billingAddress'])) value = Address() self.billing_address = 
value.from_dictionary(dictionary['billingAddress']) if", "is not None: dictionary['billingAddress'] = self.billing_address.to_dictionary() if self.company_information is not", "from ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken class CustomerToken(DataObject): __billing_address = None __company_information", "company information Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\" return self.__company_information @company_information.setter def company_information(self,", "super(CustomerToken, self).to_dictionary() if self.billing_address is not None: dictionary['billingAddress'] = self.billing_address.to_dictionary()", "value.from_dictionary(dictionary['companyInformation']) if 'personalInformation' in dictionary: if not isinstance(dictionary['personalInformation'], dict): raise", "CompanyInformation() self.company_information = value.from_dictionary(dictionary['companyInformation']) if 'personalInformation' in dictionary: if not", "@billing_address.setter def billing_address(self, value: Address): self.__billing_address = value @property def", "is not a dictionary'.format(dictionary['companyInformation'])) value = CompanyInformation() self.company_information = value.from_dictionary(dictionary['companyInformation'])", "None: dictionary['personalInformation'] = self.personal_information.to_dictionary() return dictionary def from_dictionary(self, dictionary): super(CustomerToken,", "TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['companyInformation'])) value = CompanyInformation() self.company_information", "Type: :class:`ingenico.direct.sdk.domain.address.Address` \"\"\" return self.__billing_address @billing_address.setter def billing_address(self, value: Address):", "CustomerToken(DataObject): __billing_address = None __company_information = None __personal_information = None", "in dictionary: if not isinstance(dictionary['personalInformation'], dict): raise TypeError('value \\'{}\\' is", "DataObject from ingenico.direct.sdk.domain.address import Address from ingenico.direct.sdk.domain.company_information import CompanyInformation from", "isinstance(dictionary['personalInformation'], dict): raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['personalInformation'])) value", "= None __company_information = None __personal_information = None @property def", "from ingenico.direct.sdk.domain.company_information import CompanyInformation from ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken class CustomerToken(DataObject):", ":class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\" return self.__personal_information @personal_information.setter def personal_information(self, value: PersonalInformationToken): self.__personal_information", "self.__personal_information @personal_information.setter def personal_information(self, value: PersonalInformationToken): self.__personal_information = value def", "from the API references found at # https://support.direct.ingenico.com/documentation/api/reference/ # from", "raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['personalInformation'])) value = PersonalInformationToken()", "dictionary = super(CustomerToken, self).to_dictionary() if self.billing_address is not None: dictionary['billingAddress']", "not isinstance(dictionary['companyInformation'], dict): raise TypeError('value \\'{}\\' is not a 
dictionary'.format(dictionary['companyInformation']))", "value = CompanyInformation() self.company_information = value.from_dictionary(dictionary['companyInformation']) if 'personalInformation' in dictionary:", "CompanyInformation: \"\"\" | Object containing company information Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\"", "None: dictionary['companyInformation'] = self.company_information.to_dictionary() if self.personal_information is not None: dictionary['personalInformation']", "self.company_information.to_dictionary() if self.personal_information is not None: dictionary['personalInformation'] = self.personal_information.to_dictionary() return", "= CompanyInformation() self.company_information = value.from_dictionary(dictionary['companyInformation']) if 'personalInformation' in dictionary: if", "CompanyInformation from ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken class CustomerToken(DataObject): __billing_address = None", "__billing_address = None __company_information = None __personal_information = None @property", "billing_address(self, value: Address): self.__billing_address = value @property def company_information(self) ->", "= super(CustomerToken, self).to_dictionary() if self.billing_address is not None: dictionary['billingAddress'] =", "None: dictionary['billingAddress'] = self.billing_address.to_dictionary() if self.company_information is not None: dictionary['companyInformation']", "not None: dictionary['personalInformation'] = self.personal_information.to_dictionary() return dictionary def from_dictionary(self, dictionary):", "PersonalInformationToken): self.__personal_information = value def to_dictionary(self): dictionary = super(CustomerToken, self).to_dictionary()", "def company_information(self) -> CompanyInformation: \"\"\" | Object containing company information", "containing billing address details Type: :class:`ingenico.direct.sdk.domain.address.Address` \"\"\" return self.__billing_address @billing_address.setter", "at # https://support.direct.ingenico.com/documentation/api/reference/ # from ingenico.direct.sdk.data_object import DataObject from ingenico.direct.sdk.domain.address", "TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['billingAddress'])) value = Address() self.billing_address", "\\'{}\\' is not a dictionary'.format(dictionary['companyInformation'])) value = CompanyInformation() self.company_information =", "if self.billing_address is not None: dictionary['billingAddress'] = self.billing_address.to_dictionary() if self.company_information", "found at # https://support.direct.ingenico.com/documentation/api/reference/ # from ingenico.direct.sdk.data_object import DataObject from", "Address: \"\"\" | Object containing billing address details Type: :class:`ingenico.direct.sdk.domain.address.Address`", "value def to_dictionary(self): dictionary = super(CustomerToken, self).to_dictionary() if self.billing_address is", "auto-generated from the API references found at # https://support.direct.ingenico.com/documentation/api/reference/ #", "# from ingenico.direct.sdk.data_object import DataObject from ingenico.direct.sdk.domain.address import Address from", "\"\"\" return self.__company_information @company_information.setter def company_information(self, value: CompanyInformation): self.__company_information =", "dict): raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['companyInformation'])) value =", "self.company_information is not None: 
dictionary['companyInformation'] = self.company_information.to_dictionary() if self.personal_information is", "dictionary['personalInformation'] = self.personal_information.to_dictionary() return dictionary def from_dictionary(self, dictionary): super(CustomerToken, self).from_dictionary(dictionary)", "-> PersonalInformationToken: \"\"\" Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\" return self.__personal_information @personal_information.setter def", "= value.from_dictionary(dictionary['companyInformation']) if 'personalInformation' in dictionary: if not isinstance(dictionary['personalInformation'], dict):", "not None: dictionary['billingAddress'] = self.billing_address.to_dictionary() if self.company_information is not None:", "details Type: :class:`ingenico.direct.sdk.domain.address.Address` \"\"\" return self.__billing_address @billing_address.setter def billing_address(self, value:", "in dictionary: if not isinstance(dictionary['billingAddress'], dict): raise TypeError('value \\'{}\\' is", "if 'personalInformation' in dictionary: if not isinstance(dictionary['personalInformation'], dict): raise TypeError('value", "dictionary def from_dictionary(self, dictionary): super(CustomerToken, self).from_dictionary(dictionary) if 'billingAddress' in dictionary:", "a dictionary'.format(dictionary['billingAddress'])) value = Address() self.billing_address = value.from_dictionary(dictionary['billingAddress']) if 'companyInformation'", "the API references found at # https://support.direct.ingenico.com/documentation/api/reference/ # from ingenico.direct.sdk.data_object", "self.company_information = value.from_dictionary(dictionary['companyInformation']) if 'personalInformation' in dictionary: if not isinstance(dictionary['personalInformation'],", "self.__company_information = value @property def personal_information(self) -> PersonalInformationToken: \"\"\" Type:", "= self.billing_address.to_dictionary() if self.company_information is not None: dictionary['companyInformation'] = self.company_information.to_dictionary()", "Address from ingenico.direct.sdk.domain.company_information import CompanyInformation from ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken class", "value: PersonalInformationToken): self.__personal_information = value def to_dictionary(self): dictionary = super(CustomerToken,", "| Object containing company information Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\" return self.__company_information", "class CustomerToken(DataObject): __billing_address = None __company_information = None __personal_information =", "Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\" return self.__personal_information @personal_information.setter def personal_information(self, value: PersonalInformationToken):", "company_information(self, value: CompanyInformation): self.__company_information = value @property def personal_information(self) ->", "\"\"\" return self.__personal_information @personal_information.setter def personal_information(self, value: PersonalInformationToken): self.__personal_information =", "API references found at # https://support.direct.ingenico.com/documentation/api/reference/ # from ingenico.direct.sdk.data_object import", "was auto-generated from the API references found at # https://support.direct.ingenico.com/documentation/api/reference/", "ingenico.direct.sdk.domain.company_information import 
CompanyInformation from ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken class CustomerToken(DataObject): __billing_address", "CompanyInformation): self.__company_information = value @property def personal_information(self) -> PersonalInformationToken: \"\"\"", "billing_address(self) -> Address: \"\"\" | Object containing billing address details", "dict): raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['billingAddress'])) value =", "# # This class was auto-generated from the API references", "references found at # https://support.direct.ingenico.com/documentation/api/reference/ # from ingenico.direct.sdk.data_object import DataObject", "Address() self.billing_address = value.from_dictionary(dictionary['billingAddress']) if 'companyInformation' in dictionary: if not", "raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['companyInformation'])) value = CompanyInformation()", "coding: utf-8 -*- # # This class was auto-generated from", "from_dictionary(self, dictionary): super(CustomerToken, self).from_dictionary(dictionary) if 'billingAddress' in dictionary: if not", "ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken class CustomerToken(DataObject): __billing_address = None __company_information =", "dict): raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['personalInformation'])) value =", "@property def company_information(self) -> CompanyInformation: \"\"\" | Object containing company", "not a dictionary'.format(dictionary['personalInformation'])) value = PersonalInformationToken() self.personal_information = value.from_dictionary(dictionary['personalInformation']) return", "in dictionary: if not isinstance(dictionary['companyInformation'], dict): raise TypeError('value \\'{}\\' is", "not a dictionary'.format(dictionary['companyInformation'])) value = CompanyInformation() self.company_information = value.from_dictionary(dictionary['companyInformation']) if", "@property def billing_address(self) -> Address: \"\"\" | Object containing billing", "from ingenico.direct.sdk.domain.address import Address from ingenico.direct.sdk.domain.company_information import CompanyInformation from ingenico.direct.sdk.domain.personal_information_token", "__personal_information = None @property def billing_address(self) -> Address: \"\"\" |", "self.__billing_address = value @property def company_information(self) -> CompanyInformation: \"\"\" |", "@company_information.setter def company_information(self, value: CompanyInformation): self.__company_information = value @property def", "self).to_dictionary() if self.billing_address is not None: dictionary['billingAddress'] = self.billing_address.to_dictionary() if", "self.personal_information.to_dictionary() return dictionary def from_dictionary(self, dictionary): super(CustomerToken, self).from_dictionary(dictionary) if 'billingAddress'", "= value @property def company_information(self) -> CompanyInformation: \"\"\" | Object", "| Object containing billing address details Type: :class:`ingenico.direct.sdk.domain.address.Address` \"\"\" return", "dictionary'.format(dictionary['billingAddress'])) value = Address() self.billing_address = value.from_dictionary(dictionary['billingAddress']) if 'companyInformation' in", "information Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\" return self.__company_information @company_information.setter def company_information(self, value:", "containing 
company information Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\" return self.__company_information @company_information.setter def", "personal_information(self) -> PersonalInformationToken: \"\"\" Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\" return self.__personal_information @personal_information.setter", "return dictionary def from_dictionary(self, dictionary): super(CustomerToken, self).from_dictionary(dictionary) if 'billingAddress' in", "self.__personal_information = value def to_dictionary(self): dictionary = super(CustomerToken, self).to_dictionary() if", "# https://support.direct.ingenico.com/documentation/api/reference/ # from ingenico.direct.sdk.data_object import DataObject from ingenico.direct.sdk.domain.address import", "self.billing_address.to_dictionary() if self.company_information is not None: dictionary['companyInformation'] = self.company_information.to_dictionary() if", "not None: dictionary['companyInformation'] = self.company_information.to_dictionary() if self.personal_information is not None:", "TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['personalInformation'])) value = PersonalInformationToken() self.personal_information", "self.__company_information @company_information.setter def company_information(self, value: CompanyInformation): self.__company_information = value @property", "None __company_information = None __personal_information = None @property def billing_address(self)", "value @property def company_information(self) -> CompanyInformation: \"\"\" | Object containing", "self.billing_address = value.from_dictionary(dictionary['billingAddress']) if 'companyInformation' in dictionary: if not isinstance(dictionary['companyInformation'],", "if not isinstance(dictionary['personalInformation'], dict): raise TypeError('value \\'{}\\' is not a", "if not isinstance(dictionary['billingAddress'], dict): raise TypeError('value \\'{}\\' is not a", "= self.personal_information.to_dictionary() return dictionary def from_dictionary(self, dictionary): super(CustomerToken, self).from_dictionary(dictionary) if", "def billing_address(self, value: Address): self.__billing_address = value @property def company_information(self)", "value @property def personal_information(self) -> PersonalInformationToken: \"\"\" Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\"", "dictionary['companyInformation'] = self.company_information.to_dictionary() if self.personal_information is not None: dictionary['personalInformation'] =", "Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\" return self.__company_information @company_information.setter def company_information(self, value: CompanyInformation):", "if self.personal_information is not None: dictionary['personalInformation'] = self.personal_information.to_dictionary() return dictionary", "def billing_address(self) -> Address: \"\"\" | Object containing billing address", "PersonalInformationToken: \"\"\" Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\" return self.__personal_information @personal_information.setter def personal_information(self,", "def personal_information(self) -> PersonalInformationToken: \"\"\" Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken` \"\"\" return self.__personal_information", "value: CompanyInformation): 
self.__company_information = value @property def personal_information(self) -> PersonalInformationToken:", "dictionary['billingAddress'] = self.billing_address.to_dictionary() if self.company_information is not None: dictionary['companyInformation'] =", "Object containing company information Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation` \"\"\" return self.__company_information @company_information.setter", "ingenico.direct.sdk.data_object import DataObject from ingenico.direct.sdk.domain.address import Address from ingenico.direct.sdk.domain.company_information import", "isinstance(dictionary['companyInformation'], dict): raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['companyInformation'])) value", "None __personal_information = None @property def billing_address(self) -> Address: \"\"\"", "Address): self.__billing_address = value @property def company_information(self) -> CompanyInformation: \"\"\"", "dictionary: if not isinstance(dictionary['billingAddress'], dict): raise TypeError('value \\'{}\\' is not", "address details Type: :class:`ingenico.direct.sdk.domain.address.Address` \"\"\" return self.__billing_address @billing_address.setter def billing_address(self,", "This class was auto-generated from the API references found at", "Object containing billing address details Type: :class:`ingenico.direct.sdk.domain.address.Address` \"\"\" return self.__billing_address", ":class:`ingenico.direct.sdk.domain.address.Address` \"\"\" return self.__billing_address @billing_address.setter def billing_address(self, value: Address): self.__billing_address", "-> Address: \"\"\" | Object containing billing address details Type:", "import CompanyInformation from ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken class CustomerToken(DataObject): __billing_address =", "PersonalInformationToken class CustomerToken(DataObject): __billing_address = None __company_information = None __personal_information", "is not None: dictionary['companyInformation'] = self.company_information.to_dictionary() if self.personal_information is not", "raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['billingAddress'])) value = Address()", "'billingAddress' in dictionary: if not isinstance(dictionary['billingAddress'], dict): raise TypeError('value \\'{}\\'", "def personal_information(self, value: PersonalInformationToken): self.__personal_information = value def to_dictionary(self): dictionary", "billing address details Type: :class:`ingenico.direct.sdk.domain.address.Address` \"\"\" return self.__billing_address @billing_address.setter def", "\\'{}\\' is not a dictionary'.format(dictionary['billingAddress'])) value = Address() self.billing_address =", "if 'billingAddress' in dictionary: if not isinstance(dictionary['billingAddress'], dict): raise TypeError('value", "value: Address): self.__billing_address = value @property def company_information(self) -> CompanyInformation:", "import PersonalInformationToken class CustomerToken(DataObject): __billing_address = None __company_information = None", "if not isinstance(dictionary['companyInformation'], dict): raise TypeError('value \\'{}\\' is not a", "def from_dictionary(self, dictionary): super(CustomerToken, self).from_dictionary(dictionary) if 'billingAddress' in dictionary: if", "__company_information = None __personal_information = None @property def billing_address(self) ->", "self.billing_address is not None: dictionary['billingAddress'] = 
self.billing_address.to_dictionary() if self.company_information is", "class was auto-generated from the API references found at #", "dictionary: if not isinstance(dictionary['companyInformation'], dict): raise TypeError('value \\'{}\\' is not", "not isinstance(dictionary['billingAddress'], dict): raise TypeError('value \\'{}\\' is not a dictionary'.format(dictionary['billingAddress']))", "ingenico.direct.sdk.domain.address import Address from ingenico.direct.sdk.domain.company_information import CompanyInformation from ingenico.direct.sdk.domain.personal_information_token import", "= Address() self.billing_address = value.from_dictionary(dictionary['billingAddress']) if 'companyInformation' in dictionary: if", "self.__billing_address @billing_address.setter def billing_address(self, value: Address): self.__billing_address = value @property", "None @property def billing_address(self) -> Address: \"\"\" | Object containing", "value.from_dictionary(dictionary['billingAddress']) if 'companyInformation' in dictionary: if not isinstance(dictionary['companyInformation'], dict): raise", "-*- # # This class was auto-generated from the API" ]
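A minimal round-trip sketch of the class above (not part of the generated file). It assumes the SDK convention, visible in from_dictionary, that each DataObject's from_dictionary returns self; the nested field names of Address, CompanyInformation and PersonalInformationToken are not shown here, so empty dicts stand in for them.

token = CustomerToken()
token.from_dictionary({
    'billingAddress': {},       # would normally carry address fields
    'companyInformation': {},   # would normally carry company fields
})
# Each nested object was instantiated and populated via from_dictionary,
# so it serializes back out under the same camelCase key.
assert isinstance(token.billing_address, Address)
assert set(token.to_dictionary()) >= {'billingAddress', 'companyInformation'}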
[ "k.press_key(k.left_key) time.sleep(1) # Hold down left key for 1 second.", "k = pykeyboard.PyKeyboard() k.press_key(k.left_key) time.sleep(1) # Hold down left key", "import time import pykeyboard # TODO: Replace following two lines", "activate the application. print('Activate the application 3 seconds.') time.sleep(3) k", "with the code that activate the application. print('Activate the application", "import pykeyboard # TODO: Replace following two lines with the", "pykeyboard # TODO: Replace following two lines with the code", "time import pykeyboard # TODO: Replace following two lines with", "time.sleep(3) k = pykeyboard.PyKeyboard() k.press_key(k.left_key) time.sleep(1) # Hold down left", "print('Activate the application 3 seconds.') time.sleep(3) k = pykeyboard.PyKeyboard() k.press_key(k.left_key)", "= pykeyboard.PyKeyboard() k.press_key(k.left_key) time.sleep(1) # Hold down left key for", "code that activate the application. print('Activate the application 3 seconds.')", "the application 3 seconds.') time.sleep(3) k = pykeyboard.PyKeyboard() k.press_key(k.left_key) time.sleep(1)", "Replace following two lines with the code that activate the", "TODO: Replace following two lines with the code that activate", "lines with the code that activate the application. print('Activate the", "seconds.') time.sleep(3) k = pykeyboard.PyKeyboard() k.press_key(k.left_key) time.sleep(1) # Hold down", "application. print('Activate the application 3 seconds.') time.sleep(3) k = pykeyboard.PyKeyboard()", "time.sleep(1) # Hold down left key for 1 second. k.release_key(k.left_key)", "pykeyboard.PyKeyboard() k.press_key(k.left_key) time.sleep(1) # Hold down left key for 1", "3 seconds.') time.sleep(3) k = pykeyboard.PyKeyboard() k.press_key(k.left_key) time.sleep(1) # Hold", "the code that activate the application. print('Activate the application 3", "following two lines with the code that activate the application.", "application 3 seconds.') time.sleep(3) k = pykeyboard.PyKeyboard() k.press_key(k.left_key) time.sleep(1) #", "the application. print('Activate the application 3 seconds.') time.sleep(3) k =", "# TODO: Replace following two lines with the code that", "that activate the application. print('Activate the application 3 seconds.') time.sleep(3)", "two lines with the code that activate the application. print('Activate" ]
[ "jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\", \"\") assert jsonl_filepath.endswith(\".jsonl\") os.system(f\"python tools/preprocess_data.py \\", "maybe_download_gpt2_tokenizer_data(): if not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}') if not", "data exists, and, if it doesn't, download, extract and tokenize", "-O {GPT2_MERGE_FP}') DATA_DOWNLOADERS = { \"enron\": Enron } def prepare_dataset(dataset_name):", "\\ --vocab {GPT2_VOCAB_FP} \\ --dataset-impl mmap \\ --tokenizer-type GPT2BPETokenizer \\", "self.name) os.makedirs(self.path, exist_ok=True) tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url)) with tarfile.open(tarfile_path, \"r:gz\")", "\"enron\" filetype = \"jsonl.zst\" url = \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed = 1", "is present\"\"\" return os.path.isdir(f\"{self.base_dir}/{self.name}\") def download(self): \"\"\"downloads dataset\"\"\" os.makedirs(self.base_dir, exist_ok=True)", "open(output_path, 'wb') as destination: decomp.copy_stream(compressed, destination) if remove_zstd: os.remove(zstd_file_path) return", "def exists(self): self.path = os.path.join(self.base_dir, self.name) return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\", \"\")))", "{GPT2_MERGE_FP}') DATA_DOWNLOADERS = { \"enron\": Enron } def prepare_dataset(dataset_name): os.makedirs(DATA_DIR,", "shutil import random import zstandard \"\"\" This registry is for", "exists(self): self.path = os.path.join(self.base_dir, self.name) return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\", \"\"))) def", "tools/preprocess_data.py \\ --input {jsonl_filepath} \\ --output-prefix {parent_folder}/{self.name} \\ --vocab {GPT2_VOCAB_FP}", "print(f'Extracting files from {tarfile_path}...') dataset_tar.extractall(self.path) def _extract_zstd(self, remove_zstd=True): self.path =", "\\ --input {jsonl_filepath} \\ --output-prefix {parent_folder}/{self.name} \\ --vocab {GPT2_VOCAB_FP} \\", "the pre-processing for the selected dataset. \"\"\" DATA_DIR = os.environ.get('DATA_DIR',", "os.path.basename(self.url)) with open(zstd_file_path, 'rb') as compressed: decomp = zstandard.ZstdDecompressor() output_path", "= os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url)) with", "= { \"enron\": Enron } def prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data()", "exist_ok=True) os.system(f\"wget {self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}\") def tokenize(self): parent_folder =", "url = \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed = 1 def exists(self): self.path =", "the selected dataset. 
\"\"\" DATA_DIR = os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP =", "os.path.join(self.base_dir, os.path.basename(self.url)) with open(zstd_file_path, 'rb') as compressed: decomp = zstandard.ZstdDecompressor()", "self.exists(): self.download() self.extract() self.tokenize() class Enron(DataDownloader): name = \"enron\" filetype", "registry class to automatically download / extract datasets\"\"\" @property def", "correct data dir if necessary\"\"\" self._extract_tar() def exists(self): \"\"\"Checks if", "{GPT2_VOCAB_FP}') if not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}') DATA_DOWNLOADERS =", "{self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}\") def tokenize(self): parent_folder = os.path.join(self.base_dir, self.name)", "datasets\"\"\" @property def base_dir(self): \"\"\"base data directory\"\"\" return DATA_DIR @property", "def base_dir(self): \"\"\"base data directory\"\"\" return DATA_DIR @property @abstractmethod def", "\"\"\"base data directory\"\"\" return DATA_DIR @property @abstractmethod def name(self): \"\"\"name", "GPT2BPETokenizer \\ --merge-file {GPT2_MERGE_FP} \\ --append-eod\") def prepare(self): if not", "name = \"enron\" filetype = \"jsonl.zst\" url = \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed", "the correct directory. When done, add it to the DATA_DOWNLOADERS", "os.path.basename(self.url).replace(\".zst\", \"\")), os.path.join(self.base_dir, self.name)) def maybe_download_gpt2_tokenizer_data(): if not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget", "Enron(DataDownloader): name = \"enron\" filetype = \"jsonl.zst\" url = \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\"", "download, extract and tokenize the data into the correct directory.", "\"\"\"name of dataset\"\"\" pass @property @abstractmethod def filetype(self): \"\"\"filetype of", "\"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed = 1 def exists(self): self.path = os.path.join(self.base_dir, self.name)", "\\ --append-eod\") def prepare(self): if not self.exists(): self.download() self.extract() self.tokenize()", "pass @property @abstractmethod def url(self): \"\"\"URL from which to download", "--vocab {GPT2_VOCAB_FP} \\ --dataset-impl mmap \\ --tokenizer-type GPT2BPETokenizer \\ --merge-file", "= os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP = f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP", "os.path.join(self.base_dir, os.path.basename(self.url)) with tarfile.open(tarfile_path, \"r:gz\") as dataset_tar: print(f'Extracting files from", "from {tarfile_path}...') dataset_tar.extractall(self.path) def _extract_zstd(self, remove_zstd=True): self.path = os.path.join(self.base_dir, self.name)", "return os.path.isdir(f\"{self.base_dir}/{self.name}\") def download(self): \"\"\"downloads dataset\"\"\" os.makedirs(self.base_dir, exist_ok=True) os.system(f\"wget {self.url}", "if remove_zstd: os.remove(zstd_file_path) return output_path def extract(self): \"\"\"extracts dataset and", "tokenize functions to check if the data exists, and, if", "f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP = f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\"", "url(self): \"\"\"URL from which to download dataset\"\"\" pass def _extract_tar(self):", "if it doesn't, download, extract and tokenize the 
data into", "extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\", \"\")), os.path.join(self.base_dir, self.name)) def maybe_download_gpt2_tokenizer_data():", "necessary\"\"\" self._extract_tar() def exists(self): \"\"\"Checks if the dataset is present\"\"\"", "to check if the data exists, and, if it doesn't,", "dataset. \"\"\" DATA_DIR = os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP = f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL", "ABC, abstractmethod from glob import glob import shutil import random", "and url attributes, and (optionally) provide download / extract /", "not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}') DATA_DOWNLOADERS = { \"enron\":", "destination) if remove_zstd: os.remove(zstd_file_path) return output_path def extract(self): \"\"\"extracts dataset", "download / extract / exists / tokenize functions to check", "return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\", \"\"))) def extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\",", "is for automatically downloading and extracting datasets. To register a", "DownloaderClass is None: raise NotImplementedError else: d = DownloaderClass() d.prepare()", "the correct data dir if necessary\"\"\" self._extract_tar() def exists(self): \"\"\"Checks", "os.path.join(self.base_dir, self.name)) def maybe_download_gpt2_tokenizer_data(): if not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL} -O", "os.path.join(self.base_dir, self.name) return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\", \"\"))) def extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd)", "def download(self): \"\"\"downloads dataset\"\"\" os.makedirs(self.base_dir, exist_ok=True) os.system(f\"wget {self.url} -O {os.path.join(self.base_dir,", "os.path.basename(self.url)).replace(\".zst\", \"\") assert jsonl_filepath.endswith(\".jsonl\") os.system(f\"python tools/preprocess_data.py \\ --input {jsonl_filepath} \\", "DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None) if DownloaderClass is None: raise NotImplementedError", "{GPT2_MERGE_FP} \\ --append-eod\") def prepare(self): if not self.exists(): self.download() self.extract()", "if DownloaderClass is None: raise NotImplementedError else: d = DownloaderClass()", "def name(self): \"\"\"name of dataset\"\"\" pass @property @abstractmethod def filetype(self):", "the data into the correct directory. When done, add it", "as destination: decomp.copy_stream(compressed, destination) if remove_zstd: os.remove(zstd_file_path) return output_path def", "registry is for automatically downloading and extracting datasets. 
To register", "\"\")), os.path.join(self.base_dir, self.name)) def maybe_download_gpt2_tokenizer_data(): if not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL}", "with tarfile.open(tarfile_path, \"r:gz\") as dataset_tar: print(f'Extracting files from {tarfile_path}...') dataset_tar.extractall(self.path)", "exists, and, if it doesn't, download, extract and tokenize the", "base_dir(self): \"\"\"base data directory\"\"\" return DATA_DIR @property @abstractmethod def name(self):", "= os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url)) with", "output_path = zstd_file_path.replace(\".zst\", \"\") with open(output_path, 'wb') as destination: decomp.copy_stream(compressed,", "not self.exists(): self.download() self.extract() self.tokenize() class Enron(DataDownloader): name = \"enron\"", "if the data exists, and, if it doesn't, download, extract", "prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None) if DownloaderClass", "GPT2_MERGE_FP = f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class DataDownloader(ABC): \"\"\"Dataset registry", "maybe_download_gpt2_tokenizer_data() DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None) if DownloaderClass is None: raise", "} def prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None)", "self.extract() self.tokenize() class Enron(DataDownloader): name = \"enron\" filetype = \"jsonl.zst\"", "from glob import glob import shutil import random import zstandard", "os.system(f\"wget {self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}\") def tokenize(self): parent_folder = os.path.join(self.base_dir,", "-O {GPT2_VOCAB_FP}') if not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}') DATA_DOWNLOADERS", "with open(output_path, 'wb') as destination: decomp.copy_stream(compressed, destination) if remove_zstd: os.remove(zstd_file_path)", "import zstandard \"\"\" This registry is for automatically downloading and", "glob import glob import shutil import random import zstandard \"\"\"", "automatically download / extract datasets\"\"\" @property def base_dir(self): \"\"\"base data", "for the selected dataset. \"\"\" DATA_DIR = os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP", "\"\") assert jsonl_filepath.endswith(\".jsonl\") os.system(f\"python tools/preprocess_data.py \\ --input {jsonl_filepath} \\ --output-prefix", "into the correct directory. When done, add it to the", "{parent_folder}/{self.name} \\ --vocab {GPT2_VOCAB_FP} \\ --dataset-impl mmap \\ --tokenizer-type GPT2BPETokenizer", "download(self): \"\"\"downloads dataset\"\"\" os.makedirs(self.base_dir, exist_ok=True) os.system(f\"wget {self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}\")", "= \"jsonl.zst\" url = \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed = 1 def exists(self):", "This registry is for automatically downloading and extracting datasets. 
To", "def filetype(self): \"\"\"filetype of dataset\"\"\" pass @property @abstractmethod def url(self):", "os.makedirs(self.path, exist_ok=True) zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url)) with open(zstd_file_path, 'rb') as", "GPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP = f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class", "import ABC, abstractmethod from glob import glob import shutil import", "return output_path def extract(self): \"\"\"extracts dataset and moves to the", "None) if DownloaderClass is None: raise NotImplementedError else: d =", "exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None) if DownloaderClass is None:", "os.path.isdir(f\"{self.base_dir}/{self.name}\") def download(self): \"\"\"downloads dataset\"\"\" os.makedirs(self.base_dir, exist_ok=True) os.system(f\"wget {self.url} -O", "downloading and extracting datasets. To register a class you need", "DataDownloader(ABC): \"\"\"Dataset registry class to automatically download / extract datasets\"\"\"", "DATA_DIR = os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP = f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\"", "\"\"\"URL from which to download dataset\"\"\" pass def _extract_tar(self): self.path", "def extract(self): \"\"\"extracts dataset and moves to the correct data", "moves to the correct data dir if necessary\"\"\" self._extract_tar() def", "os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}') if not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL}", "os.path.join(self.base_dir, self.name) jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\", \"\") assert jsonl_filepath.endswith(\".jsonl\") os.system(f\"python", "import glob import shutil import random import zstandard \"\"\" This", "random import zstandard \"\"\" This registry is for automatically downloading", "extract datasets\"\"\" @property def base_dir(self): \"\"\"base data directory\"\"\" return DATA_DIR", "decomp.copy_stream(compressed, destination) if remove_zstd: os.remove(zstd_file_path) return output_path def extract(self): \"\"\"extracts", "= zstandard.ZstdDecompressor() output_path = zstd_file_path.replace(\".zst\", \"\") with open(output_path, 'wb') as", "'wb') as destination: decomp.copy_stream(compressed, destination) if remove_zstd: os.remove(zstd_file_path) return output_path", "= DATA_DOWNLOADERS.get(dataset_name, None) if DownloaderClass is None: raise NotImplementedError else:", "DATA_DIR @property @abstractmethod def name(self): \"\"\"name of dataset\"\"\" pass @property", "self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\", \"\")), os.path.join(self.base_dir, self.name)) def maybe_download_gpt2_tokenizer_data(): if not", "as compressed: decomp = zstandard.ZstdDecompressor() output_path = zstd_file_path.replace(\".zst\", \"\") with", "automatically downloading and extracting datasets. 
To register a class you", "@property @abstractmethod def url(self): \"\"\"URL from which to download dataset\"\"\"", "\"r:gz\") as dataset_tar: print(f'Extracting files from {tarfile_path}...') dataset_tar.extractall(self.path) def _extract_zstd(self,", "filetype = \"jsonl.zst\" url = \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed = 1 def", "the DataDownloader class, provide name, filetype and url attributes, and", "class to automatically download / extract datasets\"\"\" @property def base_dir(self):", "dataset is present\"\"\" return os.path.isdir(f\"{self.base_dir}/{self.name}\") def download(self): \"\"\"downloads dataset\"\"\" os.makedirs(self.base_dir,", "\"\"\"Dataset registry class to automatically download / extract datasets\"\"\" @property", "you need to inherit the DataDownloader class, provide name, filetype", "import tarfile from abc import ABC, abstractmethod from glob import", "dir if necessary\"\"\" self._extract_tar() def exists(self): \"\"\"Checks if the dataset", "= os.path.join(self.base_dir, os.path.basename(self.url)) with open(zstd_file_path, 'rb') as compressed: decomp =", "compressed: decomp = zstandard.ZstdDecompressor() output_path = zstd_file_path.replace(\".zst\", \"\") with open(output_path,", "zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url)) with open(zstd_file_path, 'rb') as compressed: decomp", "os import tarfile from abc import ABC, abstractmethod from glob", "download / extract datasets\"\"\" @property def base_dir(self): \"\"\"base data directory\"\"\"", "{GPT2_MERGE_URL} -O {GPT2_MERGE_FP}') DATA_DOWNLOADERS = { \"enron\": Enron } def", "self.path = os.path.join(self.base_dir, self.name) return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\", \"\"))) def extract(self,", "import random import zstandard \"\"\" This registry is for automatically", "os.makedirs(self.path, exist_ok=True) tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url)) with tarfile.open(tarfile_path, \"r:gz\") as", "process_data runs the pre-processing for the selected dataset. 
\"\"\" DATA_DIR", "\"\") with open(output_path, 'wb') as destination: decomp.copy_stream(compressed, destination) if remove_zstd:", "= os.path.join(self.base_dir, self.name) jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\", \"\") assert jsonl_filepath.endswith(\".jsonl\")", "\"\"))) def extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\", \"\")), os.path.join(self.base_dir, self.name))", "/ extract / exists / tokenize functions to check if", "from abc import ABC, abstractmethod from glob import glob import", "as dataset_tar: print(f'Extracting files from {tarfile_path}...') dataset_tar.extractall(self.path) def _extract_zstd(self, remove_zstd=True):", "@property @abstractmethod def filetype(self): \"\"\"filetype of dataset\"\"\" pass @property @abstractmethod", "files from {tarfile_path}...') dataset_tar.extractall(self.path) def _extract_zstd(self, remove_zstd=True): self.path = os.path.join(self.base_dir,", "filetype and url attributes, and (optionally) provide download / extract", "zstandard.ZstdDecompressor() output_path = zstd_file_path.replace(\".zst\", \"\") with open(output_path, 'wb') as destination:", "seed = 1 def exists(self): self.path = os.path.join(self.base_dir, self.name) return", "\"\"\" This registry is for automatically downloading and extracting datasets.", "\\ --output-prefix {parent_folder}/{self.name} \\ --vocab {GPT2_VOCAB_FP} \\ --dataset-impl mmap \\", "GPT2_VOCAB_FP = f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP = f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL", "DataDownloader class, provide name, filetype and url attributes, and (optionally)", "\"\"\"extracts dataset and moves to the correct data dir if", "extract and tokenize the data into the correct directory. When", "self.tokenize() class Enron(DataDownloader): name = \"enron\" filetype = \"jsonl.zst\" url", "from which to download dataset\"\"\" pass def _extract_tar(self): self.path =", "= 1 def exists(self): self.path = os.path.join(self.base_dir, self.name) return os.path.isfile(os.path.join(self.path,", "= zstd_file_path.replace(\".zst\", \"\") with open(output_path, 'wb') as destination: decomp.copy_stream(compressed, destination)", "pass def _extract_tar(self): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) tarfile_path", "and tokenize the data into the correct directory. When done,", "class, provide name, filetype and url attributes, and (optionally) provide", "self.download() self.extract() self.tokenize() class Enron(DataDownloader): name = \"enron\" filetype =", "data dir if necessary\"\"\" self._extract_tar() def exists(self): \"\"\"Checks if the", "jsonl_filepath.endswith(\".jsonl\") os.system(f\"python tools/preprocess_data.py \\ --input {jsonl_filepath} \\ --output-prefix {parent_folder}/{self.name} \\", "abstractmethod from glob import glob import shutil import random import", "to the correct data dir if necessary\"\"\" self._extract_tar() def exists(self):", "to the DATA_DOWNLOADERS dict. 
The function process_data runs the pre-processing", "= \"enron\" filetype = \"jsonl.zst\" url = \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed =", "name(self): \"\"\"name of dataset\"\"\" pass @property @abstractmethod def filetype(self): \"\"\"filetype", "= os.path.join(self.base_dir, self.name) return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\", \"\"))) def extract(self, remove_zstd=True):", "tokenize the data into the correct directory. When done, add", "dataset\"\"\" pass def _extract_tar(self): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True)", "dataset\"\"\" pass @property @abstractmethod def filetype(self): \"\"\"filetype of dataset\"\"\" pass", "def url(self): \"\"\"URL from which to download dataset\"\"\" pass def", "os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}') if not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL} -O", "/ exists / tokenize functions to check if the data", "self._extract_tar() def exists(self): \"\"\"Checks if the dataset is present\"\"\" return", "def exists(self): \"\"\"Checks if the dataset is present\"\"\" return os.path.isdir(f\"{self.base_dir}/{self.name}\")", "provide name, filetype and url attributes, and (optionally) provide download", "return DATA_DIR @property @abstractmethod def name(self): \"\"\"name of dataset\"\"\" pass", "_extract_zstd(self, remove_zstd=True): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) zstd_file_path =", "class DataDownloader(ABC): \"\"\"Dataset registry class to automatically download / extract", "to automatically download / extract datasets\"\"\" @property def base_dir(self): \"\"\"base", "dataset and moves to the correct data dir if necessary\"\"\"", "\"\"\"Checks if the dataset is present\"\"\" return os.path.isdir(f\"{self.base_dir}/{self.name}\") def download(self):", "DATA_DOWNLOADERS.get(dataset_name, None) if DownloaderClass is None: raise NotImplementedError else: d", "it to the DATA_DOWNLOADERS dict. 
The function process_data runs the", "url attributes, and (optionally) provide download / extract / exists", "{GPT2_VOCAB_FP} \\ --dataset-impl mmap \\ --tokenizer-type GPT2BPETokenizer \\ --merge-file {GPT2_MERGE_FP}", "exists(self): \"\"\"Checks if the dataset is present\"\"\" return os.path.isdir(f\"{self.base_dir}/{self.name}\") def", "if necessary\"\"\" self._extract_tar() def exists(self): \"\"\"Checks if the dataset is", "\"\"\"downloads dataset\"\"\" os.makedirs(self.base_dir, exist_ok=True) os.system(f\"wget {self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}\") def", "not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}') if not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget", "{GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}') if not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}')", "output_path def extract(self): \"\"\"extracts dataset and moves to the correct", "-O {os.path.join(self.base_dir, os.path.basename(self.url))}\") def tokenize(self): parent_folder = os.path.join(self.base_dir, self.name) jsonl_filepath", "\\ --tokenizer-type GPT2BPETokenizer \\ --merge-file {GPT2_MERGE_FP} \\ --append-eod\") def prepare(self):", "self.name) os.makedirs(self.path, exist_ok=True) zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url)) with open(zstd_file_path, 'rb')", "doesn't, download, extract and tokenize the data into the correct", "of dataset\"\"\" pass @property @abstractmethod def filetype(self): \"\"\"filetype of dataset\"\"\"", "pre-processing for the selected dataset. \"\"\" DATA_DIR = os.environ.get('DATA_DIR', './data')", "add it to the DATA_DOWNLOADERS dict. The function process_data runs", "DATA_DOWNLOADERS dict. The function process_data runs the pre-processing for the", "of dataset\"\"\" pass @property @abstractmethod def url(self): \"\"\"URL from which", "os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url)) with tarfile.open(tarfile_path,", "with open(zstd_file_path, 'rb') as compressed: decomp = zstandard.ZstdDecompressor() output_path =", "\\ --dataset-impl mmap \\ --tokenizer-type GPT2BPETokenizer \\ --merge-file {GPT2_MERGE_FP} \\", "exist_ok=True) tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url)) with tarfile.open(tarfile_path, \"r:gz\") as dataset_tar:", "and, if it doesn't, download, extract and tokenize the data", "--dataset-impl mmap \\ --tokenizer-type GPT2BPETokenizer \\ --merge-file {GPT2_MERGE_FP} \\ --append-eod\")", "tokenize(self): parent_folder = os.path.join(self.base_dir, self.name) jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\", \"\")", "/ tokenize functions to check if the data exists, and,", "self.name)) def maybe_download_gpt2_tokenizer_data(): if not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}')", "the dataset is present\"\"\" return os.path.isdir(f\"{self.base_dir}/{self.name}\") def download(self): \"\"\"downloads dataset\"\"\"", "= os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\", \"\") assert jsonl_filepath.endswith(\".jsonl\") os.system(f\"python tools/preprocess_data.py \\ --input", "correct directory. 
When done, add it to the DATA_DOWNLOADERS dict.", "it doesn't, download, extract and tokenize the data into the", "\"jsonl.zst\" url = \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed = 1 def exists(self): self.path", "if the dataset is present\"\"\" return os.path.isdir(f\"{self.base_dir}/{self.name}\") def download(self): \"\"\"downloads", "dataset_tar.extractall(self.path) def _extract_zstd(self, remove_zstd=True): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True)", "= os.path.join(self.base_dir, os.path.basename(self.url)) with tarfile.open(tarfile_path, \"r:gz\") as dataset_tar: print(f'Extracting files", "\\ --merge-file {GPT2_MERGE_FP} \\ --append-eod\") def prepare(self): if not self.exists():", "{jsonl_filepath} \\ --output-prefix {parent_folder}/{self.name} \\ --vocab {GPT2_VOCAB_FP} \\ --dataset-impl mmap", "= \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP = f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class DataDownloader(ABC):", "\"\"\" DATA_DIR = os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP = f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL =", "\"enron\": Enron } def prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass =", "data directory\"\"\" return DATA_DIR @property @abstractmethod def name(self): \"\"\"name of", "\"\"\"filetype of dataset\"\"\" pass @property @abstractmethod def url(self): \"\"\"URL from", "extract / exists / tokenize functions to check if the", "/ extract datasets\"\"\" @property def base_dir(self): \"\"\"base data directory\"\"\" return", "selected dataset. \"\"\" DATA_DIR = os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP = f\"{DATA_DIR}/gpt2-vocab.json\"", "'./data') GPT2_VOCAB_FP = f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP = f\"{DATA_DIR}/gpt2-merges.txt\"", "'rb') as compressed: decomp = zstandard.ZstdDecompressor() output_path = zstd_file_path.replace(\".zst\", \"\")", "remove_zstd: os.remove(zstd_file_path) return output_path def extract(self): \"\"\"extracts dataset and moves", "prepare(self): if not self.exists(): self.download() self.extract() self.tokenize() class Enron(DataDownloader): name", "= \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class DataDownloader(ABC): \"\"\"Dataset registry class to automatically download", "zstandard \"\"\" This registry is for automatically downloading and extracting", "to inherit the DataDownloader class, provide name, filetype and url", "filetype(self): \"\"\"filetype of dataset\"\"\" pass @property @abstractmethod def url(self): \"\"\"URL", "import os import tarfile from abc import ABC, abstractmethod from", "tarfile.open(tarfile_path, \"r:gz\") as dataset_tar: print(f'Extracting files from {tarfile_path}...') dataset_tar.extractall(self.path) def", "--tokenizer-type GPT2BPETokenizer \\ --merge-file {GPT2_MERGE_FP} \\ --append-eod\") def prepare(self): if", "def maybe_download_gpt2_tokenizer_data(): if not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}') if", "{ \"enron\": Enron } def prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass", "for automatically downloading and extracting datasets. 
To register a class", "os.system(f\"python tools/preprocess_data.py \\ --input {jsonl_filepath} \\ --output-prefix {parent_folder}/{self.name} \\ --vocab", "os.makedirs(self.base_dir, exist_ok=True) os.system(f\"wget {self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}\") def tokenize(self): parent_folder", "need to inherit the DataDownloader class, provide name, filetype and", "_extract_tar(self): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) tarfile_path = os.path.join(self.base_dir,", "@abstractmethod def url(self): \"\"\"URL from which to download dataset\"\"\" pass", "if not os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}') DATA_DOWNLOADERS = {", "os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}') DATA_DOWNLOADERS = { \"enron\": Enron }", "os.path.basename(self.url)) with tarfile.open(tarfile_path, \"r:gz\") as dataset_tar: print(f'Extracting files from {tarfile_path}...')", "function process_data runs the pre-processing for the selected dataset. \"\"\"", "@abstractmethod def filetype(self): \"\"\"filetype of dataset\"\"\" pass @property @abstractmethod def", "GPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class DataDownloader(ABC): \"\"\"Dataset registry class to automatically", "os.path.basename(self.url))}\") def tokenize(self): parent_folder = os.path.join(self.base_dir, self.name) jsonl_filepath = os.path.join(parent_folder,", "extracting datasets. To register a class you need to inherit", "and extracting datasets. To register a class you need to", "\"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP = f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class DataDownloader(ABC): \"\"\"Dataset", "--input {jsonl_filepath} \\ --output-prefix {parent_folder}/{self.name} \\ --vocab {GPT2_VOCAB_FP} \\ --dataset-impl", "class you need to inherit the DataDownloader class, provide name,", "functions to check if the data exists, and, if it", "def prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None) if", "present\"\"\" return os.path.isdir(f\"{self.base_dir}/{self.name}\") def download(self): \"\"\"downloads dataset\"\"\" os.makedirs(self.base_dir, exist_ok=True) os.system(f\"wget", "To register a class you need to inherit the DataDownloader", "dataset_tar: print(f'Extracting files from {tarfile_path}...') dataset_tar.extractall(self.path) def _extract_zstd(self, remove_zstd=True): self.path", "directory\"\"\" return DATA_DIR @property @abstractmethod def name(self): \"\"\"name of dataset\"\"\"", "parent_folder = os.path.join(self.base_dir, self.name) jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\", \"\") assert", "--output-prefix {parent_folder}/{self.name} \\ --vocab {GPT2_VOCAB_FP} \\ --dataset-impl mmap \\ --tokenizer-type", "tarfile from abc import ABC, abstractmethod from glob import glob", "and (optionally) provide download / extract / exists / tokenize", "to download dataset\"\"\" pass def _extract_tar(self): self.path = os.path.join(self.base_dir, self.name)", "1 def exists(self): self.path = os.path.join(self.base_dir, self.name) return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\",", "= f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL = 
\"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP = f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL =", "check if the data exists, and, if it doesn't, download,", "destination: decomp.copy_stream(compressed, destination) if remove_zstd: os.remove(zstd_file_path) return output_path def extract(self):", "def _extract_zstd(self, remove_zstd=True): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) zstd_file_path", "DATA_DOWNLOADERS = { \"enron\": Enron } def prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True)", "open(zstd_file_path, 'rb') as compressed: decomp = zstandard.ZstdDecompressor() output_path = zstd_file_path.replace(\".zst\",", "abc import ABC, abstractmethod from glob import glob import shutil", "os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\", \"\") assert jsonl_filepath.endswith(\".jsonl\") os.system(f\"python tools/preprocess_data.py \\ --input {jsonl_filepath}", "exist_ok=True) zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url)) with open(zstd_file_path, 'rb') as compressed:", "directory. When done, add it to the DATA_DOWNLOADERS dict. The", "The function process_data runs the pre-processing for the selected dataset.", "def _extract_tar(self): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) tarfile_path =", "pass @property @abstractmethod def filetype(self): \"\"\"filetype of dataset\"\"\" pass @property", "\"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class DataDownloader(ABC): \"\"\"Dataset registry class to automatically download /", "= f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class DataDownloader(ABC): \"\"\"Dataset registry class", "dataset\"\"\" pass @property @abstractmethod def url(self): \"\"\"URL from which to", "decomp = zstandard.ZstdDecompressor() output_path = zstd_file_path.replace(\".zst\", \"\") with open(output_path, 'wb')", "os.path.isfile(GPT2_MERGE_FP): os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}') DATA_DOWNLOADERS = { \"enron\": Enron", "def extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\", \"\")), os.path.join(self.base_dir, self.name)) def", "@property @abstractmethod def name(self): \"\"\"name of dataset\"\"\" pass @property @abstractmethod", "os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url)) with open(zstd_file_path,", "os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\", \"\"))) def extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\", \"\")),", "exists / tokenize functions to check if the data exists,", "runs the pre-processing for the selected dataset. 
\"\"\" DATA_DIR =", "os.environ.get('DATA_DIR', './data') GPT2_VOCAB_FP = f\"{DATA_DIR}/gpt2-vocab.json\" GPT2_VOCAB_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\" GPT2_MERGE_FP =", "--append-eod\") def prepare(self): if not self.exists(): self.download() self.extract() self.tokenize() class", "remove_zstd=True): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) zstd_file_path = os.path.join(self.base_dir,", "name, filetype and url attributes, and (optionally) provide download /", "which to download dataset\"\"\" pass def _extract_tar(self): self.path = os.path.join(self.base_dir,", "and moves to the correct data dir if necessary\"\"\" self._extract_tar()", "done, add it to the DATA_DOWNLOADERS dict. The function process_data", "zstd_file_path.replace(\".zst\", \"\") with open(output_path, 'wb') as destination: decomp.copy_stream(compressed, destination) if", "class Enron(DataDownloader): name = \"enron\" filetype = \"jsonl.zst\" url =", "provide download / extract / exists / tokenize functions to", "shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\", \"\")), os.path.join(self.base_dir, self.name)) def maybe_download_gpt2_tokenizer_data(): if not os.path.isfile(GPT2_VOCAB_FP):", "os.remove(zstd_file_path) return output_path def extract(self): \"\"\"extracts dataset and moves to", "@abstractmethod def name(self): \"\"\"name of dataset\"\"\" pass @property @abstractmethod def", "def tokenize(self): parent_folder = os.path.join(self.base_dir, self.name) jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\",", "@property def base_dir(self): \"\"\"base data directory\"\"\" return DATA_DIR @property @abstractmethod", "datasets. To register a class you need to inherit the", "register a class you need to inherit the DataDownloader class,", "dataset\"\"\" os.makedirs(self.base_dir, exist_ok=True) os.system(f\"wget {self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}\") def tokenize(self):", "os.path.basename(self.url).replace(\".zst\", \"\"))) def extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\", \"\")), os.path.join(self.base_dir,", "dict. The function process_data runs the pre-processing for the selected", "{tarfile_path}...') dataset_tar.extractall(self.path) def _extract_zstd(self, remove_zstd=True): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path,", "the DATA_DOWNLOADERS dict. 
The function process_data runs the pre-processing for", "--merge-file {GPT2_MERGE_FP} \\ --append-eod\") def prepare(self): if not self.exists(): self.download()", "def prepare(self): if not self.exists(): self.download() self.extract() self.tokenize() class Enron(DataDownloader):", "the data exists, and, if it doesn't, download, extract and", "tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url)) with tarfile.open(tarfile_path, \"r:gz\") as dataset_tar: print(f'Extracting", "if not os.path.isfile(GPT2_VOCAB_FP): os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}') if not os.path.isfile(GPT2_MERGE_FP):", "{os.path.join(self.base_dir, os.path.basename(self.url))}\") def tokenize(self): parent_folder = os.path.join(self.base_dir, self.name) jsonl_filepath =", "= \"http://eaidata.bmk.sh/data/enron_emails.jsonl.zst\" seed = 1 def exists(self): self.path = os.path.join(self.base_dir,", "assert jsonl_filepath.endswith(\".jsonl\") os.system(f\"python tools/preprocess_data.py \\ --input {jsonl_filepath} \\ --output-prefix {parent_folder}/{self.name}", "extract(self): \"\"\"extracts dataset and moves to the correct data dir", "download dataset\"\"\" pass def _extract_tar(self): self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path,", "self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url))", "if not self.exists(): self.download() self.extract() self.tokenize() class Enron(DataDownloader): name =", "(optionally) provide download / extract / exists / tokenize functions", "inherit the DataDownloader class, provide name, filetype and url attributes,", "attributes, and (optionally) provide download / extract / exists /", "Enron } def prepare_dataset(dataset_name): os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass = DATA_DOWNLOADERS.get(dataset_name,", "self.name) return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(\".zst\", \"\"))) def extract(self, remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir,", "data into the correct directory. When done, add it to", "glob import shutil import random import zstandard \"\"\" This registry", "os.makedirs(DATA_DIR, exist_ok=True) maybe_download_gpt2_tokenizer_data() DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None) if DownloaderClass is", "f\"{DATA_DIR}/gpt2-merges.txt\" GPT2_MERGE_URL = \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\" class DataDownloader(ABC): \"\"\"Dataset registry class to", "self.path = os.path.join(self.base_dir, self.name) os.makedirs(self.path, exist_ok=True) tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url))", "mmap \\ --tokenizer-type GPT2BPETokenizer \\ --merge-file {GPT2_MERGE_FP} \\ --append-eod\") def", "When done, add it to the DATA_DOWNLOADERS dict. 
The function", "self.name) jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(\".zst\", \"\") assert jsonl_filepath.endswith(\".jsonl\") os.system(f\"python tools/preprocess_data.py", "remove_zstd=True): self._extract_zstd(remove_zstd=remove_zstd) shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(\".zst\", \"\")), os.path.join(self.base_dir, self.name)) def maybe_download_gpt2_tokenizer_data(): if", "import shutil import random import zstandard \"\"\" This registry is", "a class you need to inherit the DataDownloader class, provide" ]
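
# --- Usage sketch (illustrative, not part of the registry above) ---
# A minimal sketch of how the registry is meant to be extended. "MyCorpus"
# and its URL are hypothetical placeholders (only Enron is registered above);
# the subclass simply mirrors Enron's exists/extract overrides and is added
# to DATA_DOWNLOADERS before calling prepare_dataset.

class MyCorpus(DataDownloader):
    name = "my_corpus"
    filetype = "jsonl.zst"
    url = "http://example.com/data/my_corpus.jsonl.zst"  # placeholder URL

    def exists(self):
        self.path = os.path.join(self.base_dir, self.name)
        return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(".zst", "")))

    def extract(self, remove_zstd=True):
        self._extract_zstd(remove_zstd=remove_zstd)
        shutil.move(
            os.path.join(self.base_dir, os.path.basename(self.url).replace(".zst", "")),
            os.path.join(self.base_dir, self.name))


DATA_DOWNLOADERS["my_corpus"] = MyCorpus

# prepare() checks exists() first, so repeated calls skip the
# download -> extract -> tokenize pipeline once the data is in place.
prepare_dataset("my_corpus")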
[ "self.data.get((s, a), self.init_value) def __set(self, s: int, a: int, value:", "q: float, *q_old: float) -> float: \"\"\" Q値の更新 Parameters ----------", "= alpha self.gamma = gamma self.data = data self.init_value =", "= init_value def get(self, s: int, a: int) -> float:", ": float 学習率α gamma : float 割引率γ data : dict", "dict = {}, init_value: float = 0) -> None: self.alpha", "a) else: q_old = q_old[0] #print('alpha:{}, q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha,", "行動 value : float 代入するQ値, Q(s, a) \"\"\" self.data[(s, a)]", ": float Q(s, a) Returns ------ q_new : float updateされたQ値", "\"\"\" Q値の更新 Parameters ---------- s : int 状態 a :", "\"\"\" def __init__(self, alpha: float, gamma: float, data: dict =", "a: int, r: float, q: float, *q_old: float) -> float:", "self.data = data self.init_value = init_value def get(self, s: int,", "\"\"\" dataへの値の代入 Parameters ---------- s : int 状態 a :", "float 報酬 q : float Q(s_t+1, a) q_old : float", "int) -> float: \"\"\" dataから値の取得 Parameters ---------- s : int", "Q値の更新 Parameters ---------- s : int 状態 a : int", "Returns ------- value : float Q値, Q(s, a) \"\"\" return", "class QLearning: \"\"\" Q-Learning用のクラス Attributes ---------- alpha : float 学習率α", "\"\"\" Q-Learning用のクラス Attributes ---------- alpha : float 学習率α gamma :", "float, data: dict = {}, init_value: float = 0) ->", "-> None: self.alpha = alpha self.gamma = gamma self.data =", "self.gamma, q)) q_new = (1-self.alpha)*q_old+self.alpha*(r + self.gamma*q) self.__set(s, a, q_new)", "*q_old: float) -> float: \"\"\" Q値の更新 Parameters ---------- s :", "r, self.gamma, q)) q_new = (1-self.alpha)*q_old+self.alpha*(r + self.gamma*q) self.__set(s, a,", "Q値, Q(s, a) \"\"\" return self.data.get((s, a), self.init_value) def __set(self,", "r: float, q: float, *q_old: float) -> float: \"\"\" Q値の更新", "int, a: int, value: float) -> None: \"\"\" dataへの値の代入 Parameters", "\"\"\" if len(q_old) == 0: q_old = self.get(s, a) else:", "Q(s, a) Returns ------ q_new : float updateされたQ値 \"\"\" if", "return self.data.get((s, a), self.init_value) def __set(self, s: int, a: int,", "value def update(self, s: int, a: int, r: float, q:", "状態 a : int 行動 r : float 報酬 q", "data self.init_value = init_value def get(self, s: int, a: int)", "self.get(s, a) else: q_old = q_old[0] #print('alpha:{}, q_old:{}, r:{}, gamma:{},", "= self.get(s, a) else: q_old = q_old[0] #print('alpha:{}, q_old:{}, r:{},", "__init__(self, alpha: float, gamma: float, data: dict = {}, init_value:", "float Q(s, a) Returns ------ q_new : float updateされたQ値 \"\"\"", "logger = getLogger(__name__) class QLearning: \"\"\" Q-Learning用のクラス Attributes ---------- alpha", "int, a: int, r: float, q: float, *q_old: float) ->", "学習率α gamma : float 割引率γ data : dict Q-Learningでの学習結果の保存用辞書 init_value", "float 学習率α gamma : float 割引率γ data : dict Q-Learningでの学習結果の保存用辞書", "logging import getLogger logger = getLogger(__name__) class QLearning: \"\"\" Q-Learning用のクラス", "getLogger(__name__) class QLearning: \"\"\" Q-Learning用のクラス Attributes ---------- alpha : float", ": float 割引率γ data : dict Q-Learningでの学習結果の保存用辞書 init_value : float", "== 0: q_old = self.get(s, a) else: q_old = q_old[0]", "int 行動 Returns ------- value : float Q値, Q(s, a)", "def get(self, s: int, a: int) -> float: \"\"\" dataから値の取得", "r : float 報酬 q : float Q(s_t+1, a) q_old", "int, value: float) -> None: \"\"\" dataへの値の代入 Parameters ---------- s", "float, gamma: float, data: dict = {}, init_value: float =", "dataから値の取得 Parameters ---------- s : int 状態 a : int", "int, a: int) -> float: \"\"\" dataから値の取得 
Parameters ---------- s", "Q(s, a) \"\"\" return self.data.get((s, a), self.init_value) def __set(self, s:", "Q(s_t+1, a) q_old : float Q(s, a) Returns ------ q_new", "= value def update(self, s: int, a: int, r: float,", "a: int, value: float) -> None: \"\"\" dataへの値の代入 Parameters ----------", ": int 状態 a : int 行動 r : float", "gamma : float 割引率γ data : dict Q-Learningでの学習結果の保存用辞書 init_value :", "float: \"\"\" dataから値の取得 Parameters ---------- s : int 状態 a", "行動 Returns ------- value : float Q値, Q(s, a) \"\"\"", "s: int, a: int, r: float, q: float, *q_old: float)", "r:{}, gamma:{}, q:{}'.format(self.alpha, q_old, r, self.gamma, q)) q_new = (1-self.alpha)*q_old+self.alpha*(r", "dataの初期値 \"\"\" def __init__(self, alpha: float, gamma: float, data: dict", "self.gamma = gamma self.data = data self.init_value = init_value def", "int 状態 a : int 行動 Returns ------- value :", "float: \"\"\" Q値の更新 Parameters ---------- s : int 状態 a", "alpha: float, gamma: float, data: dict = {}, init_value: float", "#print('alpha:{}, q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha, q_old, r, self.gamma, q)) q_new", "q:{}'.format(self.alpha, q_old, r, self.gamma, q)) q_new = (1-self.alpha)*q_old+self.alpha*(r + self.gamma*q)", "割引率γ data : dict Q-Learningでの学習結果の保存用辞書 init_value : float dataの初期値 \"\"\"", "Attributes ---------- alpha : float 学習率α gamma : float 割引率γ", "a), self.init_value) def __set(self, s: int, a: int, value: float)", "a)] = value def update(self, s: int, a: int, r:", "-> float: \"\"\" Q値の更新 Parameters ---------- s : int 状態", "\"\"\" dataから値の取得 Parameters ---------- s : int 状態 a :", "---------- s : int 状態 a : int 行動 value", "Q(s, a) \"\"\" self.data[(s, a)] = value def update(self, s:", "value: float) -> None: \"\"\" dataへの値の代入 Parameters ---------- s :", ": dict Q-Learningでの学習結果の保存用辞書 init_value : float dataの初期値 \"\"\" def __init__(self,", "q_old = q_old[0] #print('alpha:{}, q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha, q_old, r,", "float Q値, Q(s, a) \"\"\" return self.data.get((s, a), self.init_value) def", "a : int 行動 value : float 代入するQ値, Q(s, a)", "None: \"\"\" dataへの値の代入 Parameters ---------- s : int 状態 a", ": float 報酬 q : float Q(s_t+1, a) q_old :", "-> float: \"\"\" dataから値の取得 Parameters ---------- s : int 状態", "状態 a : int 行動 Returns ------- value : float", "alpha : float 学習率α gamma : float 割引率γ data :", "報酬 q : float Q(s_t+1, a) q_old : float Q(s,", "value : float Q値, Q(s, a) \"\"\" return self.data.get((s, a),", "-> None: \"\"\" dataへの値の代入 Parameters ---------- s : int 状態", ": int 行動 value : float 代入するQ値, Q(s, a) \"\"\"", "alpha self.gamma = gamma self.data = data self.init_value = init_value", "float, q: float, *q_old: float) -> float: \"\"\" Q値の更新 Parameters", "else: q_old = q_old[0] #print('alpha:{}, q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha, q_old,", "a) \"\"\" return self.data.get((s, a), self.init_value) def __set(self, s: int,", ": float updateされたQ値 \"\"\" if len(q_old) == 0: q_old =", "---------- alpha : float 学習率α gamma : float 割引率γ data", "float, *q_old: float) -> float: \"\"\" Q値の更新 Parameters ---------- s", "q_new : float updateされたQ値 \"\"\" if len(q_old) == 0: q_old", "s : int 状態 a : int 行動 r :", "int 行動 value : float 代入するQ値, Q(s, a) \"\"\" self.data[(s,", "updateされたQ値 \"\"\" if len(q_old) == 0: q_old = self.get(s, a)", ": int 状態 a : int 行動 Returns ------- value", "init_value def get(self, s: int, a: int) -> float: \"\"\"", "= 0) -> None: self.alpha = alpha self.gamma = gamma", "init_value: float = 0) -> None: self.alpha = alpha self.gamma", "= {}, init_value: float = 
0) -> None: self.alpha =", "float 代入するQ値, Q(s, a) \"\"\" self.data[(s, a)] = value def", "data : dict Q-Learningでの学習結果の保存用辞書 init_value : float dataの初期値 \"\"\" def", "s : int 状態 a : int 行動 value :", "s: int, a: int, value: float) -> None: \"\"\" dataへの値の代入", ": float Q(s_t+1, a) q_old : float Q(s, a) Returns", "= data self.init_value = init_value def get(self, s: int, a:", "float 割引率γ data : dict Q-Learningでの学習結果の保存用辞書 init_value : float dataの初期値", "self.init_value = init_value def get(self, s: int, a: int) ->", "int, r: float, q: float, *q_old: float) -> float: \"\"\"", "float = 0) -> None: self.alpha = alpha self.gamma =", "---------- s : int 状態 a : int 行動 r", "a: int) -> float: \"\"\" dataから値の取得 Parameters ---------- s :", "from logging import getLogger logger = getLogger(__name__) class QLearning: \"\"\"", "update(self, s: int, a: int, r: float, q: float, *q_old:", "float dataの初期値 \"\"\" def __init__(self, alpha: float, gamma: float, data:", "{}, init_value: float = 0) -> None: self.alpha = alpha", ": int 行動 r : float 報酬 q : float", "------ q_new : float updateされたQ値 \"\"\" if len(q_old) == 0:", "value : float 代入するQ値, Q(s, a) \"\"\" self.data[(s, a)] =", "float) -> float: \"\"\" Q値の更新 Parameters ---------- s : int", "gamma: float, data: dict = {}, init_value: float = 0)", "__set(self, s: int, a: int, value: float) -> None: \"\"\"", "get(self, s: int, a: int) -> float: \"\"\" dataから値の取得 Parameters", "q_old : float Q(s, a) Returns ------ q_new : float", "= gamma self.data = data self.init_value = init_value def get(self,", "self.alpha = alpha self.gamma = gamma self.data = data self.init_value", "int 状態 a : int 行動 value : float 代入するQ値,", ": int 状態 a : int 行動 value : float", "self.init_value) def __set(self, s: int, a: int, value: float) ->", "q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha, q_old, r, self.gamma, q)) q_new =", "q : float Q(s_t+1, a) q_old : float Q(s, a)", "Returns ------ q_new : float updateされたQ値 \"\"\" if len(q_old) ==", "a) Returns ------ q_new : float updateされたQ値 \"\"\" if len(q_old)", "q_old, r, self.gamma, q)) q_new = (1-self.alpha)*q_old+self.alpha*(r + self.gamma*q) self.__set(s,", "Q-Learningでの学習結果の保存用辞書 init_value : float dataの初期値 \"\"\" def __init__(self, alpha: float,", ": float Q値, Q(s, a) \"\"\" return self.data.get((s, a), self.init_value)", "data: dict = {}, init_value: float = 0) -> None:", "s: int, a: int) -> float: \"\"\" dataから値の取得 Parameters ----------", "None: self.alpha = alpha self.gamma = gamma self.data = data", "def __init__(self, alpha: float, gamma: float, data: dict = {},", "def update(self, s: int, a: int, r: float, q: float,", "<filename>othello_rl/qlearning/qlearning.py from logging import getLogger logger = getLogger(__name__) class QLearning:", "len(q_old) == 0: q_old = self.get(s, a) else: q_old =", "q_new = (1-self.alpha)*q_old+self.alpha*(r + self.gamma*q) self.__set(s, a, q_new) return q_new", "dict Q-Learningでの学習結果の保存用辞書 init_value : float dataの初期値 \"\"\" def __init__(self, alpha:", "import getLogger logger = getLogger(__name__) class QLearning: \"\"\" Q-Learning用のクラス Attributes", "float Q(s_t+1, a) q_old : float Q(s, a) Returns ------", "Q-Learning用のクラス Attributes ---------- alpha : float 学習率α gamma : float", "a) q_old : float Q(s, a) Returns ------ q_new :", "gamma:{}, q:{}'.format(self.alpha, q_old, r, self.gamma, q)) q_new = (1-self.alpha)*q_old+self.alpha*(r +", "---------- s : int 状態 a : int 行動 Returns", "init_value : float dataの初期値 \"\"\" def __init__(self, alpha: float, gamma:", "float) -> None: \"\"\" dataへの値の代入 Parameters 
---------- s : int", "Parameters ---------- s : int 状態 a : int 行動", "= getLogger(__name__) class QLearning: \"\"\" Q-Learning用のクラス Attributes ---------- alpha :", "QLearning: \"\"\" Q-Learning用のクラス Attributes ---------- alpha : float 学習率α gamma", "int 行動 r : float 報酬 q : float Q(s_t+1,", "s : int 状態 a : int 行動 Returns -------", "\"\"\" return self.data.get((s, a), self.init_value) def __set(self, s: int, a:", "float updateされたQ値 \"\"\" if len(q_old) == 0: q_old = self.get(s,", "def __set(self, s: int, a: int, value: float) -> None:", "getLogger logger = getLogger(__name__) class QLearning: \"\"\" Q-Learning用のクラス Attributes ----------", "self.data[(s, a)] = value def update(self, s: int, a: int,", "状態 a : int 行動 value : float 代入するQ値, Q(s,", "q_old = self.get(s, a) else: q_old = q_old[0] #print('alpha:{}, q_old:{},", "q)) q_new = (1-self.alpha)*q_old+self.alpha*(r + self.gamma*q) self.__set(s, a, q_new) return", ": float dataの初期値 \"\"\" def __init__(self, alpha: float, gamma: float,", ": float 代入するQ値, Q(s, a) \"\"\" self.data[(s, a)] = value", "行動 r : float 報酬 q : float Q(s_t+1, a)", "a : int 行動 Returns ------- value : float Q値,", ": int 行動 Returns ------- value : float Q値, Q(s,", "0: q_old = self.get(s, a) else: q_old = q_old[0] #print('alpha:{},", "a : int 行動 r : float 報酬 q :", "------- value : float Q値, Q(s, a) \"\"\" return self.data.get((s,", "if len(q_old) == 0: q_old = self.get(s, a) else: q_old", "0) -> None: self.alpha = alpha self.gamma = gamma self.data", "dataへの値の代入 Parameters ---------- s : int 状態 a : int", "q_old[0] #print('alpha:{}, q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha, q_old, r, self.gamma, q))", "gamma self.data = data self.init_value = init_value def get(self, s:", "\"\"\" self.data[(s, a)] = value def update(self, s: int, a:", "int 状態 a : int 行動 r : float 報酬", "代入するQ値, Q(s, a) \"\"\" self.data[(s, a)] = value def update(self,", "= q_old[0] #print('alpha:{}, q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha, q_old, r, self.gamma,", "a) \"\"\" self.data[(s, a)] = value def update(self, s: int," ]
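
# --- Worked example (illustrative values, not part of the class above) ---
# A minimal sketch of the update rule
#     q_new = (1 - alpha) * q_old + alpha * (r + gamma * q).
# Note the caller supplies q, typically max_a' Q(s_{t+1}, a'); the class
# does not compute it. States, actions, and rewards here are made up.

ql = QLearning(alpha=0.5, gamma=0.9)

# First visit to (s=0, a=1): Q starts at init_value (0), reward is 1.0 and
# the best next-state value is 0.0, so Q <- 0.5*0 + 0.5*(1.0 + 0.9*0.0) = 0.5.
ql.update(s=0, a=1, r=1.0, q=0.0)
assert abs(ql.get(0, 1) - 0.5) < 1e-9

# Second visit, now bootstrapping from q = max_a' Q(s', a') = 0.5:
# Q <- 0.5*0.5 + 0.5*(1.0 + 0.9*0.5) = 0.975.
ql.update(s=0, a=1, r=1.0, q=0.5)
assert abs(ql.get(0, 1) - 0.975) < 1e-9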
[ "flexmock sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\")) import solr_interface import search_exceptions class FakeSolrDoc(): def", "= flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {})", "flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() solr = flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc())", "self.fields = [] class FakeDocument(): INDEX_NAME = \"indexname\" INDEX_LOCALE =", "{}) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 1}}", "def __init__(self): self.name = \"name\" self.schema = FakeSchema() class FakeIndexSpec():", "None, FakeIndexSpec()) def test_json_loads_byteified(self): json_with_unicode = ( '{\"key2\": [{\"\\\\u2611\": 28,", "solr interface module. \"\"\" def test_get_index_adapter(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\", "from SOLR. dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\",", "self.assertEquals(index.schema[0]['name'], \"index_ns_name_\") def test_update_schema(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr", "import os import json import sys import unittest import urllib2", "FakeIndexSpec(): def __init__(self): pass def namespace(self): return 'ns' def name(self):", "urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError,", "solr._get_index_adapter, \"app_id\", \"ns\", \"name\") fields = [{'name':\"index_ns_name_\"}] dictionary = {'responseHeader':{'status':", "[{'name':\"index_ns_name_\"}] dictionary = {'responseHeader':{'status': 0}, \"fields\": fields} json.should_receive(\"load\").and_return(dictionary) index =", "[\"\\\\u263a\"]}, \"second\", \"third\"], ' '\"key1\": \"value\", ' '\"\\\\u2604\": {\"\\\\u2708\": \"\\\\u2708\"}}'", "__init__(self): self.fields = [] class FakeIndex(): def __init__(self): self.name =", "= flexmock(solr) flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\",", "FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once() solr.update_document(\"app_id\", None, FakeIndexSpec()) def test_json_loads_byteified(self): json_with_unicode = (", "[{\"\\\\u2611\": 28, \"\\\\u2616\": [\"\\\\u263a\"]}, \"second\", \"third\"], ' '\"key1\": \"value\", '", "# Test the case of ValueError on a json.load. 
urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True))", "solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once() solr.update_document(\"app_id\", None, FakeIndexSpec()) def", "= \"indexlocale\" def __init__(self): self.fields = [] self.id = \"id\"", "flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates =", "FakeSolrDoc(): def __init__(self): self.fields = [] class FakeDocument(): INDEX_NAME =", "0}, \"fields\": fields} json.should_receive(\"load\").and_return(dictionary) index = solr._get_index_adapter(\"app_id\", \"ns\", \"name\") self.assertEquals(index.schema[0]['name'],", "is_good_code): self.code = 200 if not is_good_code: self.code = 500", "flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})", "[{'name': 'name1', 'type':'type1'}] flexmock(json) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary", "\"name\") # Test the case of ValueError on a json.load.", "{'responseHeader':{'status': 0}} json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({}) def test_update_document(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\", "json.load. urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json) json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") #", "json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary).once()", "elif isinstance(obj, list): for value in obj: walk_and_check_type(value) else: self.assertIsInstance(obj,", "= solr._get_index_adapter(\"app_id\", \"ns\", \"name\") self.assertEquals(index.schema[0]['name'], \"index_ns_name_\") def test_update_schema(self): appscale_info =", "and_return(\"somelocation\") solr = solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {}) def test_commit_update(self): appscale_info", "a bad status from SOLR. dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary)", "json import sys import unittest import urllib2 from flexmock import", "def __init__(self): self.fields = [] class FakeDocument(): INDEX_NAME = \"indexname\"", "bad status from SOLR. 
dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError,", "0}} json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({}) def test_update_document(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\")", "test_update_schema(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(urllib2)", "self.code = 500 def getcode(self): return self.code class TestSolrInterface(unittest.TestCase): \"\"\"", "name(self): return self.name class FakeUpdate(): def __init__(self, name, field_type): self.name", "\"app_id\", \"ns\", \"name\") # Test the case of ValueError on", "is_good_code: self.code = 500 def getcode(self): return self.code class TestSolrInterface(unittest.TestCase):", "json.should_receive(\"load\").and_return(dictionary) index = solr._get_index_adapter(\"app_id\", \"ns\", \"name\") self.assertEquals(index.schema[0]['name'], \"index_ns_name_\") def test_update_schema(self):", "field_type): self.name = name self.field_type = field_type class FakeConnection(): def", "json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test a bad", "solr.commit_update, {}) dictionary = {'responseHeader':{'status': 0}} json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({}) def test_update_document(self):", "solr.update_schema, updates) updates = [{'name': 'name1', 'type':'type1'}] flexmock(json) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True))", "in obj: walk_and_check_type(value) else: self.assertIsInstance(obj, (str, int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, {", "\"lang\" class FakeSchema(): def __init__(self): self.fields = [] class FakeIndex():", "= flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates", "appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()),", "test_json_loads_byteified(self): json_with_unicode = ( '{\"key2\": [{\"\\\\u2611\": 28, \"\\\\u2616\": [\"\\\\u263a\"]}, \"second\",", "'ns' def name(self): return self.name class FakeUpdate(): def __init__(self, name,", "self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") fields = [{'name':\"index_ns_name_\"}] dictionary =", "solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test the case of ValueError", "__init__(self): self.name = \"name\" self.schema = FakeSchema() class FakeIndexSpec(): def", "\"indexname\" INDEX_LOCALE = \"indexlocale\" def __init__(self): self.fields = [] self.id", "def walk_and_check_type(obj): if isinstance(obj, dict): for key, value in obj.iteritems():", "if isinstance(obj, dict): for key, value in obj.iteritems(): self.assertIsInstance(key, str)", "'key1': 'value', 'key2': [ {'\\<KEY>': 28, '\\xe2\\x98\\x96': ['\\xe2\\x98\\xba']}, 'second', 'third'", "solr.update_schema(updates) def test_to_solr_hash_map(self): appscale_info = 
flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr =", "FakeSchema(): def __init__(self): self.fields = [] class FakeIndex(): def __init__(self):", "flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() solr = flexmock(solr) flexmock(solr_interface)", "appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False))", "\"app_id\", \"ns\", \"name\") # Test a bad status from SOLR.", "'name1', 'type':'type1'}] flexmock(json) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary =", "solr = flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\", None,", "= [] self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) updates = [{'name': 'name1', 'type':'type1'}]", "dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\")", "self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary =", "if not is_good_code: self.code = 500 def getcode(self): return self.code", "\"name\") # Test a bad status from SOLR. 
dictionary =", "1}} json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 0}} json.should_receive(\"load\").and_return(dictionary).once()", "class FakeSchema(): def __init__(self): self.fields = [] class FakeIndex(): def", "= {'responseHeader':{'status': 0}} json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({}) def test_update_document(self): appscale_info = flexmock()", "'value', 'key2': [ {'\\<KEY>': 28, '\\xe2\\x98\\x96': ['\\xe2\\x98\\xba']}, 'second', 'third' ],", "'\"key1\": \"value\", ' '\"\\\\u2604\": {\"\\\\u2708\": \"\\\\u2708\"}}' ) parsed_obj = solr_interface.json_loads_byteified(json_with_unicode)", "import unittest import urllib2 from flexmock import flexmock sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\"))", "flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") #", "FakeConnection(): def __init__(self, is_good_code): self.code = 200 if not is_good_code:", "= [] self.id = \"id\" self.language = \"lang\" class FakeSchema():", "FakeSchema() class FakeIndexSpec(): def __init__(self): pass def namespace(self): return 'ns'", "json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":0}} json.should_receive(\"load\").and_return(dictionary) solr.update_schema(updates) def", "'type':'type1'}] flexmock(json) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":1}}", "[ {'\\<KEY>': 28, '\\xe2\\x98\\x96': ['\\xe2\\x98\\xba']}, 'second', 'third' ], '\\xe2\\x98\\x84': {'\\xe2\\x9c\\x88':", "= \"name\" self.schema = FakeSchema() class FakeIndexSpec(): def __init__(self): pass", "def __init__(self, is_good_code): self.code = 200 if not is_good_code: self.code", "self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test a bad status", "flexmock(solr) flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\")", "= flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() solr = flexmock(solr)", "\"\"\" def test_get_index_adapter(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr =", "self.fields = [] class FakeIndex(): def __init__(self): self.name = \"name\"", "= {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") fields", "cases for the solr interface module. \"\"\" def test_get_index_adapter(self): appscale_info", "for the solr interface module. 
\"\"\" def test_get_index_adapter(self): appscale_info =", "updates) updates = [{'name': 'name1', 'type':'type1'}] flexmock(json) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError,", "unittest import urllib2 from flexmock import flexmock sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\")) import", "urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary", "{ 'key1': 'value', 'key2': [ {'\\<KEY>': 28, '\\xe2\\x98\\x96': ['\\xe2\\x98\\xba']}, 'second',", "solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\", None,", "\"third\"], ' '\"key1\": \"value\", ' '\"\\\\u2604\": {\"\\\\u2708\": \"\\\\u2708\"}}' ) parsed_obj", "in obj.iteritems(): self.assertIsInstance(key, str) walk_and_check_type(value) elif isinstance(obj, list): for value", "return self.name class FakeUpdate(): def __init__(self, name, field_type): self.name =", "solr.update_document(\"app_id\", None, FakeIndexSpec()) def test_json_loads_byteified(self): json_with_unicode = ( '{\"key2\": [{\"\\\\u2611\":", "dictionary = {'responseHeader':{'status': 0}} json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({}) def test_update_document(self): appscale_info =", "def test_json_loads_byteified(self): json_with_unicode = ( '{\"key2\": [{\"\\\\u2611\": 28, \"\\\\u2616\": [\"\\\\u263a\"]},", "def __init__(self): self.fields = [] class FakeIndex(): def __init__(self): self.name", "solr.should_receive(\"to_solr_hash_map\").and_return(None).once() solr.update_document(\"app_id\", None, FakeIndexSpec()) def test_json_loads_byteified(self): json_with_unicode = ( '{\"key2\":", "def namespace(self): return 'ns' def name(self): return self.name class FakeUpdate():", "= flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\", None, FakeIndexSpec())", "{}) dictionary = {'responseHeader':{'status': 0}} json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({}) def test_update_document(self): appscale_info", "{'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") fields =", "str) walk_and_check_type(value) elif isinstance(obj, list): for value in obj: walk_and_check_type(value)", "import json import sys import unittest import urllib2 from flexmock", "urllib2 from flexmock import flexmock sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\")) import solr_interface import", "FakeUpdate(): def __init__(self, name, field_type): self.name = 
name self.field_type =", "set of test cases for the solr interface module. \"\"\"", "solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test a bad status from", "self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates)", "None, FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once() solr.update_document(\"app_id\", None,", "flexmock(json) json.should_receive(\"loads\").and_return({}) flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError,", "= flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(json) json.should_receive(\"loads\").and_return({}) flexmock(urllib2)", "{'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 0}}", "'{\"key2\": [{\"\\\\u2611\": 28, \"\\\\u2616\": [\"\\\\u263a\"]}, \"second\", \"third\"], ' '\"key1\": \"value\",", "pass def namespace(self): return 'ns' def name(self): return self.name class", "\"ns\", \"name\") # Test the case of ValueError on a", "def name(self): return self.name class FakeUpdate(): def __init__(self, name, field_type):", "flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2])", "{\"\\\\u2708\": \"\\\\u2708\"}}' ) parsed_obj = solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj): if isinstance(obj,", "self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test the case of", "FakeDocument(): INDEX_NAME = \"indexname\" INDEX_LOCALE = \"indexlocale\" def __init__(self): self.fields", "and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(json) json.should_receive(\"loads\").and_return({}) flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update,", "{\"responseHeader\":{\"status\":0}} json.should_receive(\"load\").and_return(dictionary) solr.update_schema(updates) def test_to_solr_hash_map(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\")", "dictionary = {\"responseHeader\":{\"status\":0}} json.should_receive(\"load\").and_return(dictionary) solr.update_schema(updates) def test_to_solr_hash_map(self): appscale_info = flexmock()", "os import json import sys import unittest import urllib2 from", "parsed_obj = 
solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj): if isinstance(obj, dict): for key,", "solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj): if isinstance(obj, dict): for key, value in", "'\"\\\\u2604\": {\"\\\\u2708\": \"\\\\u2708\"}}' ) parsed_obj = solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj): if", "def getcode(self): return self.code class TestSolrInterface(unittest.TestCase): \"\"\" A set of", "self.code = 200 if not is_good_code: self.code = 500 def", "the case of ValueError on a json.load. urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json) json.should_receive(\"load\").and_raise(ValueError)", "self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 0}} json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({}) def", "flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {}) def", "( '{\"key2\": [{\"\\\\u2611\": 28, \"\\\\u2616\": [\"\\\\u263a\"]}, \"second\", \"third\"], ' '\"key1\":", "\"\"\" A set of test cases for the solr interface", "= {\"responseHeader\":{\"status\":0}} json.should_receive(\"load\").and_return(dictionary) solr.update_schema(updates) def test_to_solr_hash_map(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\", "for key, value in obj.iteritems(): self.assertIsInstance(key, str) walk_and_check_type(value) elif isinstance(obj,", "import solr_interface import search_exceptions class FakeSolrDoc(): def __init__(self): self.fields =", "test_to_solr_hash_map(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(),", "of test cases for the solr interface module. \"\"\" def", "isinstance(obj, dict): for key, value in obj.iteritems(): self.assertIsInstance(key, str) walk_and_check_type(value)", "[] self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) updates = [{'name': 'name1', 'type':'type1'}] flexmock(json)", "SOLR. 
dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\",", "{}) def test_commit_update(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr =", "solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice()", "import flexmock sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\")) import solr_interface import search_exceptions class FakeSolrDoc():", "int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, { 'key1': 'value', 'key2': [ {'\\<KEY>': 28,", "solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once() solr.update_document(\"app_id\",", "status from SOLR. dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter,", "self.assertEqual(parsed_obj, { 'key1': 'value', 'key2': [ {'\\<KEY>': 28, '\\xe2\\x98\\x96': ['\\xe2\\x98\\xba']},", "'key2': [ {'\\<KEY>': 28, '\\xe2\\x98\\x96': ['\\xe2\\x98\\xba']}, 'second', 'third' ], '\\xe2\\x98\\x84':", "\"name\") self.assertEquals(index.schema[0]['name'], \"index_ns_name_\") def test_update_schema(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\")", "solr = solr_interface.Solr() flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates = [] self.assertRaises(search_exceptions.InternalError, solr.update_schema,", "(str, int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, { 'key1': 'value', 'key2': [ {'\\<KEY>':", "200 if not is_good_code: self.code = 500 def getcode(self): return", "updates) dictionary = {\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary =", "fields} json.should_receive(\"load\").and_return(dictionary) index = solr._get_index_adapter(\"app_id\", \"ns\", \"name\") self.assertEquals(index.schema[0]['name'], \"index_ns_name_\") def", "import sys import unittest import urllib2 from flexmock import flexmock", "flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates = [] self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) updates =", "\"indexlocale\" def __init__(self): self.fields = [] self.id = \"id\" self.language", "isinstance(obj, list): for value in obj: walk_and_check_type(value) else: self.assertIsInstance(obj, (str,", "A set of test cases for the solr interface module.", "solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once() 
solr.update_document(\"app_id\", None, FakeIndexSpec()) def test_json_loads_byteified(self):", "sys import unittest import urllib2 from flexmock import flexmock sys.path.append(os.path.join(os.path.dirname(__file__),", "walk_and_check_type(value) else: self.assertIsInstance(obj, (str, int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, { 'key1': 'value',", "json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 0}} json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({})", "solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once() solr.update_document(\"app_id\", None, FakeIndexSpec()) def test_json_loads_byteified(self): json_with_unicode", "def __init__(self): pass def namespace(self): return 'ns' def name(self): return", "500 def getcode(self): return self.code class TestSolrInterface(unittest.TestCase): \"\"\" A set", "solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":0}} json.should_receive(\"load\").and_return(dictionary) solr.update_schema(updates) def test_to_solr_hash_map(self): appscale_info", "obj: walk_and_check_type(value) else: self.assertIsInstance(obj, (str, int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, { 'key1':", "class FakeIndex(): def __init__(self): self.name = \"name\" self.schema = FakeSchema()", "list): for value in obj: walk_and_check_type(value) else: self.assertIsInstance(obj, (str, int))", "name, field_type): self.name = name self.field_type = field_type class FakeConnection():", "self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) updates = [{'name': 'name1', 'type':'type1'}] flexmock(json) json.should_receive(\"load\").and_raise(ValueError)", "solr = solr_interface.Solr() solr = flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None)", "class FakeIndexSpec(): def __init__(self): pass def namespace(self): return 'ns' def", "updates = [] self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) updates = [{'name': 'name1',", "from flexmock import flexmock sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\")) import solr_interface import search_exceptions", "__init__(self): pass def namespace(self): return 'ns' def name(self): return self.name", "name self.field_type = field_type class FakeConnection(): def __init__(self, is_good_code): self.code", "test_get_index_adapter(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() solr", "sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\")) import solr_interface import search_exceptions class FakeSolrDoc(): def __init__(self):", "test_update_document(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() solr", "json_with_unicode = ( '{\"key2\": [{\"\\\\u2611\": 28, \"\\\\u2616\": [\"\\\\u263a\"]}, \"second\", \"third\"],", "walk_and_check_type(value) elif isinstance(obj, list): for value in obj: walk_and_check_type(value) else:", "dictionary = {'responseHeader':{'status': 1}} 
json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary =", "def __init__(self, name, field_type): self.name = name self.field_type = field_type", "FakeIndex(): def __init__(self): self.name = \"name\" self.schema = FakeSchema() class", "search_exceptions class FakeSolrDoc(): def __init__(self): self.fields = [] class FakeDocument():", "self.name class FakeUpdate(): def __init__(self, name, field_type): self.name = name", "Test the case of ValueError on a json.load. urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json)", "and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates = [] self.assertRaises(search_exceptions.InternalError,", "json.should_receive(\"load\").and_return(dictionary) solr.update_schema(updates) def test_to_solr_hash_map(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr", "return 'ns' def name(self): return self.name class FakeUpdate(): def __init__(self,", "def test_commit_update(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr()", "= [] class FakeDocument(): INDEX_NAME = \"indexname\" INDEX_LOCALE = \"indexlocale\"", "\"value\", ' '\"\\\\u2604\": {\"\\\\u2708\": \"\\\\u2708\"}}' ) parsed_obj = solr_interface.json_loads_byteified(json_with_unicode) def", "def __init__(self): self.fields = [] self.id = \"id\" self.language =", "self.code class TestSolrInterface(unittest.TestCase): \"\"\" A set of test cases for", "appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(json) json.should_receive(\"loads\").and_return({})", "= \"id\" self.language = \"lang\" class FakeSchema(): def __init__(self): self.fields", "self.name = \"name\" self.schema = FakeSchema() class FakeIndexSpec(): def __init__(self):", "= {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status':", "appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates = []", "self.field_type = field_type class FakeConnection(): def __init__(self, is_good_code): self.code =", "flexmock(json) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary)", "\"ns\", \"name\") fields = [{'name':\"index_ns_name_\"}] dictionary = {'responseHeader':{'status': 0}, \"fields\":", "index = solr._get_index_adapter(\"app_id\", \"ns\", \"name\") self.assertEquals(index.schema[0]['name'], \"index_ns_name_\") def test_update_schema(self): appscale_info", "= {\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":0}} 
json.should_receive(\"load\").and_return(dictionary)", "solr_interface.Solr() solr = flexmock(solr) flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter,", "solr_interface.Solr() solr = flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\",", "python import os import json import sys import unittest import", "#!/usr/bin/env python import os import json import sys import unittest", "flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(json) json.should_receive(\"loads\").and_return({}) flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False))", "\"ns\", \"name\") # Test a bad status from SOLR. dictionary", "solr.commit_update, {}) dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})", "and_return(\"somelocation\") solr = solr_interface.Solr() solr = flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([])", "= ( '{\"key2\": [{\"\\\\u2611\": 28, \"\\\\u2616\": [\"\\\\u263a\"]}, \"second\", \"third\"], '", "= [{'name':\"index_ns_name_\"}] dictionary = {'responseHeader':{'status': 0}, \"fields\": fields} json.should_receive(\"load\").and_return(dictionary) index", "= FakeSchema() class FakeIndexSpec(): def __init__(self): pass def namespace(self): return", "INDEX_LOCALE = \"indexlocale\" def __init__(self): self.fields = [] self.id =", "def test_get_index_adapter(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr()", "= 200 if not is_good_code: self.code = 500 def getcode(self):", "namespace(self): return 'ns' def name(self): return self.name class FakeUpdate(): def", "\"\\\\u2708\"}}' ) parsed_obj = solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj): if isinstance(obj, dict):", "flexmock import flexmock sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\")) import solr_interface import search_exceptions class", "of ValueError on a json.load. 
urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json) json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter,", "import search_exceptions class FakeSolrDoc(): def __init__(self): self.fields = [] class", "solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test", "= solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {}) def test_commit_update(self): appscale_info = flexmock()", "FakeIndexSpec()) def test_json_loads_byteified(self): json_with_unicode = ( '{\"key2\": [{\"\\\\u2611\": 28, \"\\\\u2616\":", "appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(json) json.should_receive(\"loads\").and_return({}) flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError,", "= solr_interface.Solr() solr = flexmock(solr) flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError,", "getcode(self): return self.code class TestSolrInterface(unittest.TestCase): \"\"\" A set of test", ") parsed_obj = solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj): if isinstance(obj, dict): for", "# Test a bad status from SOLR. dictionary = {'responseHeader':{'status':", "on a json.load. urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json) json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\",", "appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() solr = flexmock(solr) flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\")", "value in obj.iteritems(): self.assertIsInstance(key, str) walk_and_check_type(value) elif isinstance(obj, list): for", "__init__(self, is_good_code): self.code = 200 if not is_good_code: self.code =", "solr = flexmock(solr) flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\",", "self.assertIsInstance(key, str) walk_and_check_type(value) elif isinstance(obj, list): for value in obj:", "__init__(self, name, field_type): self.name = name self.field_type = field_type class", "a json.load. 
urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json) json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\")", "json.should_receive(\"load\").and_return(dictionary).once() solr.commit_update({}) def test_update_document(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr", "self.schema = FakeSchema() class FakeIndexSpec(): def __init__(self): pass def namespace(self):", "self.fields = [] self.id = \"id\" self.language = \"lang\" class", "json.should_receive(\"loads\").and_return({}) flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update,", "\"fields\": fields} json.should_receive(\"load\").and_return(dictionary) index = solr._get_index_adapter(\"app_id\", \"ns\", \"name\") self.assertEquals(index.schema[0]['name'], \"index_ns_name_\")", "= [] class FakeIndex(): def __init__(self): self.name = \"name\" self.schema", "= solr_interface.Solr() solr = flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None)", "{\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":0}} json.should_receive(\"load\").and_return(dictionary) solr.update_schema(updates)", "class FakeSolrDoc(): def __init__(self): self.fields = [] class FakeDocument(): INDEX_NAME", "self.language = \"lang\" class FakeSchema(): def __init__(self): self.fields = []", "urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test the case", "else: self.assertIsInstance(obj, (str, int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, { 'key1': 'value', 'key2':", "__init__(self): self.fields = [] self.id = \"id\" self.language = \"lang\"", "return self.code class TestSolrInterface(unittest.TestCase): \"\"\" A set of test cases", "def test_update_document(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr()", "\"../../\")) import solr_interface import search_exceptions class FakeSolrDoc(): def __init__(self): self.fields", "TestSolrInterface(unittest.TestCase): \"\"\" A set of test cases for the solr", "class FakeUpdate(): def __init__(self, name, field_type): self.name = name self.field_type", "FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once() solr.update_document(\"app_id\", None, FakeIndexSpec())", "\"name\") fields = [{'name':\"index_ns_name_\"}] dictionary = {'responseHeader':{'status': 0}, \"fields\": fields}", 
"solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once()", "class TestSolrInterface(unittest.TestCase): \"\"\" A set of test cases for the", "Test a bad status from SOLR. dictionary = {'responseHeader':{'status': 1}}", "urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates = [] self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) updates = [{'name':", "solr_interface.Solr() flexmock(json) json.should_receive(\"loads\").and_return({}) flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True))", "FakeDocument()), {}) def test_commit_update(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr", "solr = solr_interface.Solr() solr = flexmock(solr) flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False))", "obj.iteritems(): self.assertIsInstance(key, str) walk_and_check_type(value) elif isinstance(obj, list): for value in", "json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError,", "walk_and_check_type(obj): if isinstance(obj, dict): for key, value in obj.iteritems(): self.assertIsInstance(key,", "urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json) json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test", "= name self.field_type = field_type class FakeConnection(): def __init__(self, is_good_code):", "test cases for the solr interface module. \"\"\" def test_get_index_adapter(self):", "{'responseHeader':{'status': 0}, \"fields\": fields} json.should_receive(\"load\").and_return(dictionary) index = solr._get_index_adapter(\"app_id\", \"ns\", \"name\")", "[] class FakeDocument(): INDEX_NAME = \"indexname\" INDEX_LOCALE = \"indexlocale\" def", "interface module. 
\"\"\" def test_get_index_adapter(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\")", "\"index_ns_name_\") def test_update_schema(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr =", "__init__(self): self.fields = [] class FakeDocument(): INDEX_NAME = \"indexname\" INDEX_LOCALE", "{'\\<KEY>': 28, '\\xe2\\x98\\x96': ['\\xe2\\x98\\xba']}, 'second', 'third' ], '\\xe2\\x98\\x84': {'\\xe2\\x9c\\x88': '\\xe2\\x9c\\x88'}", "self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update,", "None, FakeIndexSpec()) solr.should_receive(\"to_solr_hash_map\").and_return(None).once() solr.update_document(\"app_id\", None, FakeIndexSpec()) def test_json_loads_byteified(self): json_with_unicode =", "test_commit_update(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() flexmock(json)", "appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() solr =", "appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {}) def test_commit_update(self):", "= solr_interface.Solr() flexmock(json) json.should_receive(\"loads\").and_return({}) flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) json.should_receive(\"load\").and_raise(ValueError)", "appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr() solr = flexmock(solr) solr.should_receive(\"to_solr_doc\").and_return(FakeSolrDoc()) solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex())", "[] class FakeIndex(): def __init__(self): self.name = \"name\" self.schema =", "dict): for key, value in obj.iteritems(): self.assertIsInstance(key, str) walk_and_check_type(value) elif", "solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {}) def test_commit_update(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\", "self.name = name self.field_type = field_type class FakeConnection(): def __init__(self,", "= [{'name': 'name1', 'type':'type1'}] flexmock(json) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates)", "= {'responseHeader':{'status': 0}, \"fields\": fields} json.should_receive(\"load\").and_return(dictionary) index = solr._get_index_adapter(\"app_id\", \"ns\",", "solr = solr_interface.Solr() flexmock(json) json.should_receive(\"loads\").and_return({}) flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})", "class FakeDocument(): INDEX_NAME = \"indexname\" INDEX_LOCALE = \"indexlocale\" def __init__(self):", "class FakeConnection(): def __init__(self, is_good_code): self.code = 200 if not", "self.assertIsInstance(obj, (str, 
int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, { 'key1': 'value', 'key2': [", "\"ns\", \"name\") self.assertEquals(index.schema[0]['name'], \"index_ns_name_\") def test_update_schema(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\", "dictionary = {'responseHeader':{'status': 0}, \"fields\": fields} json.should_receive(\"load\").and_return(dictionary) index = solr._get_index_adapter(\"app_id\",", "= \"lang\" class FakeSchema(): def __init__(self): self.fields = [] class", "module. \"\"\" def test_get_index_adapter(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr", "and_return(\"somelocation\") solr = solr_interface.Solr() solr = flexmock(solr) flexmock(solr_interface) solr_interface.should_receive(\"get_index_name\").and_return(\"index_ns_name\") flexmock(urllib2)", "1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") fields = [{'name':\"index_ns_name_\"}]", "INDEX_NAME = \"indexname\" INDEX_LOCALE = \"indexlocale\" def __init__(self): self.fields =", "= solr_interface.Solr() flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates = [] self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates)", "updates = [{'name': 'name1', 'type':'type1'}] flexmock(json) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema,", "key, value in obj.iteritems(): self.assertIsInstance(key, str) walk_and_check_type(value) elif isinstance(obj, list):", "solr_interface.Solr() flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) updates = [] self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) updates", "28, '\\xe2\\x98\\x96': ['\\xe2\\x98\\xba']}, 'second', 'third' ], '\\xe2\\x98\\x84': {'\\xe2\\x9c\\x88': '\\xe2\\x9c\\x88'} })", "= field_type class FakeConnection(): def __init__(self, is_good_code): self.code = 200", "solr.should_receive(\"_get_index_adapter\").and_return(FakeIndex()) solr.should_receive(\"compute_updates\").and_return([]) solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\",", "solr._get_index_adapter(\"app_id\", \"ns\", \"name\") self.assertEquals(index.schema[0]['name'], \"index_ns_name_\") def test_update_schema(self): appscale_info = flexmock()", "flexmock(urllib2) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(False)) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test the", "json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") fields = [{'name':\"index_ns_name_\"}] dictionary", "\"id\" self.language = \"lang\" class FakeSchema(): def __init__(self): self.fields =", "self.id = \"id\" self.language = \"lang\" class FakeSchema(): def __init__(self):", "updates) dictionary = {\"responseHeader\":{\"status\":0}} json.should_receive(\"load\").and_return(dictionary) 
solr.update_schema(updates) def test_to_solr_hash_map(self): appscale_info =", "solr_interface import search_exceptions class FakeSolrDoc(): def __init__(self): self.fields = []", "' '\"key1\": \"value\", ' '\"\\\\u2604\": {\"\\\\u2708\": \"\\\\u2708\"}}' ) parsed_obj =", "28, \"\\\\u2616\": [\"\\\\u263a\"]}, \"second\", \"third\"], ' '\"key1\": \"value\", ' '\"\\\\u2604\":", "value in obj: walk_and_check_type(value) else: self.assertIsInstance(obj, (str, int)) walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj,", "fields = [{'name':\"index_ns_name_\"}] dictionary = {'responseHeader':{'status': 0}, \"fields\": fields} json.should_receive(\"load\").and_return(dictionary)", "walk_and_check_type(parsed_obj) self.assertEqual(parsed_obj, { 'key1': 'value', 'key2': [ {'\\<KEY>': 28, '\\xe2\\x98\\x96':", "self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":0}} json.should_receive(\"load\").and_return(dictionary) solr.update_schema(updates) def test_to_solr_hash_map(self):", "case of ValueError on a json.load. urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json) json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError,", "def test_to_solr_hash_map(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr()", "\"name\" self.schema = FakeSchema() class FakeIndexSpec(): def __init__(self): pass def", "flexmock(json) json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\", \"ns\", \"name\") # Test a", "= \"indexname\" INDEX_LOCALE = \"indexlocale\" def __init__(self): self.fields = []", "urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema,", "solr.commit_update, {}) json.should_receive(\"load\").and_raise(ValueError) urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary = {'responseHeader':{'status':", "for value in obj: walk_and_check_type(value) else: self.assertIsInstance(obj, (str, int)) walk_and_check_type(parsed_obj)", "<gh_stars>100-1000 #!/usr/bin/env python import os import json import sys import", "self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {}) def test_commit_update(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\")", "solr.commit_update({}) def test_update_document(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr =", "{}) dictionary = {'responseHeader':{'status': 1}} json.should_receive(\"load\").and_return(dictionary).once() self.assertRaises(search_exceptions.InternalError, solr.commit_update, {}) dictionary", "\"app_id\", \"ns\", \"name\") fields = [{'name':\"index_ns_name_\"}] dictionary = {'responseHeader':{'status': 0},", "def test_update_schema(self): appscale_info = flexmock() appscale_info.should_receive(\"get_search_location\").\\ and_return(\"somelocation\") solr = solr_interface.Solr()", "\"\\\\u2616\": [\"\\\\u263a\"]}, 
\"second\", \"third\"], ' '\"key1\": \"value\", ' '\"\\\\u2604\": {\"\\\\u2708\":", "dictionary = {\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":0}}", "solr.update_schema, updates) dictionary = {\"responseHeader\":{\"status\":1}} json.should_receive(\"load\").and_return(dictionary) self.assertRaises(search_exceptions.InternalError, solr.update_schema, updates) dictionary", "not is_good_code: self.code = 500 def getcode(self): return self.code class", "solr.should_receive(\"to_solr_hash_map\").and_return(None) solr.should_receive(\"commit_update\").and_return(None) solr.update_document(\"app_id\", None, FakeIndexSpec()) solr.should_receive(\"compute_updates\").and_return([1,2]) solr.should_receive(\"update_schema\").twice() solr.update_document(\"app_id\", None, FakeIndexSpec())", "[] self.id = \"id\" self.language = \"lang\" class FakeSchema(): def", "\"second\", \"third\"], ' '\"key1\": \"value\", ' '\"\\\\u2604\": {\"\\\\u2708\": \"\\\\u2708\"}}' )", "import urllib2 from flexmock import flexmock sys.path.append(os.path.join(os.path.dirname(__file__), \"../../\")) import solr_interface", "field_type class FakeConnection(): def __init__(self, is_good_code): self.code = 200 if", "ValueError on a json.load. urllib2.should_receive(\"urlopen\").and_return(FakeConnection(True)) flexmock(json) json.should_receive(\"load\").and_raise(ValueError) self.assertRaises(search_exceptions.InternalError, solr._get_index_adapter, \"app_id\",", "= 500 def getcode(self): return self.code class TestSolrInterface(unittest.TestCase): \"\"\" A", "solr = solr_interface.Solr() self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {}) def test_commit_update(self): appscale_info =", "= solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj): if isinstance(obj, dict): for key, value", "the solr interface module. \"\"\" def test_get_index_adapter(self): appscale_info = flexmock()", "' '\"\\\\u2604\": {\"\\\\u2708\": \"\\\\u2708\"}}' ) parsed_obj = solr_interface.json_loads_byteified(json_with_unicode) def walk_and_check_type(obj):" ]
[ "self.description msg = (msg + \", http_code: \" + str(self.http_status))", "+ self.description msg = (msg + \", http_code: \" +", "http_status=None, field=None): self.description = description self.http_status = http_status self.field =", "field self._message = self.error_message() super(PayabbhiError, self).__init__(self._message) def error_message(self): msg =", "= \"message: \" + self.description msg = (msg + \",", "\" + self.field) if self.field else msg return msg +", "\"message: \" + self.description msg = (msg + \", http_code:", "+ \", field: \" + self.field) if self.field else msg", "description=None, http_status=None, field=None): self.description = description self.http_status = http_status self.field", "+ str(self.http_status)) if self.http_status else msg msg = (msg +", "(msg + \", http_code: \" + str(self.http_status)) if self.http_status else", "if self.field else msg return msg + \"\\n\" class APIError(PayabbhiError):", "msg return msg + \"\\n\" class APIError(PayabbhiError): pass class APIConnectionError(PayabbhiError):", "class APIError(PayabbhiError): pass class APIConnectionError(PayabbhiError): pass class AuthenticationError(PayabbhiError): pass class", "class AuthenticationError(PayabbhiError): pass class InvalidRequestError(PayabbhiError): pass class GatewayError(PayabbhiError): pass class", "def error_message(self): msg = \"message: \" + self.description msg =", "__init__(self, description=None, http_status=None, field=None): self.description = description self.http_status = http_status", "self.error_message() super(PayabbhiError, self).__init__(self._message) def error_message(self): msg = \"message: \" +", "error_message(self): msg = \"message: \" + self.description msg = (msg", "\"\\n\" class APIError(PayabbhiError): pass class APIConnectionError(PayabbhiError): pass class AuthenticationError(PayabbhiError): pass", "self.http_status = http_status self.field = field self._message = self.error_message() super(PayabbhiError,", "AuthenticationError(PayabbhiError): pass class InvalidRequestError(PayabbhiError): pass class GatewayError(PayabbhiError): pass class SignatureVerificationError(PayabbhiError):", "if self.http_status else msg msg = (msg + \", field:", "field: \" + self.field) if self.field else msg return msg", "APIConnectionError(PayabbhiError): pass class AuthenticationError(PayabbhiError): pass class InvalidRequestError(PayabbhiError): pass class GatewayError(PayabbhiError):", "\", http_code: \" + str(self.http_status)) if self.http_status else msg msg", "= http_status self.field = field self._message = self.error_message() super(PayabbhiError, self).__init__(self._message)", "msg = \"message: \" + self.description msg = (msg +", "super(PayabbhiError, self).__init__(self._message) def error_message(self): msg = \"message: \" + self.description", "self.field) if self.field else msg return msg + \"\\n\" class", "msg = (msg + \", field: \" + self.field) if", "self.http_status else msg msg = (msg + \", field: \"", "self.description = description self.http_status = http_status self.field = field self._message", "self).__init__(self._message) def error_message(self): msg = \"message: \" + self.description msg", "return msg + \"\\n\" class APIError(PayabbhiError): pass class APIConnectionError(PayabbhiError): pass", "def __init__(self, description=None, http_status=None, field=None): self.description = description self.http_status =", "(msg + \", field: \" + self.field) if self.field else", "else msg msg = (msg + \", field: \" +", "= description 
self.http_status = http_status self.field = field self._message =", "msg msg = (msg + \", field: \" + self.field)", "pass class APIConnectionError(PayabbhiError): pass class AuthenticationError(PayabbhiError): pass class InvalidRequestError(PayabbhiError): pass", "class APIConnectionError(PayabbhiError): pass class AuthenticationError(PayabbhiError): pass class InvalidRequestError(PayabbhiError): pass class", "PayabbhiError(Exception): def __init__(self, description=None, http_status=None, field=None): self.description = description self.http_status", "self.field else msg return msg + \"\\n\" class APIError(PayabbhiError): pass", "= (msg + \", http_code: \" + str(self.http_status)) if self.http_status", "= (msg + \", field: \" + self.field) if self.field", "+ self.field) if self.field else msg return msg + \"\\n\"", "msg + \"\\n\" class APIError(PayabbhiError): pass class APIConnectionError(PayabbhiError): pass class", "+ \"\\n\" class APIError(PayabbhiError): pass class APIConnectionError(PayabbhiError): pass class AuthenticationError(PayabbhiError):", "http_status self.field = field self._message = self.error_message() super(PayabbhiError, self).__init__(self._message) def", "str(self.http_status)) if self.http_status else msg msg = (msg + \",", "= self.error_message() super(PayabbhiError, self).__init__(self._message) def error_message(self): msg = \"message: \"", "\" + self.description msg = (msg + \", http_code: \"", "msg = (msg + \", http_code: \" + str(self.http_status)) if", "\", field: \" + self.field) if self.field else msg return", "http_code: \" + str(self.http_status)) if self.http_status else msg msg =", "pass class InvalidRequestError(PayabbhiError): pass class GatewayError(PayabbhiError): pass class SignatureVerificationError(PayabbhiError): pass", "class PayabbhiError(Exception): def __init__(self, description=None, http_status=None, field=None): self.description = description", "field=None): self.description = description self.http_status = http_status self.field = field", "self._message = self.error_message() super(PayabbhiError, self).__init__(self._message) def error_message(self): msg = \"message:", "\" + str(self.http_status)) if self.http_status else msg msg = (msg", "+ \", http_code: \" + str(self.http_status)) if self.http_status else msg", "pass class AuthenticationError(PayabbhiError): pass class InvalidRequestError(PayabbhiError): pass class GatewayError(PayabbhiError): pass", "APIError(PayabbhiError): pass class APIConnectionError(PayabbhiError): pass class AuthenticationError(PayabbhiError): pass class InvalidRequestError(PayabbhiError):", "description self.http_status = http_status self.field = field self._message = self.error_message()", "self.field = field self._message = self.error_message() super(PayabbhiError, self).__init__(self._message) def error_message(self):", "= field self._message = self.error_message() super(PayabbhiError, self).__init__(self._message) def error_message(self): msg", "else msg return msg + \"\\n\" class APIError(PayabbhiError): pass class" ]
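Because every concrete error subclasses PayabbhiError and inherits its message formatting, callers can catch the base class alone. A quick sketch with made-up values:

# Toy values only; not a real API response.
try:
    raise InvalidRequestError(description="amount is invalid",
                              http_status=400, field="amount")
except PayabbhiError as err:
    print(err)  # -> message: amount is invalid, http_code: 400, field: amount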
[ "import partition_activations_in_checkpoint from .random import get_cuda_rng_tracker from .random import model_parallel_cuda_manual_seed", "import get_model_parallel_rank from .initialize import get_model_parallel_src_rank from .initialize import get_model_parallel_world_size", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "from .initialize import get_model_parallel_src_rank from .initialize import get_model_parallel_world_size from .initialize", "NVIDIA CORPORATION. All rights reserved. # # Licensed under the", "import gather_from_model_parallel_region from .mappings import reduce_from_model_parallel_region from .mappings import scatter_to_model_parallel_region", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "import RowParallelLinear from .layers import VocabParallelEmbedding from .mappings import copy_to_model_parallel_region", "from .cross_entropy import vocab_parallel_cross_entropy from .data import broadcast_data from .grads", "import reduce_from_model_parallel_region from .mappings import scatter_to_model_parallel_region from .random import checkpoint", "import get_cuda_rng_tracker from .random import model_parallel_cuda_manual_seed from .transformer_enc_dec import ParallelTransformer,", "distributed under the License is distributed on an \"AS IS\"", "initialize_model_parallel from .initialize import model_parallel_is_initialized from .layers import ColumnParallelLinear from", ".initialize import get_data_parallel_world_size from .initialize import get_model_parallel_group from .initialize import", "copy_to_model_parallel_region from .mappings import gather_from_model_parallel_region from .mappings import reduce_from_model_parallel_region from", "the specific language governing permissions and # limitations under the", "language governing permissions and # limitations under the License. \"\"\"Model", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", ".random import partition_activations_in_checkpoint from .random import get_cuda_rng_tracker from .random import", ".data import broadcast_data from .grads import clip_grad_norm from .initialize import", "from .initialize import get_model_parallel_group from .initialize import get_model_parallel_rank from .initialize", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", ".initialize import initialize_model_parallel from .initialize import model_parallel_is_initialized from .layers import", "governing permissions and # limitations under the License. \"\"\"Model parallel", "# coding=utf-8 # Copyright (c) 2019, NVIDIA CORPORATION. All rights", "except in compliance with the License. 
# You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "from .layers import RowParallelLinear from .layers import VocabParallelEmbedding from .mappings", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "clip_grad_norm from .initialize import destroy_model_parallel from .initialize import get_data_parallel_group from", "rights reserved. # # Licensed under the Apache License, Version", "destroy_model_parallel from .initialize import get_data_parallel_group from .initialize import get_data_parallel_rank from", "checkpoint from .random import partition_activations_in_checkpoint from .random import get_cuda_rng_tracker from", "from .random import get_cuda_rng_tracker from .random import model_parallel_cuda_manual_seed from .transformer_enc_dec", "get_data_parallel_group from .initialize import get_data_parallel_rank from .initialize import get_data_parallel_world_size from", ".layers import VocabParallelEmbedding from .mappings import copy_to_model_parallel_region from .mappings import", "writing, software # distributed under the License is distributed on", ".initialize import get_data_parallel_rank from .initialize import get_data_parallel_world_size from .initialize import", "# limitations under the License. \"\"\"Model parallel utility interface.\"\"\" from", "in writing, software # distributed under the License is distributed", "import get_model_parallel_src_rank from .initialize import get_model_parallel_world_size from .initialize import initialize_model_parallel", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "\"\"\"Model parallel utility interface.\"\"\" from .cross_entropy import vocab_parallel_cross_entropy from .data", "import get_data_parallel_group from .initialize import get_data_parallel_rank from .initialize import get_data_parallel_world_size", ".mappings import scatter_to_model_parallel_region from .random import checkpoint from .random import", "# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. #", "from .initialize import get_data_parallel_world_size from .initialize import get_model_parallel_group from .initialize", "utility interface.\"\"\" from .cross_entropy import vocab_parallel_cross_entropy from .data import broadcast_data", "under the License. \"\"\"Model parallel utility interface.\"\"\" from .cross_entropy import", "from .data import broadcast_data from .grads import clip_grad_norm from .initialize", "use this file except in compliance with the License. #", "the License. \"\"\"Model parallel utility interface.\"\"\" from .cross_entropy import vocab_parallel_cross_entropy", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "gather_from_model_parallel_region from .mappings import reduce_from_model_parallel_region from .mappings import scatter_to_model_parallel_region from", "import initialize_model_parallel from .initialize import model_parallel_is_initialized from .layers import ColumnParallelLinear", "reserved. # # Licensed under the Apache License, Version 2.0", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "from .initialize import get_model_parallel_world_size from .initialize import initialize_model_parallel from .initialize", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "get_cuda_rng_tracker from .random import model_parallel_cuda_manual_seed from .transformer_enc_dec import ParallelTransformer, LayerNorm", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "import model_parallel_is_initialized from .layers import ColumnParallelLinear from .layers import ParallelEmbedding", "RowParallelLinear from .layers import VocabParallelEmbedding from .mappings import copy_to_model_parallel_region from", "coding=utf-8 # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.", "License. # You may obtain a copy of the License", "License, Version 2.0 (the \"License\"); # you may not use", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "import VocabParallelEmbedding from .mappings import copy_to_model_parallel_region from .mappings import gather_from_model_parallel_region", "# You may obtain a copy of the License at", "reduce_from_model_parallel_region from .mappings import scatter_to_model_parallel_region from .random import checkpoint from", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", ".initialize import destroy_model_parallel from .initialize import get_data_parallel_group from .initialize import", "ColumnParallelLinear from .layers import ParallelEmbedding from .layers import RowParallelLinear from", "get_model_parallel_world_size from .initialize import initialize_model_parallel from .initialize import model_parallel_is_initialized from", "from .mappings import reduce_from_model_parallel_region from .mappings import scatter_to_model_parallel_region from .random", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "from .initialize import initialize_model_parallel from .initialize import model_parallel_is_initialized from .layers", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", ".initialize import get_model_parallel_rank from .initialize import get_model_parallel_src_rank from .initialize import", ".layers import RowParallelLinear from .layers import VocabParallelEmbedding from .mappings import", "import get_model_parallel_group from .initialize import get_model_parallel_rank from .initialize import get_model_parallel_src_rank", ".layers import ColumnParallelLinear from .layers import ParallelEmbedding from .layers import", "VocabParallelEmbedding from .mappings import copy_to_model_parallel_region from .mappings import gather_from_model_parallel_region from", "CORPORATION. All rights reserved. # # Licensed under the Apache", "from .random import partition_activations_in_checkpoint from .random import get_cuda_rng_tracker from .random", "from .layers import ColumnParallelLinear from .layers import ParallelEmbedding from .layers", "parallel utility interface.\"\"\" from .cross_entropy import vocab_parallel_cross_entropy from .data import", "permissions and # limitations under the License. \"\"\"Model parallel utility", "the License for the specific language governing permissions and #", "(c) 2019, NVIDIA CORPORATION. All rights reserved. 
# # Licensed", "(the \"License\"); # you may not use this file except", "Apache License, Version 2.0 (the \"License\"); # you may not", "vocab_parallel_cross_entropy from .data import broadcast_data from .grads import clip_grad_norm from", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "import destroy_model_parallel from .initialize import get_data_parallel_group from .initialize import get_data_parallel_rank", "OR CONDITIONS OF ANY KIND, either express or implied. #", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the License is distributed on an \"AS IS\" BASIS, #", "import clip_grad_norm from .initialize import destroy_model_parallel from .initialize import get_data_parallel_group", "in compliance with the License. # You may obtain a", "import scatter_to_model_parallel_region from .random import checkpoint from .random import partition_activations_in_checkpoint", "software # distributed under the License is distributed on an", "from .initialize import model_parallel_is_initialized from .layers import ColumnParallelLinear from .layers", "import ParallelEmbedding from .layers import RowParallelLinear from .layers import VocabParallelEmbedding", "get_data_parallel_world_size from .initialize import get_model_parallel_group from .initialize import get_model_parallel_rank from", "import checkpoint from .random import partition_activations_in_checkpoint from .random import get_cuda_rng_tracker", "import copy_to_model_parallel_region from .mappings import gather_from_model_parallel_region from .mappings import reduce_from_model_parallel_region", ".mappings import gather_from_model_parallel_region from .mappings import reduce_from_model_parallel_region from .mappings import", "# # Unless required by applicable law or agreed to", "get_model_parallel_rank from .initialize import get_model_parallel_src_rank from .initialize import get_model_parallel_world_size from", ".initialize import get_model_parallel_group from .initialize import get_model_parallel_rank from .initialize import", "from .mappings import copy_to_model_parallel_region from .mappings import gather_from_model_parallel_region from .mappings", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "Version 2.0 (the \"License\"); # you may not use this", "License. \"\"\"Model parallel utility interface.\"\"\" from .cross_entropy import vocab_parallel_cross_entropy from", "get_model_parallel_group from .initialize import get_model_parallel_rank from .initialize import get_model_parallel_src_rank from", "law or agreed to in writing, software # distributed under", "limitations under the License. \"\"\"Model parallel utility interface.\"\"\" from .cross_entropy", "from .mappings import scatter_to_model_parallel_region from .random import checkpoint from .random", ".cross_entropy import vocab_parallel_cross_entropy from .data import broadcast_data from .grads import", ".mappings import reduce_from_model_parallel_region from .mappings import scatter_to_model_parallel_region from .random import", "import broadcast_data from .grads import clip_grad_norm from .initialize import destroy_model_parallel", "implied. 
# See the License for the specific language governing", ".initialize import get_model_parallel_world_size from .initialize import initialize_model_parallel from .initialize import", "under the Apache License, Version 2.0 (the \"License\"); # you", "\"License\"); # you may not use this file except in", ".layers import ParallelEmbedding from .layers import RowParallelLinear from .layers import", "from .random import checkpoint from .random import partition_activations_in_checkpoint from .random", "partition_activations_in_checkpoint from .random import get_cuda_rng_tracker from .random import model_parallel_cuda_manual_seed from", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "All rights reserved. # # Licensed under the Apache License,", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "from .layers import ParallelEmbedding from .layers import RowParallelLinear from .layers", "import get_data_parallel_rank from .initialize import get_data_parallel_world_size from .initialize import get_model_parallel_group", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "broadcast_data from .grads import clip_grad_norm from .initialize import destroy_model_parallel from", ".grads import clip_grad_norm from .initialize import destroy_model_parallel from .initialize import", "2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under", "from .initialize import destroy_model_parallel from .initialize import get_data_parallel_group from .initialize", "from .mappings import gather_from_model_parallel_region from .mappings import reduce_from_model_parallel_region from .mappings", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "from .initialize import get_data_parallel_group from .initialize import get_data_parallel_rank from .initialize", "import get_data_parallel_world_size from .initialize import get_model_parallel_group from .initialize import get_model_parallel_rank", ".initialize import get_model_parallel_src_rank from .initialize import get_model_parallel_world_size from .initialize import", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "get_data_parallel_rank from .initialize import get_data_parallel_world_size from .initialize import get_model_parallel_group from", "model_parallel_is_initialized from .layers import ColumnParallelLinear from .layers import ParallelEmbedding from", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "You may obtain a copy of the License at #", "import get_model_parallel_world_size from .initialize import initialize_model_parallel from .initialize import model_parallel_is_initialized", "scatter_to_model_parallel_region from .random import checkpoint from .random import partition_activations_in_checkpoint from", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "from .initialize import get_model_parallel_rank from .initialize import get_model_parallel_src_rank from .initialize", "and # limitations under the License. \"\"\"Model parallel utility interface.\"\"\"", "import ColumnParallelLinear from .layers import ParallelEmbedding from .layers import RowParallelLinear", "required by applicable law or agreed to in writing, software", ".random import checkpoint from .random import partition_activations_in_checkpoint from .random import", ".random import get_cuda_rng_tracker from .random import model_parallel_cuda_manual_seed from .transformer_enc_dec import", "ParallelEmbedding from .layers import RowParallelLinear from .layers import VocabParallelEmbedding from", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "import vocab_parallel_cross_entropy from .data import broadcast_data from .grads import clip_grad_norm", ".mappings import copy_to_model_parallel_region from .mappings import gather_from_model_parallel_region from .mappings import", "with the License. # You may obtain a copy of", "this file except in compliance with the License. 
# You", "from .grads import clip_grad_norm from .initialize import destroy_model_parallel from .initialize", "the Apache License, Version 2.0 (the \"License\"); # you may", "interface.\"\"\" from .cross_entropy import vocab_parallel_cross_entropy from .data import broadcast_data from", "get_model_parallel_src_rank from .initialize import get_model_parallel_world_size from .initialize import initialize_model_parallel from", ".initialize import model_parallel_is_initialized from .layers import ColumnParallelLinear from .layers import", "from .initialize import get_data_parallel_rank from .initialize import get_data_parallel_world_size from .initialize", "from .layers import VocabParallelEmbedding from .mappings import copy_to_model_parallel_region from .mappings", ".initialize import get_data_parallel_group from .initialize import get_data_parallel_rank from .initialize import" ]
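For orientation, a rough usage sketch of this interface. The package alias mpu and the positional argument to initialize_model_parallel are assumptions here; only the exported names come from the module above, and torch.distributed must already be set up (one process per GPU):

import torch.distributed as dist
import mpu  # assumed alias for the package defined by this __init__

# Assumes the process group is created elsewhere (e.g. via launcher env vars).
if not dist.is_initialized():
    dist.init_process_group(backend='nccl')

# Assumed call shape: split the world into model-parallel groups of size 2.
if not mpu.model_parallel_is_initialized():
    mpu.initialize_model_parallel(2)

print('mp rank', mpu.get_model_parallel_rank(),
      'dp rank', mpu.get_data_parallel_rank())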
[ "super(Empresa, self).save(*args, **kwargs) def __unicode__(self): return u'%s' % self.nome_razao_social def", "cnae = models.CharField(max_length=10, blank=True, null=True) iest = models.CharField(max_length=32, null=True, blank=True)", "class MinhaEmpresa(models.Model): m_empresa = models.ForeignKey( Empresa, on_delete=models.CASCADE, related_name='minha_empresa', blank=True, null=True)", "**kwargs) def __unicode__(self): return u'%s' % self.nome_razao_social def __str__(self): return", "caminho_completo_logo(self): if self.logo_file.name != 'imagens/logo.png': return os.path.join(MEDIA_ROOT, self.logo_file.name) else: return", "iest = models.CharField(max_length=32, null=True, blank=True) class Meta: verbose_name = \"Empresa\"", "models.ForeignKey( Empresa, on_delete=models.CASCADE, related_name='minha_empresa', blank=True, null=True) m_usuario = models.ForeignKey( Usuario,", "import post_delete from django.dispatch import receiver from .base import Pessoa", "'imagens/logo.png': return os.path.join(MEDIA_ROOT, self.logo_file.name) else: return '' def save(self, *args,", "coding: utf-8 -*- import os from django.db import models from", "Deletar logo se ja existir um try: obj = Empresa.objects.get(id=self.id)", "# Nao deletar a imagem default 'logo.png' if instance.logo_file !=", "**kwargs): # Nao deletar a imagem default 'logo.png' if instance.logo_file", "= os.path.splitext(filename)[1] return 'imagens/empresas/logo_{0}_{1}{2}'.format(instance.nome_razao_social, instance.id, extension) class Empresa(Pessoa): logo_file =", "self.nome_razao_social # Deletar logo quando empresa for deletada @receiver(post_delete, sender=Empresa)", "default='imagens/logo.png', blank=True, null=True) cnae = models.CharField(max_length=10, blank=True, null=True) iest =", "u'%s' % self.nome_razao_social # Deletar logo quando empresa for deletada", "m_empresa = models.ForeignKey( Empresa, on_delete=models.CASCADE, related_name='minha_empresa', blank=True, null=True) m_usuario =", "from django.db import models from django.db.models.signals import post_delete from django.dispatch", "def caminho_completo_logo(self): if self.logo_file.name != 'imagens/logo.png': return os.path.join(MEDIA_ROOT, self.logo_file.name) else:", "return u'%s' % self.nome_razao_social # Deletar logo quando empresa for", "import os from django.db import models from django.db.models.signals import post_delete", "import receiver from .base import Pessoa from djangosige.apps.login.models import Usuario", "u'%s' % self.nome_razao_social def __str__(self): return u'%s' % self.nome_razao_social #", "django.db.models.signals import post_delete from django.dispatch import receiver from .base import", "from .base import Pessoa from djangosige.apps.login.models import Usuario from djangosige.configs.settings", "logo_directory_path(instance, filename): extension = os.path.splitext(filename)[1] return 'imagens/empresas/logo_{0}_{1}{2}'.format(instance.nome_razao_social, instance.id, extension) class", "% self.nome_razao_social def __str__(self): return u'%s' % self.nome_razao_social # Deletar", "def __str__(self): return u'%s' % self.nome_razao_social # Deletar logo quando", "django.dispatch import receiver from .base import Pessoa from djangosige.apps.login.models import", "blank=True) class Meta: verbose_name = \"Empresa\" @property def caminho_completo_logo(self): if", "default 'logo.png' if instance.logo_file != 'imagens/logo.png': instance.logo_file.delete(False) class MinhaEmpresa(models.Model): m_empresa", "Empresa.objects.get(id=self.id) if 
obj.logo_file != self.logo_file and obj.logo_file != 'imagens/logo.png': obj.logo_file.delete(save=False)", "receiver from .base import Pessoa from djangosige.apps.login.models import Usuario from", "= Empresa.objects.get(id=self.id) if obj.logo_file != self.logo_file and obj.logo_file != 'imagens/logo.png':", "blank=True, null=True) iest = models.CharField(max_length=32, null=True, blank=True) class Meta: verbose_name", "if instance.logo_file != 'imagens/logo.png': instance.logo_file.delete(False) class MinhaEmpresa(models.Model): m_empresa = models.ForeignKey(", "models.CharField(max_length=32, null=True, blank=True) class Meta: verbose_name = \"Empresa\" @property def", "os.path.join(MEDIA_ROOT, self.logo_file.name) else: return '' def save(self, *args, **kwargs): #", "deletada @receiver(post_delete, sender=Empresa) def logo_post_delete_handler(sender, instance, **kwargs): # Nao deletar", "Pessoa from djangosige.apps.login.models import Usuario from djangosige.configs.settings import MEDIA_ROOT def", "!= 'imagens/logo.png': instance.logo_file.delete(False) class MinhaEmpresa(models.Model): m_empresa = models.ForeignKey( Empresa, on_delete=models.CASCADE,", "Nao deletar a imagem default 'logo.png' if instance.logo_file != 'imagens/logo.png':", "null=True) iest = models.CharField(max_length=32, null=True, blank=True) class Meta: verbose_name =", "import models from django.db.models.signals import post_delete from django.dispatch import receiver", "= models.CharField(max_length=32, null=True, blank=True) class Meta: verbose_name = \"Empresa\" @property", "post_delete from django.dispatch import receiver from .base import Pessoa from", "return u'%s' % self.nome_razao_social def __str__(self): return u'%s' % self.nome_razao_social", "djangosige.configs.settings import MEDIA_ROOT def logo_directory_path(instance, filename): extension = os.path.splitext(filename)[1] return", "Meta: verbose_name = \"Empresa\" @property def caminho_completo_logo(self): if self.logo_file.name !=", "@property def caminho_completo_logo(self): if self.logo_file.name != 'imagens/logo.png': return os.path.join(MEDIA_ROOT, self.logo_file.name)", "on_delete=models.CASCADE, related_name='minha_empresa', blank=True, null=True) m_usuario = models.ForeignKey( Usuario, on_delete=models.CASCADE, related_name='empresa_usuario')", "'imagens/logo.png': instance.logo_file.delete(False) class MinhaEmpresa(models.Model): m_empresa = models.ForeignKey( Empresa, on_delete=models.CASCADE, related_name='minha_empresa',", ".base import Pessoa from djangosige.apps.login.models import Usuario from djangosige.configs.settings import", "if self.logo_file.name != 'imagens/logo.png': return os.path.join(MEDIA_ROOT, self.logo_file.name) else: return ''", "logo_file = models.ImageField( upload_to=logo_directory_path, default='imagens/logo.png', blank=True, null=True) cnae = models.CharField(max_length=10,", "\"Empresa\" @property def caminho_completo_logo(self): if self.logo_file.name != 'imagens/logo.png': return os.path.join(MEDIA_ROOT,", "logo_post_delete_handler(sender, instance, **kwargs): # Nao deletar a imagem default 'logo.png'", "os from django.db import models from django.db.models.signals import post_delete from", "empresa for deletada @receiver(post_delete, sender=Empresa) def logo_post_delete_handler(sender, instance, **kwargs): #", "instance.logo_file != 'imagens/logo.png': instance.logo_file.delete(False) class MinhaEmpresa(models.Model): m_empresa = models.ForeignKey( Empresa,", "'logo.png' if instance.logo_file != 
# -*- coding: utf-8 -*-

import os

from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver

from .base import Pessoa
from djangosige.apps.login.models import Usuario
from djangosige.configs.settings import MEDIA_ROOT


def logo_directory_path(instance, filename):
    extension = os.path.splitext(filename)[1]
    return 'imagens/empresas/logo_{0}_{1}{2}'.format(instance.nome_razao_social, instance.id, extension)


class Empresa(Pessoa):
    logo_file = models.ImageField(
        upload_to=logo_directory_path, default='imagens/logo.png', blank=True, null=True)
    cnae = models.CharField(max_length=10, blank=True, null=True)
    iest = models.CharField(max_length=32, null=True, blank=True)

    class Meta:
        verbose_name = "Empresa"

    @property
    def caminho_completo_logo(self):
        if self.logo_file.name != 'imagens/logo.png':
            return os.path.join(MEDIA_ROOT, self.logo_file.name)
        else:
            return ''

    def save(self, *args, **kwargs):
        # Delete the previous logo if one already exists
        try:
            obj = Empresa.objects.get(id=self.id)
            if obj.logo_file != self.logo_file and obj.logo_file != 'imagens/logo.png':
                obj.logo_file.delete(save=False)
        except Empresa.DoesNotExist:
            pass
        super(Empresa, self).save(*args, **kwargs)

    def __unicode__(self):
        return u'%s' % self.nome_razao_social

    def __str__(self):
        return u'%s' % self.nome_razao_social


# Delete the logo when the company is deleted
@receiver(post_delete, sender=Empresa)
def logo_post_delete_handler(sender, instance, **kwargs):
    # Do not delete the default image 'logo.png'
    if instance.logo_file != 'imagens/logo.png':
        instance.logo_file.delete(False)


class MinhaEmpresa(models.Model):
    m_empresa = models.ForeignKey(
        Empresa, on_delete=models.CASCADE, related_name='minha_empresa', blank=True, null=True)
    m_usuario = models.ForeignKey(
        Usuario, on_delete=models.CASCADE)
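# Illustrative sketch (not part of the original module): what
# logo_directory_path produces for a given instance. The stand-in class
# below is invented for the example and only mimics the two attributes
# the function actually reads.
class _EmpresaStub:
    nome_razao_social = 'ACME Ltda'
    id = 42

assert logo_directory_path(_EmpresaStub(), 'foto.png') == \
    'imagens/empresas/logo_ACME Ltda_42.png'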
[ "n_sample) s0_ref = [x for i, x in enumerate(s0_ref) if", "pred: list(list(any)), a list of predictions :return: corpus bleu score", "0, 0], smoothing_function=smooth.method1),\\ corpus_bleu(ref, pred, weights=[0, 1, 0, 0], smoothing_function=smooth.method1),", "f1_s1 * 100, 'mean', (f1_s0 + f1_s1) / 2 *", "s0_ref = [], [] s1_pred, s1_ref = [], [] for", "= count_ngram(hyps_resp, 2) / float(num_tokens) return dist1, dist2 def eval_f1(ref,", "n_sample) s1_ref = [x for i, x in enumerate(s1_ref) if", "of the eval file', required=True) args = parser.parse_args() file_path =", "2 * precision * recall / (precision + recall) def", "1 tot_l = sum([len(rs) for rs in ref[i]]) if tot_l", "in hyps_resp: if len(resp) < n: continue for i in", "sentences, each element of the list is a list of", "<class 'list'>, get a list of {} instead\".format( type(hyps_resp[0]))) return", "list(list(any)), a list of predictions :return: f1 score \"\"\" assert", "'mean', (f1_s0 + f1_s1) / 2 * 100) print('Dist:', 's0',", "import corpus_bleu from nltk.translate.bleu_score import SmoothingFunction import json from tqdm", "dist2 def eval_f1(ref, pred): \"\"\" :param ref: list(list(list(any))), a list", "s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else: s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if n_sample: assert len(s0_ref) >=", "for i, x in enumerate(s0_pred) if i in sampled_idxs] sampled_idxs", "for w in rs: ref_set.add(w) pred_set = set() for w", ":return: the number of unique n-grams in hyps_resp \"\"\" if", "list of {} instead\".format( type(hyps_resp[0]))) return ngram = set() for", "return 0.0 if precision == recall == 0 else 2", "0 else 2 * precision * recall / (precision +", "from nltk.translate.bleu_score import corpus_bleu from nltk.translate.bleu_score import SmoothingFunction import json", "return if type(hyps_resp[0]) != list: print(\"ERROR, eval_distinct takes in a", "corpus_bleu(ref, pred, weights=[0, 0, 0, 1], smoothing_function=smooth.method1) def count_ngram(hyps_resp, n):", ">= n_sample sampled_idxs = sample(range(len(s0_ref)), n_sample) s0_ref = [x for", "def count_ngram(hyps_resp, n): \"\"\" Count the number of unique n-grams", "rs in ref[i]: for w in rs: ref_set.add(w) pred_set =", "= set() for w in s: pred_set.add(w) p = 0", "+= 1 if len(s) > 0: p /= len(s) r", "= [(' '.join(i)).split() for i in hyps_resp] num_tokens = sum([len(i)", "eval_distinct_detail(s0_pred) dist_s1 = eval_distinct_detail(s1_pred) f1_s0 = eval_f1(s0_ref, s0_pred) f1_s1 =", "+ f1_s1) / 2 * 100) print('Dist:', 's0', dist_s0[1] *", "def eval_bleu_detail(ref, pred): \"\"\" :param ref: list(list(list(any))), a list of", "ref_set = set() for rs in ref[i]: for w in", "list(list(list(any))), a list of reference sentences, each element of the", "smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0, 0, 1], smoothing_function=smooth.method1) def", "s0_pred.append(list(d['pred_style0'][0])) else: s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if n_sample: assert len(s0_ref) >= n_sample", "list, a list of responses :param n: int, n-gram :return:", "0, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0, 1, 0],", "from tqdm import tqdm, trange from random import sample import", "0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else: s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if n_sample: assert len(s0_ref)", "sampled_idxs = 
sample(range(len(s0_ref)), n_sample) s0_ref = [x for i, x", "[] recalls = [] for i, s in enumerate(pred): ref_set", "print(\"ERROR, eval_distinct takes in a list of <class 'list'>, get", "range(1, 4): print('%d-gram BLEU:' % k, 's0', bleu_s0[k - 1]", "takes in a list of <class 'list'>, get a list", "bleu_s1 = eval_bleu_detail(s1_ref, s1_pred) dist_s0 = eval_distinct_detail(s0_pred) dist_s1 = eval_distinct_detail(s1_pred)", "import tqdm, trange from random import sample import numpy as", "100, 'mean', (dist_s0[1] + dist_s1[1]) / 2 * 100) parser", "args = parser.parse_args() file_path = args.eval_file_path calc_metrics_value(None, file_path) print(\"Evaluating acc", ":return: corpus bleu score \"\"\" return corpus_bleu(ref, pred, smoothing_function=smooth.method1) def", "= sum(recalls) / len(recalls) return 0.0 if precision == recall", "of {} instead\".format( type(hyps_resp[0]))) return ngram = set() for resp", "= sample(range(len(s0_ref)), n_sample) s0_ref = [x for i, x in", "= eval_distinct_detail(s1_pred) f1_s0 = eval_f1(s0_ref, s0_pred) f1_s1 = eval_f1(s1_ref, s1_pred)", "s0_ref = [x for i, x in enumerate(s0_ref) if i", "bert_eval_acc import svm_eval_acc smooth = SmoothingFunction() def eval_bleu(ref, pred): \"\"\"", "if tot_l > 0: r /= tot_l precisions.append(p) recalls.append(r) precision", "dist2 = count_ngram(hyps_resp, 2) / float(num_tokens) return dist1, dist2 def", "f.readlines()] s0_pred, s0_ref = [], [] s1_pred, s1_ref = [],", "in ref[i]: for w in rs: ref_set.add(w) pred_set = set()", "0 for rs in ref[i]: for w in rs: if", "smoothing_function=smooth.method1) def count_ngram(hyps_resp, n): \"\"\" Count the number of unique", "def eval_bleu(ref, pred): \"\"\" :param ref: list(list(list(any))), a list of", "rs in ref[i]]) if tot_l > 0: r /= tot_l", "= [x for i, x in enumerate(s0_pred) if i in", "smoothing_function=smooth.method1),\\ corpus_bleu(ref, pred, weights=[0, 1, 0, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref,", "import os from nltk.translate.bleu_score import corpus_bleu from nltk.translate.bleu_score import SmoothingFunction", "sampled_idxs] sampled_idxs = sample(range(len(s1_ref)), n_sample) s1_ref = [x for i,", "recall / (precision + recall) def calc_metrics_value(task, fn, n_sample=None): with", "empty input\") return if type(hyps_resp[0]) != list: print(\"ERROR, eval_distinct takes", "l in hyps_resp] hyps_resp = [(' '.join(i)).split() for i in", "precisions = [] recalls = [] for i, s in", "1, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0, 0, 1],", "precision = sum(precisions) / len(precisions) recall = sum(recalls) / len(recalls)", "s1_pred.append(list(d['pred_style1'][0])) if n_sample: assert len(s0_ref) >= n_sample assert len(s1_ref) >=", "= set() for resp in hyps_resp: if len(resp) < n:", "= 0 for rs in ref[i]: for w in rs:", "w in pred_set: r += 1 tot_l = sum([len(rs) for", "= [x for i, x in enumerate(s1_ref) if i in", "100, 's1', dist_s1[1] * 100, 'mean', (dist_s0[1] + dist_s1[1]) /", "get a list of {} instead\".format( type(hyps_resp[0]))) return ngram =", "rs in ref[i]: for w in rs: if w in", "= 0 for w in s: if w in ref_set:", "== 0: print(\"ERROR, eval_distinct get empty input\") return if type(hyps_resp[0])", "eval_f1(s0_ref, s0_pred) f1_s1 = eval_f1(s1_ref, s1_pred) for k in range(1,", "x in l] for l in hyps_resp] hyps_resp = [('", "smooth = SmoothingFunction() def eval_bleu(ref, pred): \"\"\" :param ref: list(list(list(any))),", "in pred_set: r += 1 tot_l = sum([len(rs) for rs", "as f: res = [json.loads(i) 
for i in f.readlines()] s0_pred,", "len(hyps_resp) == 0: print(\"ERROR, eval_distinct get empty input\") return if", "int, n-gram :return: the number of unique n-grams in hyps_resp", "def eval_f1(ref, pred): \"\"\" :param ref: list(list(list(any))), a list of", "eval_distinct takes in a list of <class 'list'>, get a", "sum(recalls) / len(recalls) return 0.0 if precision == recall ==", "help='path of the eval file', required=True) args = parser.parse_args() file_path", "!= list: print(\"ERROR, eval_distinct takes in a list of <class", "- n + 1): ngram.add(' '.join(resp[i: i + n])) return", "+ 1): ngram.add(' '.join(resp[i: i + n])) return len(ngram) def", "[], [] s1_pred, s1_ref = [], [] for d in", "[] for d in res: if d['style'] == 0: s0_ref.append([list(d['resp'])])", "file_path = args.eval_file_path calc_metrics_value(None, file_path) print(\"Evaluating acc results:\") bert_eval_acc.main(file_path) svm_eval_acc.main(file_path)", "hyps_resp = [(' '.join(i)).split() for i in hyps_resp] num_tokens =", "'mean', (bleu_s0[k - 1] + bleu_s1[k - 1]) / 2", "reference sentences, each element of the list is a list", "= parser.parse_args() file_path = args.eval_file_path calc_metrics_value(None, file_path) print(\"Evaluating acc results:\")", "of responses :param n: int, n-gram :return: the number of", "= sum(precisions) / len(precisions) recall = sum(recalls) / len(recalls) return", "SmoothingFunction import json from tqdm import tqdm, trange from random", "r = 0 for rs in ref[i]: for w in", "the number of unique n-grams :param hyps_resp: list, a list", "[json.loads(i) for i in f.readlines()] s0_pred, s0_ref = [], []", "if d['style'] == 0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else: s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if", "0, 1], smoothing_function=smooth.method1) def count_ngram(hyps_resp, n): \"\"\" Count the number", "for i, x in enumerate(s1_ref) if i in sampled_idxs] s1_pred", "if len(resp) < n: continue for i in range(len(resp) -", "1] * 100, 'mean', (bleu_s0[k - 1] + bleu_s1[k -", "f: res = [json.loads(i) for i in f.readlines()] s0_pred, s0_ref", ":param pred: list(list(any)), a list of predictions :return: corpus bleu", "for i, s in enumerate(pred): ref_set = set() for rs", "import bert_eval_acc import svm_eval_acc smooth = SmoothingFunction() def eval_bleu(ref, pred):", "\"\"\" return corpus_bleu(ref, pred, smoothing_function=smooth.method1) def eval_bleu_detail(ref, pred): \"\"\" :param", "i in sampled_idxs] s1_pred = [x for i, x in", "os from nltk.translate.bleu_score import corpus_bleu from nltk.translate.bleu_score import SmoothingFunction import", "pickle import argparse import bert_eval_acc import svm_eval_acc smooth = SmoothingFunction()", "\\ corpus_bleu(ref, pred, weights=[0, 0, 0, 1], smoothing_function=smooth.method1) def count_ngram(hyps_resp,", "> 0: r /= tot_l precisions.append(p) recalls.append(r) precision = sum(precisions)", "100, 's1', f1_s1 * 100, 'mean', (f1_s0 + f1_s1) /", "f1_s1) / 2 * 100) print('Dist:', 's0', dist_s0[1] * 100,", "1 if len(s) > 0: p /= len(s) r =", "d in res: if d['style'] == 0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else:", "100, 's1', bleu_s1[k - 1] * 100, 'mean', (bleu_s0[k -", "* 100) print('F1:', 's0', f1_s0 * 100, 's1', f1_s1 *", "ref: list(list(list(any))), a list of reference sentences, each element of", "if n_sample: assert len(s0_ref) >= n_sample assert len(s1_ref) >= n_sample", "2 * 100) parser = argparse.ArgumentParser() 
parser.add_argument('--eval_file_path', help='path of the", "in a list of <class 'list'>, get a list of", "i in sampled_idxs] sampled_idxs = sample(range(len(s1_ref)), n_sample) s1_ref = [x", "enumerate(s0_pred) if i in sampled_idxs] sampled_idxs = sample(range(len(s1_ref)), n_sample) s1_ref", "in s: pred_set.add(w) p = 0 for w in s:", "bleu score \"\"\" return corpus_bleu(ref, pred, smoothing_function=smooth.method1) def eval_bleu_detail(ref, pred):", "trange from random import sample import numpy as np import", "bleu_s1[k - 1]) / 2 * 100) print('F1:', 's0', f1_s0", "f1_s0 * 100, 's1', f1_s1 * 100, 'mean', (f1_s0 +", "BLEU:' % k, 's0', bleu_s0[k - 1] * 100, 's1',", "bleu_s0 = eval_bleu_detail(s0_ref, s0_pred) bleu_s1 = eval_bleu_detail(s1_ref, s1_pred) dist_s0 =", "number of unique n-grams in hyps_resp \"\"\" if len(hyps_resp) ==", "pred_set = set() for w in s: pred_set.add(w) p =", "2 * 100) print('Dist:', 's0', dist_s0[1] * 100, 's1', dist_s1[1]", "in ref[i]]) if tot_l > 0: r /= tot_l precisions.append(p)", "import numpy as np import pickle import argparse import bert_eval_acc", "a list of predictions :return: f1 score \"\"\" assert len(ref)", "corpus bleu score \"\"\" return corpus_bleu(ref, pred, weights=[1, 0, 0,", "/ float(num_tokens) dist2 = count_ngram(hyps_resp, 2) / float(num_tokens) return dist1,", "i in hyps_resp]) dist1 = count_ngram(hyps_resp, 1) / float(num_tokens) dist2", "set() for w in s: pred_set.add(w) p = 0 for", "sampled_idxs = sample(range(len(s1_ref)), n_sample) s1_ref = [x for i, x", "4): print('%d-gram BLEU:' % k, 's0', bleu_s0[k - 1] *", "0, 0, 1], smoothing_function=smooth.method1) def count_ngram(hyps_resp, n): \"\"\" Count the", "weights=[0, 0, 0, 1], smoothing_function=smooth.method1) def count_ngram(hyps_resp, n): \"\"\" Count", "precision * recall / (precision + recall) def calc_metrics_value(task, fn,", "def calc_metrics_value(task, fn, n_sample=None): with open(fn) as f: res =", "of unique n-grams in hyps_resp \"\"\" if len(hyps_resp) == 0:", "'s1', bleu_s1[k - 1] * 100, 'mean', (bleu_s0[k - 1]", "i, x in enumerate(s0_pred) if i in sampled_idxs] sampled_idxs =", "+ bleu_s1[k - 1]) / 2 * 100) print('F1:', 's0',", "of predictions :return: f1 score \"\"\" assert len(ref) == len(pred)", "bleu_s1[k - 1] * 100, 'mean', (bleu_s0[k - 1] +", "1] * 100, 's1', bleu_s1[k - 1] * 100, 'mean',", "= argparse.ArgumentParser() parser.add_argument('--eval_file_path', help='path of the eval file', required=True) args", "hyps_resp = [[str(x) for x in l] for l in", "[x for i, x in enumerate(s1_pred) if i in sampled_idxs]", "0: p /= len(s) r = 0 for rs in", "1, 2-gram \"\"\" if len(hyps_resp) == 0: print(\"ERROR, eval_distinct get", "* 100, 'mean', (f1_s0 + f1_s1) / 2 * 100)", "res: if d['style'] == 0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else: s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0]))", "s0_pred) bleu_s1 = eval_bleu_detail(s1_ref, s1_pred) dist_s0 = eval_distinct_detail(s0_pred) dist_s1 =", "else 2 * precision * recall / (precision + recall)", "unique n-grams :param hyps_resp: list, a list of responses :param", "list of predictions :return: corpus bleu score \"\"\" return corpus_bleu(ref,", "* 100, 'mean', (dist_s0[1] + dist_s1[1]) / 2 * 100)", "import argparse import bert_eval_acc import svm_eval_acc smooth = SmoothingFunction() def", "list of predictions :return: f1 score \"\"\" assert len(ref) ==", "for i, x in enumerate(s0_ref) if i in sampled_idxs] s0_pred", "return corpus_bleu(ref, pred, 
smoothing_function=smooth.method1) def eval_bleu_detail(ref, pred): \"\"\" :param ref:", "s0_pred) f1_s1 = eval_f1(s1_ref, s1_pred) for k in range(1, 4):", "= SmoothingFunction() def eval_bleu(ref, pred): \"\"\" :param ref: list(list(list(any))), a", "* precision * recall / (precision + recall) def calc_metrics_value(task,", "eval_bleu_detail(s0_ref, s0_pred) bleu_s1 = eval_bleu_detail(s1_ref, s1_pred) dist_s0 = eval_distinct_detail(s0_pred) dist_s1", "assert len(s1_ref) >= n_sample sampled_idxs = sample(range(len(s0_ref)), n_sample) s0_ref =", "= eval_distinct_detail(s0_pred) dist_s1 = eval_distinct_detail(s1_pred) f1_s0 = eval_f1(s0_ref, s0_pred) f1_s1", "of the list is a list of references :param pred:", "get a list of {} instead\".format( type(hyps_resp[0]))) return hyps_resp =", "for 1, 2-gram \"\"\" if len(hyps_resp) == 0: print(\"ERROR, eval_distinct", "= sum([len(rs) for rs in ref[i]]) if tot_l > 0:", "ref_set.add(w) pred_set = set() for w in s: pred_set.add(w) p", "{} instead\".format( type(hyps_resp[0]))) return hyps_resp = [[str(x) for x in", "each element of the list is a list of references", "in ref[i]: for w in rs: if w in pred_set:", "a list of references :param pred: list(list(any)), a list of", "print('F1:', 's0', f1_s0 * 100, 's1', f1_s1 * 100, 'mean',", "% k, 's0', bleu_s0[k - 1] * 100, 's1', bleu_s1[k", "count_ngram(hyps_resp, 1) / float(num_tokens) dist2 = count_ngram(hyps_resp, 2) / float(num_tokens)", "< n: continue for i in range(len(resp) - n +", "of predictions :return: corpus bleu score \"\"\" return corpus_bleu(ref, pred,", "score for 1, 2-gram \"\"\" if len(hyps_resp) == 0: print(\"ERROR,", "in l] for l in hyps_resp] hyps_resp = [(' '.join(i)).split()", "= [x for i, x in enumerate(s0_ref) if i in", "references :param pred: list(list(any)), a list of predictions :return: corpus", "smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0, 1, 0], smoothing_function=smooth.method1), \\", "[] s1_pred, s1_ref = [], [] for d in res:", "hyps responses :return: average distinct score for 1, 2-gram \"\"\"", "i in f.readlines()] s0_pred, s0_ref = [], [] s1_pred, s1_ref", "100) print('F1:', 's0', f1_s0 * 100, 's1', f1_s1 * 100,", "weights=[0, 1, 0, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0,", "i in sampled_idxs] s0_pred = [x for i, x in", "in enumerate(pred): ref_set = set() for rs in ref[i]: for", "num_tokens = sum([len(i) for i in hyps_resp]) dist1 = count_ngram(hyps_resp,", "0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0, 0, 1], smoothing_function=smooth.method1)", "len(s0_ref) >= n_sample assert len(s1_ref) >= n_sample sampled_idxs = sample(range(len(s0_ref)),", "dist_s1[1] * 100, 'mean', (dist_s0[1] + dist_s1[1]) / 2 *", "print('%d-gram BLEU:' % k, 's0', bleu_s0[k - 1] * 100,", "in sampled_idxs] sampled_idxs = sample(range(len(s1_ref)), n_sample) s1_ref = [x for", "if i in sampled_idxs] s1_pred = [x for i, x", "for resp in hyps_resp: if len(resp) < n: continue for", "'s1', f1_s1 * 100, 'mean', (f1_s0 + f1_s1) / 2", "for w in rs: if w in pred_set: r +=", "nltk.translate.bleu_score import corpus_bleu from nltk.translate.bleu_score import SmoothingFunction import json from", "- 1] * 100, 'mean', (bleu_s0[k - 1] + bleu_s1[k", "sum([len(i) for i in hyps_resp]) dist1 = count_ngram(hyps_resp, 1) /", "= [[str(x) for x in l] for l in hyps_resp]", "s0_pred = [x for i, x in enumerate(s0_pred) if i", "k in range(1, 4): print('%d-gram BLEU:' % k, 's0', bleu_s0[k", "pred, 
smoothing_function=smooth.method1) def eval_bleu_detail(ref, pred): \"\"\" :param ref: list(list(list(any))), a", "/ 2 * 100) parser = argparse.ArgumentParser() parser.add_argument('--eval_file_path', help='path of", "Count the number of unique n-grams :param hyps_resp: list, a", "s in enumerate(pred): ref_set = set() for rs in ref[i]:", "= sum([len(i) for i in hyps_resp]) dist1 = count_ngram(hyps_resp, 1)", "== 0 else 2 * precision * recall / (precision", "\\ corpus_bleu(ref, pred, weights=[0, 0, 1, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref,", "set() for rs in ref[i]: for w in rs: ref_set.add(w)", "s1_pred = [x for i, x in enumerate(s1_pred) if i", "print(\"ERROR, eval_distinct get empty input\") return if type(hyps_resp[0]) != list:", "pred): \"\"\" :param ref: list(list(list(any))), a list of reference sentences,", "ngram = set() for resp in hyps_resp: if len(resp) <", "i in hyps_resp] num_tokens = sum([len(i) for i in hyps_resp])", "for i in hyps_resp]) dist1 = count_ngram(hyps_resp, 1) / float(num_tokens)", "i, x in enumerate(s1_pred) if i in sampled_idxs] bleu_s0 =", "w in rs: ref_set.add(w) pred_set = set() for w in", "'.join(resp[i: i + n])) return len(ngram) def eval_distinct_detail(hyps_resp): \"\"\" compute", "svm_eval_acc smooth = SmoothingFunction() def eval_bleu(ref, pred): \"\"\" :param ref:", "(precision + recall) def calc_metrics_value(task, fn, n_sample=None): with open(fn) as", "bleu score \"\"\" return corpus_bleu(ref, pred, weights=[1, 0, 0, 0],", "(dist_s0[1] + dist_s1[1]) / 2 * 100) parser = argparse.ArgumentParser()", "list: print(\"ERROR, eval_distinct takes in a list of <class 'list'>,", "ref[i]: for w in rs: ref_set.add(w) pred_set = set() for", "calc_metrics_value(task, fn, n_sample=None): with open(fn) as f: res = [json.loads(i)", "argparse.ArgumentParser() parser.add_argument('--eval_file_path', help='path of the eval file', required=True) args =", "ref[i]]) if tot_l > 0: r /= tot_l precisions.append(p) recalls.append(r)", ">= n_sample assert len(s1_ref) >= n_sample sampled_idxs = sample(range(len(s0_ref)), n_sample)", ":param hyps_resp: list, a list of responses :param n: int,", "= [], [] s1_pred, s1_ref = [], [] for d", "- 1] * 100, 's1', bleu_s1[k - 1] * 100,", "corpus_bleu(ref, pred, smoothing_function=smooth.method1) def eval_bleu_detail(ref, pred): \"\"\" :param ref: list(list(list(any))),", "x in enumerate(s0_ref) if i in sampled_idxs] s0_pred = [x", "[x for i, x in enumerate(s0_ref) if i in sampled_idxs]", "x in enumerate(s1_ref) if i in sampled_idxs] s1_pred = [x", "(bleu_s0[k - 1] + bleu_s1[k - 1]) / 2 *", "2 * 100) print('F1:', 's0', f1_s0 * 100, 's1', f1_s1", "if precision == recall == 0 else 2 * precision", "\"\"\" return corpus_bleu(ref, pred, weights=[1, 0, 0, 0], smoothing_function=smooth.method1),\\ corpus_bleu(ref,", "i + n])) return len(ngram) def eval_distinct_detail(hyps_resp): \"\"\" compute distinct", "score for the hyps_resp :param hyps_resp: list, a list of", "the list is a list of references :param pred: list(list(any)),", "> 0 precisions = [] recalls = [] for i,", "of references :param pred: list(list(any)), a list of predictions :return:", "if i in sampled_idxs] bleu_s0 = eval_bleu_detail(s0_ref, s0_pred) bleu_s1 =", "average distinct score for 1, 2-gram \"\"\" if len(hyps_resp) ==", "* 100) parser = argparse.ArgumentParser() parser.add_argument('--eval_file_path', help='path of the eval", "eval_f1(ref, pred): \"\"\" :param ref: list(list(list(any))), a list of reference", "0, 1, 0], 
smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0, 0,", "else: s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if n_sample: assert len(s0_ref) >= n_sample assert", "recall = sum(recalls) / len(recalls) return 0.0 if precision ==", "l] for l in hyps_resp] hyps_resp = [(' '.join(i)).split() for", "type(hyps_resp[0]) != list: print(\"ERROR, eval_distinct takes in a list of", "len(ngram) def eval_distinct_detail(hyps_resp): \"\"\" compute distinct score for the hyps_resp", "pred_set: r += 1 tot_l = sum([len(rs) for rs in", "precisions.append(p) recalls.append(r) precision = sum(precisions) / len(precisions) recall = sum(recalls)", "input\") return if type(hyps_resp[0]) != list: print(\"ERROR, eval_distinct takes in", "[] for i, s in enumerate(pred): ref_set = set() for", "/ len(recalls) return 0.0 if precision == recall == 0", "hyps_resp] hyps_resp = [(' '.join(i)).split() for i in hyps_resp] num_tokens", "n): \"\"\" Count the number of unique n-grams :param hyps_resp:", "res = [json.loads(i) for i in f.readlines()] s0_pred, s0_ref =", "i, s in enumerate(pred): ref_set = set() for rs in", "as np import pickle import argparse import bert_eval_acc import svm_eval_acc", "count_ngram(hyps_resp, 2) / float(num_tokens) return dist1, dist2 def eval_f1(ref, pred):", "predictions :return: f1 score \"\"\" assert len(ref) == len(pred) >", "- 1]) / 2 * 100) print('F1:', 's0', f1_s0 *", "numpy as np import pickle import argparse import bert_eval_acc import", "w in s: if w in ref_set: p += 1", "sum(precisions) / len(precisions) recall = sum(recalls) / len(recalls) return 0.0", "100) parser = argparse.ArgumentParser() parser.add_argument('--eval_file_path', help='path of the eval file',", "p /= len(s) r = 0 for rs in ref[i]:", "n_sample=None): with open(fn) as f: res = [json.loads(i) for i", "100) print('Dist:', 's0', dist_s0[1] * 100, 's1', dist_s1[1] * 100,", "== recall == 0 else 2 * precision * recall", "list of references :param pred: list(list(any)), a list of predictions", "recall) def calc_metrics_value(task, fn, n_sample=None): with open(fn) as f: res", "'s0', dist_s0[1] * 100, 's1', dist_s1[1] * 100, 'mean', (dist_s0[1]", "weights=[0, 0, 1, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0,", "[[str(x) for x in l] for l in hyps_resp] hyps_resp", "s: pred_set.add(w) p = 0 for w in s: if", "= eval_f1(s0_ref, s0_pred) f1_s1 = eval_f1(s1_ref, s1_pred) for k in", "random import sample import numpy as np import pickle import", ":param hyps_resp: list, a list of hyps responses :return: average", "0 for w in s: if w in ref_set: p", "s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if n_sample: assert len(s0_ref) >= n_sample assert len(s1_ref)", "of <class 'list'>, get a list of {} instead\".format( type(hyps_resp[0])))", "parser.parse_args() file_path = args.eval_file_path calc_metrics_value(None, file_path) print(\"Evaluating acc results:\") bert_eval_acc.main(file_path)", "in enumerate(s1_pred) if i in sampled_idxs] bleu_s0 = eval_bleu_detail(s0_ref, s0_pred)", "tot_l precisions.append(p) recalls.append(r) precision = sum(precisions) / len(precisions) recall =", "recall == 0 else 2 * precision * recall /", "in hyps_resp] hyps_resp = [(' '.join(i)).split() for i in hyps_resp]", "a list of responses :param n: int, n-gram :return: the", "for l in hyps_resp] hyps_resp = [(' '.join(i)).split() for i", "= [] for i, s in enumerate(pred): ref_set = set()", "recalls.append(r) precision = sum(precisions) / 
len(precisions) recall = sum(recalls) /", "+ recall) def calc_metrics_value(task, fn, n_sample=None): with open(fn) as f:", "n])) return len(ngram) def eval_distinct_detail(hyps_resp): \"\"\" compute distinct score for", "parser = argparse.ArgumentParser() parser.add_argument('--eval_file_path', help='path of the eval file', required=True)", "eval_f1(s1_ref, s1_pred) for k in range(1, 4): print('%d-gram BLEU:' %", "/ float(num_tokens) return dist1, dist2 def eval_f1(ref, pred): \"\"\" :param", "[], [] for d in res: if d['style'] == 0:", "for w in s: pred_set.add(w) p = 0 for w", "import svm_eval_acc smooth = SmoothingFunction() def eval_bleu(ref, pred): \"\"\" :param", "eval_distinct_detail(s1_pred) f1_s0 = eval_f1(s0_ref, s0_pred) f1_s1 = eval_f1(s1_ref, s1_pred) for", "= [], [] for d in res: if d['style'] ==", "* recall / (precision + recall) def calc_metrics_value(task, fn, n_sample=None):", "0: print(\"ERROR, eval_distinct get empty input\") return if type(hyps_resp[0]) !=", "argparse import bert_eval_acc import svm_eval_acc smooth = SmoothingFunction() def eval_bleu(ref,", "1) / float(num_tokens) dist2 = count_ngram(hyps_resp, 2) / float(num_tokens) return", "of reference sentences, each element of the list is a", "weights=[1, 0, 0, 0], smoothing_function=smooth.method1),\\ corpus_bleu(ref, pred, weights=[0, 1, 0,", "required=True) args = parser.parse_args() file_path = args.eval_file_path calc_metrics_value(None, file_path) print(\"Evaluating", "score \"\"\" return corpus_bleu(ref, pred, smoothing_function=smooth.method1) def eval_bleu_detail(ref, pred): \"\"\"", "precision == recall == 0 else 2 * precision *", "(f1_s0 + f1_s1) / 2 * 100) print('Dist:', 's0', dist_s0[1]", "tqdm import tqdm, trange from random import sample import numpy", "for x in l] for l in hyps_resp] hyps_resp =", "list of responses :param n: int, n-gram :return: the number", "import pickle import argparse import bert_eval_acc import svm_eval_acc smooth =", "f1 score \"\"\" assert len(ref) == len(pred) > 0 precisions", "enumerate(s0_ref) if i in sampled_idxs] s0_pred = [x for i,", "s1_ref = [], [] for d in res: if d['style']", "1]) / 2 * 100) print('F1:', 's0', f1_s0 * 100,", "a list of {} instead\".format( type(hyps_resp[0]))) return ngram = set()", "s0_pred, s0_ref = [], [] s1_pred, s1_ref = [], []", "of hyps responses :return: average distinct score for 1, 2-gram", "json from tqdm import tqdm, trange from random import sample", "dist1 = count_ngram(hyps_resp, 1) / float(num_tokens) dist2 = count_ngram(hyps_resp, 2)", "i, x in enumerate(s1_ref) if i in sampled_idxs] s1_pred =", "references :param pred: list(list(any)), a list of predictions :return: f1", "for w in s: if w in ref_set: p +=", "assert len(s0_ref) >= n_sample assert len(s1_ref) >= n_sample sampled_idxs =", "in hyps_resp \"\"\" if len(hyps_resp) == 0: print(\"ERROR, eval_distinct get", "w in rs: if w in pred_set: r += 1", "return hyps_resp = [[str(x) for x in l] for l", "0], smoothing_function=smooth.method1),\\ corpus_bleu(ref, pred, weights=[0, 1, 0, 0], smoothing_function=smooth.method1), \\", "import json from tqdm import tqdm, trange from random import", "pred, weights=[0, 0, 0, 1], smoothing_function=smooth.method1) def count_ngram(hyps_resp, n): \"\"\"", "corpus_bleu(ref, pred, weights=[0, 0, 1, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred,", "n_sample: assert len(s0_ref) >= n_sample assert len(s1_ref) >= n_sample sampled_idxs", "len(precisions) recall = sum(recalls) / len(recalls) return 0.0 if precision", "n-gram :return: 
the number of unique n-grams in hyps_resp \"\"\"", "smoothing_function=smooth.method1) def eval_bleu_detail(ref, pred): \"\"\" :param ref: list(list(list(any))), a list", "for i in f.readlines()] s0_pred, s0_ref = [], [] s1_pred,", "in sampled_idxs] s1_pred = [x for i, x in enumerate(s1_pred)", "from nltk.translate.bleu_score import SmoothingFunction import json from tqdm import tqdm,", "* 100) print('Dist:', 's0', dist_s0[1] * 100, 's1', dist_s1[1] *", "eval_bleu_detail(ref, pred): \"\"\" :param ref: list(list(list(any))), a list of reference", "in f.readlines()] s0_pred, s0_ref = [], [] s1_pred, s1_ref =", "return len(ngram) def eval_distinct_detail(hyps_resp): \"\"\" compute distinct score for the", ":param pred: list(list(any)), a list of predictions :return: f1 score", "n + 1): ngram.add(' '.join(resp[i: i + n])) return len(ngram)", "distinct score for the hyps_resp :param hyps_resp: list, a list", "corpus_bleu(ref, pred, weights=[1, 0, 0, 0], smoothing_function=smooth.method1),\\ corpus_bleu(ref, pred, weights=[0,", "a list of predictions :return: corpus bleu score \"\"\" return", "enumerate(s1_pred) if i in sampled_idxs] bleu_s0 = eval_bleu_detail(s0_ref, s0_pred) bleu_s1", "list of {} instead\".format( type(hyps_resp[0]))) return hyps_resp = [[str(x) for", "n-grams in hyps_resp \"\"\" if len(hyps_resp) == 0: print(\"ERROR, eval_distinct", "hyps_resp: list, a list of responses :param n: int, n-gram", "n: int, n-gram :return: the number of unique n-grams in", "f1_s1 = eval_f1(s1_ref, s1_pred) for k in range(1, 4): print('%d-gram", "the eval file', required=True) args = parser.parse_args() file_path = args.eval_file_path", "float(num_tokens) return dist1, dist2 def eval_f1(ref, pred): \"\"\" :param ref:", "if w in pred_set: r += 1 tot_l = sum([len(rs)", "range(len(resp) - n + 1): ngram.add(' '.join(resp[i: i + n]))", "number of unique n-grams :param hyps_resp: list, a list of", "n: continue for i in range(len(resp) - n + 1):", "{} instead\".format( type(hyps_resp[0]))) return ngram = set() for resp in", "w in ref_set: p += 1 if len(s) > 0:", "== len(pred) > 0 precisions = [] recalls = []", ":return: corpus bleu score \"\"\" return corpus_bleu(ref, pred, weights=[1, 0,", "len(s) > 0: p /= len(s) r = 0 for", "float(num_tokens) dist2 = count_ngram(hyps_resp, 2) / float(num_tokens) return dist1, dist2", "a list of <class 'list'>, get a list of {}", "pred, weights=[0, 1, 0, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0,", "sample import numpy as np import pickle import argparse import", "== 0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else: s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if n_sample: assert", "the number of unique n-grams in hyps_resp \"\"\" if len(hyps_resp)", "rs: ref_set.add(w) pred_set = set() for w in s: pred_set.add(w)", "from random import sample import numpy as np import pickle", "unique n-grams in hyps_resp \"\"\" if len(hyps_resp) == 0: print(\"ERROR,", "import sample import numpy as np import pickle import argparse", "is a list of references :param pred: list(list(any)), a list", "hyps_resp] num_tokens = sum([len(i) for i in hyps_resp]) dist1 =", "hyps_resp: if len(resp) < n: continue for i in range(len(resp)", "r += 1 tot_l = sum([len(rs) for rs in ref[i]])", "+= 1 tot_l = sum([len(rs) for rs in ref[i]]) if", "ref[i]: for w in rs: if w in pred_set: r", "> 0: p /= len(s) r = 0 for rs", "* 100, 's1', bleu_s1[k - 1] * 100, 'mean', (bleu_s0[k", 
"parser.add_argument('--eval_file_path', help='path of the eval file', required=True) args = parser.parse_args()", "\"\"\" if len(hyps_resp) == 0: print(\"ERROR, eval_distinct get empty input\")", "open(fn) as f: res = [json.loads(i) for i in f.readlines()]", "set() for resp in hyps_resp: if len(resp) < n: continue", "list of <class 'list'>, get a list of {} instead\".format(", "n_sample sampled_idxs = sample(range(len(s0_ref)), n_sample) s0_ref = [x for i,", "2-gram \"\"\" if len(hyps_resp) == 0: print(\"ERROR, eval_distinct get empty", "count_ngram(hyps_resp, n): \"\"\" Count the number of unique n-grams :param", "resp in hyps_resp: if len(resp) < n: continue for i", "0, 0, 0], smoothing_function=smooth.method1),\\ corpus_bleu(ref, pred, weights=[0, 1, 0, 0],", "if i in sampled_idxs] sampled_idxs = sample(range(len(s1_ref)), n_sample) s1_ref =", ":param n: int, n-gram :return: the number of unique n-grams", "a list of hyps responses :return: average distinct score for", "in rs: if w in pred_set: r += 1 tot_l", "element of the list is a list of references :param", "sample(range(len(s1_ref)), n_sample) s1_ref = [x for i, x in enumerate(s1_ref)", "sampled_idxs] s1_pred = [x for i, x in enumerate(s1_pred) if", "n-grams :param hyps_resp: list, a list of responses :param n:", "pred: list(list(any)), a list of predictions :return: f1 score \"\"\"", "predictions :return: corpus bleu score \"\"\" return corpus_bleu(ref, pred, weights=[1,", "len(resp) < n: continue for i in range(len(resp) - n", "return ngram = set() for resp in hyps_resp: if len(resp)", "for rs in ref[i]: for w in rs: ref_set.add(w) pred_set", "if len(hyps_resp) == 0: print(\"ERROR, eval_distinct get empty input\") return", "responses :param n: int, n-gram :return: the number of unique", "pred, weights=[1, 0, 0, 0], smoothing_function=smooth.method1),\\ corpus_bleu(ref, pred, weights=[0, 1,", "in sampled_idxs] s0_pred = [x for i, x in enumerate(s0_pred)", "for the hyps_resp :param hyps_resp: list, a list of hyps", "hyps_resp :param hyps_resp: list, a list of hyps responses :return:", "1): ngram.add(' '.join(resp[i: i + n])) return len(ngram) def eval_distinct_detail(hyps_resp):", "w in s: pred_set.add(w) p = 0 for w in", "= eval_bleu_detail(s1_ref, s1_pred) dist_s0 = eval_distinct_detail(s0_pred) dist_s1 = eval_distinct_detail(s1_pred) f1_s0", "= [x for i, x in enumerate(s1_pred) if i in", "for rs in ref[i]]) if tot_l > 0: r /=", "tqdm, trange from random import sample import numpy as np", "s1_pred, s1_ref = [], [] for d in res: if", "2) / float(num_tokens) return dist1, dist2 def eval_f1(ref, pred): \"\"\"", "nltk.translate.bleu_score import SmoothingFunction import json from tqdm import tqdm, trange", "100, 'mean', (bleu_s0[k - 1] + bleu_s1[k - 1]) /", "s1_pred) dist_s0 = eval_distinct_detail(s0_pred) dist_s1 = eval_distinct_detail(s1_pred) f1_s0 = eval_f1(s0_ref,", "in ref_set: p += 1 if len(s) > 0: p", "for i, x in enumerate(s1_pred) if i in sampled_idxs] bleu_s0", "in enumerate(s0_pred) if i in sampled_idxs] sampled_idxs = sample(range(len(s1_ref)), n_sample)", "0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0, 1, 0], smoothing_function=smooth.method1),", "get empty input\") return if type(hyps_resp[0]) != list: print(\"ERROR, eval_distinct", "return corpus_bleu(ref, pred, weights=[1, 0, 0, 0], smoothing_function=smooth.method1),\\ corpus_bleu(ref, pred,", "instead\".format( type(hyps_resp[0]))) return hyps_resp = [[str(x) for x in l]", "[x for i, x in enumerate(s0_pred) if i in sampled_idxs]", 
"\"\"\" :param ref: list(list(list(any))), a list of reference sentences, each", "p += 1 if len(s) > 0: p /= len(s)", "[x for i, x in enumerate(s1_ref) if i in sampled_idxs]", "sampled_idxs] bleu_s0 = eval_bleu_detail(s0_ref, s0_pred) bleu_s1 = eval_bleu_detail(s1_ref, s1_pred) dist_s0", "tot_l > 0: r /= tot_l precisions.append(p) recalls.append(r) precision =", "i, x in enumerate(s0_ref) if i in sampled_idxs] s0_pred =", "'.join(i)).split() for i in hyps_resp] num_tokens = sum([len(i) for i", "corpus_bleu from nltk.translate.bleu_score import SmoothingFunction import json from tqdm import", "/= len(s) r = 0 for rs in ref[i]: for", "recalls = [] for i, s in enumerate(pred): ref_set =", "in s: if w in ref_set: p += 1 if", "len(recalls) return 0.0 if precision == recall == 0 else", "len(pred) > 0 precisions = [] recalls = [] for", "in hyps_resp]) dist1 = count_ngram(hyps_resp, 1) / float(num_tokens) dist2 =", "return dist1, dist2 def eval_f1(ref, pred): \"\"\" :param ref: list(list(list(any))),", "a list of {} instead\".format( type(hyps_resp[0]))) return hyps_resp = [[str(x)", "len(ref) == len(pred) > 0 precisions = [] recalls =", "1], smoothing_function=smooth.method1) def count_ngram(hyps_resp, n): \"\"\" Count the number of", "= [] recalls = [] for i, s in enumerate(pred):", "compute distinct score for the hyps_resp :param hyps_resp: list, a", "len(s1_ref) >= n_sample sampled_idxs = sample(range(len(s0_ref)), n_sample) s0_ref = [x", "if i in sampled_idxs] s0_pred = [x for i, x", "1] + bleu_s1[k - 1]) / 2 * 100) print('F1:',", "pred, weights=[0, 0, 1, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0,", "k, 's0', bleu_s0[k - 1] * 100, 's1', bleu_s1[k -", "for d in res: if d['style'] == 0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0]))", "0 precisions = [] recalls = [] for i, s", "/= tot_l precisions.append(p) recalls.append(r) precision = sum(precisions) / len(precisions) recall", "0: r /= tot_l precisions.append(p) recalls.append(r) precision = sum(precisions) /", "SmoothingFunction() def eval_bleu(ref, pred): \"\"\" :param ref: list(list(list(any))), a list", "if len(s) > 0: p /= len(s) r = 0", "hyps_resp \"\"\" if len(hyps_resp) == 0: print(\"ERROR, eval_distinct get empty", "'list'>, get a list of {} instead\".format( type(hyps_resp[0]))) return hyps_resp", "/ 2 * 100) print('Dist:', 's0', dist_s0[1] * 100, 's1',", "eval_distinct_detail(hyps_resp): \"\"\" compute distinct score for the hyps_resp :param hyps_resp:", "/ 2 * 100) print('F1:', 's0', f1_s0 * 100, 's1',", "i in range(len(resp) - n + 1): ngram.add(' '.join(resp[i: i", "list, a list of hyps responses :return: average distinct score", "'s0', bleu_s0[k - 1] * 100, 's1', bleu_s1[k - 1]", "eval_bleu(ref, pred): \"\"\" :param ref: list(list(list(any))), a list of reference", "\"\"\" Count the number of unique n-grams :param hyps_resp: list,", "= [json.loads(i) for i in f.readlines()] s0_pred, s0_ref = [],", ":return: f1 score \"\"\" assert len(ref) == len(pred) > 0", "dist_s0 = eval_distinct_detail(s0_pred) dist_s1 = eval_distinct_detail(s1_pred) f1_s0 = eval_f1(s0_ref, s0_pred)", "x in enumerate(s1_pred) if i in sampled_idxs] bleu_s0 = eval_bleu_detail(s0_ref,", "for rs in ref[i]: for w in rs: if w", "- 1] + bleu_s1[k - 1]) / 2 * 100)", "r /= tot_l precisions.append(p) recalls.append(r) precision = sum(precisions) / len(precisions)", "type(hyps_resp[0]))) return hyps_resp = [[str(x) for x in l] for", "enumerate(pred): ref_set = set() for rs in ref[i]: for w", 
"eval_bleu_detail(s1_ref, s1_pred) dist_s0 = eval_distinct_detail(s0_pred) dist_s1 = eval_distinct_detail(s1_pred) f1_s0 =", "in enumerate(s1_ref) if i in sampled_idxs] s1_pred = [x for", "score \"\"\" assert len(ref) == len(pred) > 0 precisions =", "dist1, dist2 def eval_f1(ref, pred): \"\"\" :param ref: list(list(list(any))), a", "of unique n-grams :param hyps_resp: list, a list of responses", "responses :return: average distinct score for 1, 2-gram \"\"\" if", "100, 'mean', (f1_s0 + f1_s1) / 2 * 100) print('Dist:',", "the hyps_resp :param hyps_resp: list, a list of hyps responses", "= count_ngram(hyps_resp, 1) / float(num_tokens) dist2 = count_ngram(hyps_resp, 2) /", "assert len(ref) == len(pred) > 0 precisions = [] recalls", "list(list(any)), a list of predictions :return: corpus bleu score \"\"\"", "def eval_distinct_detail(hyps_resp): \"\"\" compute distinct score for the hyps_resp :param", "/ (precision + recall) def calc_metrics_value(task, fn, n_sample=None): with open(fn)", "hyps_resp]) dist1 = count_ngram(hyps_resp, 1) / float(num_tokens) dist2 = count_ngram(hyps_resp,", "tot_l = sum([len(rs) for rs in ref[i]]) if tot_l >", "for i in hyps_resp] num_tokens = sum([len(i) for i in", "in range(len(resp) - n + 1): ngram.add(' '.join(resp[i: i +", "= eval_f1(s1_ref, s1_pred) for k in range(1, 4): print('%d-gram BLEU:'", "f1_s0 = eval_f1(s0_ref, s0_pred) f1_s1 = eval_f1(s1_ref, s1_pred) for k", "0.0 if precision == recall == 0 else 2 *", "instead\".format( type(hyps_resp[0]))) return ngram = set() for resp in hyps_resp:", ":param ref: list(list(list(any))), a list of reference sentences, each element", "in sampled_idxs] bleu_s0 = eval_bleu_detail(s0_ref, s0_pred) bleu_s1 = eval_bleu_detail(s1_ref, s1_pred)", "* 100, 's1', f1_s1 * 100, 'mean', (f1_s0 + f1_s1)", "for k in range(1, 4): print('%d-gram BLEU:' % k, 's0',", "enumerate(s1_ref) if i in sampled_idxs] s1_pred = [x for i,", "ref_set: p += 1 if len(s) > 0: p /=", "/ len(precisions) recall = sum(recalls) / len(recalls) return 0.0 if", "'s1', dist_s1[1] * 100, 'mean', (dist_s0[1] + dist_s1[1]) / 2", "s: if w in ref_set: p += 1 if len(s)", "sum([len(rs) for rs in ref[i]]) if tot_l > 0: r", "continue for i in range(len(resp) - n + 1): ngram.add('", "print('Dist:', 's0', dist_s0[1] * 100, 's1', dist_s1[1] * 100, 'mean',", "in rs: ref_set.add(w) pred_set = set() for w in s:", "file', required=True) args = parser.parse_args() file_path = args.eval_file_path calc_metrics_value(None, file_path)", "with open(fn) as f: res = [json.loads(i) for i in", "if w in ref_set: p += 1 if len(s) >", "ngram.add(' '.join(resp[i: i + n])) return len(ngram) def eval_distinct_detail(hyps_resp): \"\"\"", "p = 0 for w in s: if w in", "a list of reference sentences, each element of the list", "i in sampled_idxs] bleu_s0 = eval_bleu_detail(s0_ref, s0_pred) bleu_s1 = eval_bleu_detail(s1_ref,", "s1_ref = [x for i, x in enumerate(s1_ref) if i", "if type(hyps_resp[0]) != list: print(\"ERROR, eval_distinct takes in a list", "import SmoothingFunction import json from tqdm import tqdm, trange from", "d['style'] == 0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else: s1_ref.append([list(d['resp'])]) s1_pred.append(list(d['pred_style1'][0])) if n_sample:", "corpus_bleu(ref, pred, weights=[0, 1, 0, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred,", "predictions :return: corpus bleu score \"\"\" return corpus_bleu(ref, pred, smoothing_function=smooth.method1)", "score \"\"\" return corpus_bleu(ref, pred, weights=[1, 0, 0, 0], 
smoothing_function=smooth.method1),\\", "distinct score for 1, 2-gram \"\"\" if len(hyps_resp) == 0:", "[(' '.join(i)).split() for i in hyps_resp] num_tokens = sum([len(i) for", "\"\"\" compute distinct score for the hyps_resp :param hyps_resp: list,", "n_sample assert len(s1_ref) >= n_sample sampled_idxs = sample(range(len(s0_ref)), n_sample) s0_ref", "\"\"\" assert len(ref) == len(pred) > 0 precisions = []", "rs: if w in pred_set: r += 1 tot_l =", "in enumerate(s0_ref) if i in sampled_idxs] s0_pred = [x for", "of {} instead\".format( type(hyps_resp[0]))) return hyps_resp = [[str(x) for x", "= sample(range(len(s1_ref)), n_sample) s1_ref = [x for i, x in", "in range(1, 4): print('%d-gram BLEU:' % k, 's0', bleu_s0[k -", "in res: if d['style'] == 0: s0_ref.append([list(d['resp'])]) s0_pred.append(list(d['pred_style0'][0])) else: s1_ref.append([list(d['resp'])])", "in hyps_resp] num_tokens = sum([len(i) for i in hyps_resp]) dist1", "list of hyps responses :return: average distinct score for 1,", "+ dist_s1[1]) / 2 * 100) parser = argparse.ArgumentParser() parser.add_argument('--eval_file_path',", "len(s) r = 0 for rs in ref[i]: for w", "dist_s1[1]) / 2 * 100) parser = argparse.ArgumentParser() parser.add_argument('--eval_file_path', help='path", "dist_s1 = eval_distinct_detail(s1_pred) f1_s0 = eval_f1(s0_ref, s0_pred) f1_s1 = eval_f1(s1_ref,", ":return: average distinct score for 1, 2-gram \"\"\" if len(hyps_resp)", "s1_pred) for k in range(1, 4): print('%d-gram BLEU:' % k,", "1, 0, 0], smoothing_function=smooth.method1), \\ corpus_bleu(ref, pred, weights=[0, 0, 1,", "* 100, 'mean', (bleu_s0[k - 1] + bleu_s1[k - 1])", "'mean', (dist_s0[1] + dist_s1[1]) / 2 * 100) parser =", "eval_distinct get empty input\") return if type(hyps_resp[0]) != list: print(\"ERROR,", "* 100, 's1', dist_s1[1] * 100, 'mean', (dist_s0[1] + dist_s1[1])", "list of reference sentences, each element of the list is", "= set() for rs in ref[i]: for w in rs:", "= eval_bleu_detail(s0_ref, s0_pred) bleu_s1 = eval_bleu_detail(s1_ref, s1_pred) dist_s0 = eval_distinct_detail(s0_pred)", "corpus bleu score \"\"\" return corpus_bleu(ref, pred, smoothing_function=smooth.method1) def eval_bleu_detail(ref,", "'s0', f1_s0 * 100, 's1', f1_s1 * 100, 'mean', (f1_s0", "fn, n_sample=None): with open(fn) as f: res = [json.loads(i) for", "sample(range(len(s0_ref)), n_sample) s0_ref = [x for i, x in enumerate(s0_ref)", "dist_s0[1] * 100, 's1', dist_s1[1] * 100, 'mean', (dist_s0[1] +", "list is a list of references :param pred: list(list(any)), a", "bleu_s0[k - 1] * 100, 's1', bleu_s1[k - 1] *", "pred_set.add(w) p = 0 for w in s: if w", "x in enumerate(s0_pred) if i in sampled_idxs] sampled_idxs = sample(range(len(s1_ref)),", "eval file', required=True) args = parser.parse_args() file_path = args.eval_file_path calc_metrics_value(None,", "for i in range(len(resp) - n + 1): ngram.add(' '.join(resp[i:", "'list'>, get a list of {} instead\".format( type(hyps_resp[0]))) return ngram", "sampled_idxs] s0_pred = [x for i, x in enumerate(s0_pred) if", "hyps_resp: list, a list of hyps responses :return: average distinct", "np import pickle import argparse import bert_eval_acc import svm_eval_acc smooth", "+ n])) return len(ngram) def eval_distinct_detail(hyps_resp): \"\"\" compute distinct score", "type(hyps_resp[0]))) return ngram = set() for resp in hyps_resp: if" ]
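# Illustrative sanity check (not part of the original script; left as
# comments because the module runs argparse and the full evaluation at
# import time). All helpers above expect pre-tokenized input:
#   eval_f1([[['a', 'b']]], [['a', 'b']])    -> 1.0  (perfect overlap)
#   eval_distinct_detail([['a', 'b', 'a']])  -> (2/3, 2/3): two unique
#       unigrams and two unique bigrams over three tokens.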
[ "= \"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS = \"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT = \"block_client\" CONF_DETECTION_TIME =", "True DEFAULT_TRACK_WIRED_CLIENTS = True DEFAULT_DETECTION_TIME = 300 ATTR_MANUFACTURER = \"Ubiquiti", "CONF_TRACK_DEVICES = \"track_devices\" CONF_TRACK_WIRED_CLIENTS = \"track_wired_clients\" CONF_SSID_FILTER = \"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS", "= \"track_wired_clients\" CONF_SSID_FILTER = \"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS = False DEFAULT_POE_CLIENTS =", "import logging LOGGER = logging.getLogger(__package__) DOMAIN = \"unifi\" CONTROLLER_ID =", "= \"site\" UNIFI_WIRELESS_CLIENTS = \"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS = \"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT =", "\"track_wired_clients\" CONF_SSID_FILTER = \"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS = False DEFAULT_POE_CLIENTS = True", "LOGGER = logging.getLogger(__package__) DOMAIN = \"unifi\" CONTROLLER_ID = \"{host}-{site}\" CONF_CONTROLLER", "= \"controller\" CONF_SITE_ID = \"site\" UNIFI_WIRELESS_CLIENTS = \"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS =", "DEFAULT_ALLOW_BANDWIDTH_SENSORS = False DEFAULT_POE_CLIENTS = True DEFAULT_TRACK_CLIENTS = True DEFAULT_TRACK_DEVICES", "= \"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT = \"block_client\" CONF_DETECTION_TIME = \"detection_time\" CONF_POE_CLIENTS =", "logging LOGGER = logging.getLogger(__package__) DOMAIN = \"unifi\" CONTROLLER_ID = \"{host}-{site}\"", "= \"poe_clients\" CONF_TRACK_CLIENTS = \"track_clients\" CONF_TRACK_DEVICES = \"track_devices\" CONF_TRACK_WIRED_CLIENTS =", "= \"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS = False DEFAULT_POE_CLIENTS = True DEFAULT_TRACK_CLIENTS =", "False DEFAULT_POE_CLIENTS = True DEFAULT_TRACK_CLIENTS = True DEFAULT_TRACK_DEVICES = True", "logging.getLogger(__package__) DOMAIN = \"unifi\" CONTROLLER_ID = \"{host}-{site}\" CONF_CONTROLLER = \"controller\"", "the UniFi component.\"\"\" import logging LOGGER = logging.getLogger(__package__) DOMAIN =", "\"\"\"Constants for the UniFi component.\"\"\" import logging LOGGER = logging.getLogger(__package__)", "CONF_CONTROLLER = \"controller\" CONF_SITE_ID = \"site\" UNIFI_WIRELESS_CLIENTS = \"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS", "CONF_SITE_ID = \"site\" UNIFI_WIRELESS_CLIENTS = \"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS = \"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT", "UniFi component.\"\"\" import logging LOGGER = logging.getLogger(__package__) DOMAIN = \"unifi\"", "\"site\" UNIFI_WIRELESS_CLIENTS = \"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS = \"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT = \"block_client\"", "= \"block_client\" CONF_DETECTION_TIME = \"detection_time\" CONF_POE_CLIENTS = \"poe_clients\" CONF_TRACK_CLIENTS =", "\"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS = False DEFAULT_POE_CLIENTS = True DEFAULT_TRACK_CLIENTS = True", "= True DEFAULT_TRACK_CLIENTS = True DEFAULT_TRACK_DEVICES = True DEFAULT_TRACK_WIRED_CLIENTS =", "for the UniFi component.\"\"\" import logging LOGGER = logging.getLogger(__package__) DOMAIN", "\"block_client\" CONF_DETECTION_TIME = \"detection_time\" CONF_POE_CLIENTS = \"poe_clients\" CONF_TRACK_CLIENTS = \"track_clients\"", "= \"track_devices\" CONF_TRACK_WIRED_CLIENTS = \"track_wired_clients\" CONF_SSID_FILTER = \"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS =", "True DEFAULT_TRACK_CLIENTS = True DEFAULT_TRACK_DEVICES = True DEFAULT_TRACK_WIRED_CLIENTS = True", "UNIFI_WIRELESS_CLIENTS = 
\"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS = \"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT = \"block_client\" CONF_DETECTION_TIME", "= \"track_clients\" CONF_TRACK_DEVICES = \"track_devices\" CONF_TRACK_WIRED_CLIENTS = \"track_wired_clients\" CONF_SSID_FILTER =", "\"poe_clients\" CONF_TRACK_CLIENTS = \"track_clients\" CONF_TRACK_DEVICES = \"track_devices\" CONF_TRACK_WIRED_CLIENTS = \"track_wired_clients\"", "= logging.getLogger(__package__) DOMAIN = \"unifi\" CONTROLLER_ID = \"{host}-{site}\" CONF_CONTROLLER =", "CONF_SSID_FILTER = \"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS = False DEFAULT_POE_CLIENTS = True DEFAULT_TRACK_CLIENTS", "\"controller\" CONF_SITE_ID = \"site\" UNIFI_WIRELESS_CLIENTS = \"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS = \"allow_bandwidth_sensors\"", "CONF_ALLOW_BANDWIDTH_SENSORS = \"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT = \"block_client\" CONF_DETECTION_TIME = \"detection_time\" CONF_POE_CLIENTS", "True DEFAULT_TRACK_DEVICES = True DEFAULT_TRACK_WIRED_CLIENTS = True DEFAULT_DETECTION_TIME = 300", "\"unifi\" CONTROLLER_ID = \"{host}-{site}\" CONF_CONTROLLER = \"controller\" CONF_SITE_ID = \"site\"", "CONF_POE_CLIENTS = \"poe_clients\" CONF_TRACK_CLIENTS = \"track_clients\" CONF_TRACK_DEVICES = \"track_devices\" CONF_TRACK_WIRED_CLIENTS", "DOMAIN = \"unifi\" CONTROLLER_ID = \"{host}-{site}\" CONF_CONTROLLER = \"controller\" CONF_SITE_ID", "CONF_DETECTION_TIME = \"detection_time\" CONF_POE_CLIENTS = \"poe_clients\" CONF_TRACK_CLIENTS = \"track_clients\" CONF_TRACK_DEVICES", "\"detection_time\" CONF_POE_CLIENTS = \"poe_clients\" CONF_TRACK_CLIENTS = \"track_clients\" CONF_TRACK_DEVICES = \"track_devices\"", "component.\"\"\" import logging LOGGER = logging.getLogger(__package__) DOMAIN = \"unifi\" CONTROLLER_ID", "= True DEFAULT_TRACK_WIRED_CLIENTS = True DEFAULT_DETECTION_TIME = 300 ATTR_MANUFACTURER =", "\"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT = \"block_client\" CONF_DETECTION_TIME = \"detection_time\" CONF_POE_CLIENTS = \"poe_clients\"", "\"{host}-{site}\" CONF_CONTROLLER = \"controller\" CONF_SITE_ID = \"site\" UNIFI_WIRELESS_CLIENTS = \"unifi_wireless_clients\"", "CONTROLLER_ID = \"{host}-{site}\" CONF_CONTROLLER = \"controller\" CONF_SITE_ID = \"site\" UNIFI_WIRELESS_CLIENTS", "= \"{host}-{site}\" CONF_CONTROLLER = \"controller\" CONF_SITE_ID = \"site\" UNIFI_WIRELESS_CLIENTS =", "DEFAULT_TRACK_DEVICES = True DEFAULT_TRACK_WIRED_CLIENTS = True DEFAULT_DETECTION_TIME = 300 ATTR_MANUFACTURER", "= \"detection_time\" CONF_POE_CLIENTS = \"poe_clients\" CONF_TRACK_CLIENTS = \"track_clients\" CONF_TRACK_DEVICES =", "= True DEFAULT_TRACK_DEVICES = True DEFAULT_TRACK_WIRED_CLIENTS = True DEFAULT_DETECTION_TIME =", "CONF_TRACK_CLIENTS = \"track_clients\" CONF_TRACK_DEVICES = \"track_devices\" CONF_TRACK_WIRED_CLIENTS = \"track_wired_clients\" CONF_SSID_FILTER", "\"track_clients\" CONF_TRACK_DEVICES = \"track_devices\" CONF_TRACK_WIRED_CLIENTS = \"track_wired_clients\" CONF_SSID_FILTER = \"ssid_filter\"", "DEFAULT_POE_CLIENTS = True DEFAULT_TRACK_CLIENTS = True DEFAULT_TRACK_DEVICES = True DEFAULT_TRACK_WIRED_CLIENTS", "\"unifi_wireless_clients\" CONF_ALLOW_BANDWIDTH_SENSORS = \"allow_bandwidth_sensors\" CONF_BLOCK_CLIENT = \"block_client\" CONF_DETECTION_TIME = \"detection_time\"", "= \"unifi\" CONTROLLER_ID = \"{host}-{site}\" CONF_CONTROLLER = \"controller\" CONF_SITE_ID =", "CONF_BLOCK_CLIENT = \"block_client\" CONF_DETECTION_TIME = \"detection_time\" CONF_POE_CLIENTS = \"poe_clients\" CONF_TRACK_CLIENTS", "\"track_devices\" 
CONF_TRACK_WIRED_CLIENTS = \"track_wired_clients\" CONF_SSID_FILTER = \"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS = False", "= False DEFAULT_POE_CLIENTS = True DEFAULT_TRACK_CLIENTS = True DEFAULT_TRACK_DEVICES =", "DEFAULT_TRACK_CLIENTS = True DEFAULT_TRACK_DEVICES = True DEFAULT_TRACK_WIRED_CLIENTS = True DEFAULT_DETECTION_TIME", "DEFAULT_TRACK_WIRED_CLIENTS = True DEFAULT_DETECTION_TIME = 300 ATTR_MANUFACTURER = \"Ubiquiti Networks\"", "CONF_TRACK_WIRED_CLIENTS = \"track_wired_clients\" CONF_SSID_FILTER = \"ssid_filter\" DEFAULT_ALLOW_BANDWIDTH_SENSORS = False DEFAULT_POE_CLIENTS" ]
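# Minimal sketch (not part of the original constants module) of how the
# CONTROLLER_ID template can be filled in; the host and site values here
# are invented placeholders:
#   CONTROLLER_ID.format(host='192.168.1.1', site='default')
#   -> '192.168.1.1-default'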
[ "Stream.py class OrderedStream: def __init__(self, n: int): self.data = [None]*n", "1 self.data[id] = value if id > self.ptr: return []", "called as such: # obj = OrderedStream(n) # param_1 =", "<reponame>Jahidul007/Python-Bootcamp<filename>coding_intereview/1656. Design an Ordered Stream.py class OrderedStream: def __init__(self, n:", "insert(self, id: int, value: str) -> List[str]: id -= 1", "self.ptr = 0 def insert(self, id: int, value: str) ->", "id > self.ptr: return [] while self.ptr < len(self.data) and", "Ordered Stream.py class OrderedStream: def __init__(self, n: int): self.data =", "1 return self.data[id:self.ptr] # Your OrderedStream object will be instantiated", "def insert(self, id: int, value: str) -> List[str]: id -=", "return [] while self.ptr < len(self.data) and self.data[self.ptr]: self.ptr +=", "self.data[id] = value if id > self.ptr: return [] while", "-> List[str]: id -= 1 self.data[id] = value if id", "and self.data[self.ptr]: self.ptr += 1 return self.data[id:self.ptr] # Your OrderedStream", "value: str) -> List[str]: id -= 1 self.data[id] = value", "while self.ptr < len(self.data) and self.data[self.ptr]: self.ptr += 1 return", "= value if id > self.ptr: return [] while self.ptr", "Your OrderedStream object will be instantiated and called as such:", "object will be instantiated and called as such: # obj", "int): self.data = [None]*n self.ptr = 0 def insert(self, id:", "> self.ptr: return [] while self.ptr < len(self.data) and self.data[self.ptr]:", "self.data[id:self.ptr] # Your OrderedStream object will be instantiated and called", "OrderedStream: def __init__(self, n: int): self.data = [None]*n self.ptr =", "# Your OrderedStream object will be instantiated and called as", "self.ptr < len(self.data) and self.data[self.ptr]: self.ptr += 1 return self.data[id:self.ptr]", "n: int): self.data = [None]*n self.ptr = 0 def insert(self,", "= 0 def insert(self, id: int, value: str) -> List[str]:", "id -= 1 self.data[id] = value if id > self.ptr:", "< len(self.data) and self.data[self.ptr]: self.ptr += 1 return self.data[id:self.ptr] #", "be instantiated and called as such: # obj = OrderedStream(n)", "List[str]: id -= 1 self.data[id] = value if id >", "if id > self.ptr: return [] while self.ptr < len(self.data)", "return self.data[id:self.ptr] # Your OrderedStream object will be instantiated and", "id: int, value: str) -> List[str]: id -= 1 self.data[id]", "+= 1 return self.data[id:self.ptr] # Your OrderedStream object will be", "len(self.data) and self.data[self.ptr]: self.ptr += 1 return self.data[id:self.ptr] # Your", "an Ordered Stream.py class OrderedStream: def __init__(self, n: int): self.data", "self.ptr: return [] while self.ptr < len(self.data) and self.data[self.ptr]: self.ptr", "= [None]*n self.ptr = 0 def insert(self, id: int, value:", "as such: # obj = OrderedStream(n) # param_1 = obj.insert(id,value)", "instantiated and called as such: # obj = OrderedStream(n) #", "0 def insert(self, id: int, value: str) -> List[str]: id", "class OrderedStream: def __init__(self, n: int): self.data = [None]*n self.ptr", "OrderedStream object will be instantiated and called as such: #", "will be instantiated and called as such: # obj =", "and called as such: # obj = OrderedStream(n) # param_1", "self.data[self.ptr]: self.ptr += 1 return self.data[id:self.ptr] # Your OrderedStream object", "str) -> List[str]: id -= 1 self.data[id] = value if", "-= 1 self.data[id] = value if id > self.ptr: return", "def __init__(self, n: int): self.data = [None]*n self.ptr = 0", 
"__init__(self, n: int): self.data = [None]*n self.ptr = 0 def", "self.data = [None]*n self.ptr = 0 def insert(self, id: int,", "value if id > self.ptr: return [] while self.ptr <", "[None]*n self.ptr = 0 def insert(self, id: int, value: str)", "int, value: str) -> List[str]: id -= 1 self.data[id] =", "Design an Ordered Stream.py class OrderedStream: def __init__(self, n: int):", "self.ptr += 1 return self.data[id:self.ptr] # Your OrderedStream object will", "[] while self.ptr < len(self.data) and self.data[self.ptr]: self.ptr += 1" ]
[ "== 0 assert prob.parent.dtype == np.int32 prob.solve() assert abs(prob.x.mean() -", "assert len(np.unique(prob.x)) == 2 assert max(np.abs(prob.dual[2:]) - lam) < 1e-12", "16, 13, 10, 7, 8, 9, 3, 6, 2, 5,", "TreeInstance def test_demo_3x7_postord(): parent = np.array([0, 4, 5, 0, 3,", "4, 5, 0, 3, 4, 7, 8, 5, 6, 7,", "import post_order, TreeInstance def test_demo_3x7_postord(): parent = np.array([0, 4, 5,", "0.73 0.71 1.5 1.17 0.43 1.08 0.62 \" + \"1.73", "1.5 1.17 0.43 1.08 0.62 \" + \"1.73 0.95 1.46", "14, 17, 12, 15, 16, 19, 16, 17]) po =", "= np.array([12, 11, 19, 20, 21, 14, 15, 18, 17,", "lam=lam) assert prob.root == 0 assert prob.parent.dtype == np.int32 prob.solve()", "8, 9, 14, 17, 12, 15, 16, 19, 16, 17])", "assert (po == expect).all() def test_demo_3x7(): y = np.fromstring(\"0.62 0.73", "treelas import post_order, TreeInstance def test_demo_3x7_postord(): parent = np.array([0, 4,", "def test_demo_3x7(): y = np.fromstring(\"0.62 0.73 0.71 1.5 1.17 0.43", "== 2 assert max(np.abs(prob.dual[2:]) - lam) < 1e-12 assert max(np.abs(prob.gamma))", "np.array([0, 4, 5, 0, 3, 4, 7, 8, 5, 6,", "1.08 0.02 0.4\", sep=\" \") parent = np.array([0, 4, 5,", "9, 14, 17, 12, 15, 16, 19, 16, 17]) lam", "numpy as np from treelas import post_order, TreeInstance def test_demo_3x7_postord():", "dtype='i4') - 1 assert (po == expect).all() def test_demo_3x7(): y", "test_demo_3x7(): y = np.fromstring(\"0.62 0.73 0.71 1.5 1.17 0.43 1.08", "20, 21, 14, 15, 18, 17, 16, 13, 10, 7,", "0.38 0.9 0.32 \" + \"-0.48 0.95 1.08 0.02 0.4\",", "5, 4, 1], dtype='i4') - 1 assert (po == expect).all()", "17]) po = post_order(parent, include_root=True) expect = np.array([12, 11, 19,", "4, 7, 8, 5, 6, 7, 8, 9, 14, 17,", "19, 16, 17]) lam = 1.0 prob = TreeInstance(y, parent,", "0.4\", sep=\" \") parent = np.array([0, 4, 5, 0, 3,", "10, 7, 8, 9, 3, 6, 2, 5, 4, 1],", "16, 19, 16, 17]) po = post_order(parent, include_root=True) expect =", "16, 17]) lam = 1.0 prob = TreeInstance(y, parent, lam=lam)", "0.95 1.46 1.6 1.16 0.38 0.9 0.32 \" + \"-0.48", "0, 3, 4, 7, 8, 5, 6, 7, 8, 9,", "5, 6, 7, 8, 9, 14, 17, 12, 15, 16,", "\") parent = np.array([0, 4, 5, 0, 3, 4, 7,", "6, 7, 8, 9, 14, 17, 12, 15, 16, 19,", "po = post_order(parent, include_root=True) expect = np.array([12, 11, 19, 20,", "18, 17, 16, 13, 10, 7, 8, 9, 3, 6,", "17]) lam = 1.0 prob = TreeInstance(y, parent, lam=lam) assert", "np.array([12, 11, 19, 20, 21, 14, 15, 18, 17, 16,", "parent, lam=lam) assert prob.root == 0 assert prob.parent.dtype == np.int32", "prob.parent.dtype == np.int32 prob.solve() assert abs(prob.x.mean() - prob.y.mean()) < 1e-15", "17, 16, 13, 10, 7, 8, 9, 3, 6, 2,", "1.16 0.38 0.9 0.32 \" + \"-0.48 0.95 1.08 0.02", "+ \"1.73 0.95 1.46 1.6 1.16 0.38 0.9 0.32 \"", "expect).all() def test_demo_3x7(): y = np.fromstring(\"0.62 0.73 0.71 1.5 1.17", "np.fromstring(\"0.62 0.73 0.71 1.5 1.17 0.43 1.08 0.62 \" +", "3, 4, 7, 8, 5, 6, 7, 8, 9, 14,", "assert prob.parent.dtype == np.int32 prob.solve() assert abs(prob.x.mean() - prob.y.mean()) <", "expect = np.array([12, 11, 19, 20, 21, 14, 15, 18,", "19, 16, 17]) po = post_order(parent, include_root=True) expect = np.array([12,", "12, 15, 16, 19, 16, 17]) lam = 1.0 prob", "- prob.y.mean()) < 1e-15 assert len(np.unique(prob.x)) == 2 assert max(np.abs(prob.dual[2:])", "= post_order(parent, include_root=True) expect = np.array([12, 11, 19, 20, 21,", "sep=\" \") parent = np.array([0, 4, 5, 0, 3, 4,", "assert prob.root == 0 assert prob.parent.dtype == np.int32 prob.solve() assert", "5, 0, 3, 4, 7, 8, 5, 6, 7, 8,", "= 
np.array([0, 4, 5, 0, 3, 4, 7, 8, 5,", "12, 15, 16, 19, 16, 17]) po = post_order(parent, include_root=True)", "3, 6, 2, 5, 4, 1], dtype='i4') - 1 assert", "\" + \"-0.48 0.95 1.08 0.02 0.4\", sep=\" \") parent", "1], dtype='i4') - 1 assert (po == expect).all() def test_demo_3x7():", "test_demo_3x7_postord(): parent = np.array([0, 4, 5, 0, 3, 4, 7,", "from treelas import post_order, TreeInstance def test_demo_3x7_postord(): parent = np.array([0,", "== expect).all() def test_demo_3x7(): y = np.fromstring(\"0.62 0.73 0.71 1.5", "2 assert max(np.abs(prob.dual[2:]) - lam) < 1e-12 assert max(np.abs(prob.gamma)) <", "prob.root == 0 assert prob.parent.dtype == np.int32 prob.solve() assert abs(prob.x.mean()", "9, 3, 6, 2, 5, 4, 1], dtype='i4') - 1", "1.17 0.43 1.08 0.62 \" + \"1.73 0.95 1.46 1.6", "0.43 1.08 0.62 \" + \"1.73 0.95 1.46 1.6 1.16", "prob = TreeInstance(y, parent, lam=lam) assert prob.root == 0 assert", "< 1e-15 assert len(np.unique(prob.x)) == 2 assert max(np.abs(prob.dual[2:]) - lam)", "0.32 \" + \"-0.48 0.95 1.08 0.02 0.4\", sep=\" \")", "import numpy as np from treelas import post_order, TreeInstance def", "def test_demo_3x7_postord(): parent = np.array([0, 4, 5, 0, 3, 4,", "6, 2, 5, 4, 1], dtype='i4') - 1 assert (po", "assert max(np.abs(prob.dual[2:]) - lam) < 1e-12 assert max(np.abs(prob.gamma)) < 1e-15", "7, 8, 9, 14, 17, 12, 15, 16, 19, 16,", "post_order, TreeInstance def test_demo_3x7_postord(): parent = np.array([0, 4, 5, 0,", "0.95 1.08 0.02 0.4\", sep=\" \") parent = np.array([0, 4,", "lam = 1.0 prob = TreeInstance(y, parent, lam=lam) assert prob.root", "7, 8, 5, 6, 7, 8, 9, 14, 17, 12,", "8, 9, 3, 6, 2, 5, 4, 1], dtype='i4') -", "15, 16, 19, 16, 17]) po = post_order(parent, include_root=True) expect", "== np.int32 prob.solve() assert abs(prob.x.mean() - prob.y.mean()) < 1e-15 assert", "17, 12, 15, 16, 19, 16, 17]) po = post_order(parent,", "15, 16, 19, 16, 17]) lam = 1.0 prob =", "= 1.0 prob = TreeInstance(y, parent, lam=lam) assert prob.root ==", "0.62 \" + \"1.73 0.95 1.46 1.6 1.16 0.38 0.9", "1e-15 assert len(np.unique(prob.x)) == 2 assert max(np.abs(prob.dual[2:]) - lam) <", "= TreeInstance(y, parent, lam=lam) assert prob.root == 0 assert prob.parent.dtype", "19, 20, 21, 14, 15, 18, 17, 16, 13, 10,", "prob.solve() assert abs(prob.x.mean() - prob.y.mean()) < 1e-15 assert len(np.unique(prob.x)) ==", "21, 14, 15, 18, 17, 16, 13, 10, 7, 8,", "0.9 0.32 \" + \"-0.48 0.95 1.08 0.02 0.4\", sep=\"", "16, 17]) po = post_order(parent, include_root=True) expect = np.array([12, 11,", "+ \"-0.48 0.95 1.08 0.02 0.4\", sep=\" \") parent =", "\" + \"1.73 0.95 1.46 1.6 1.16 0.38 0.9 0.32", "8, 5, 6, 7, 8, 9, 14, 17, 12, 15,", "\"-0.48 0.95 1.08 0.02 0.4\", sep=\" \") parent = np.array([0,", "as np from treelas import post_order, TreeInstance def test_demo_3x7_postord(): parent", "(po == expect).all() def test_demo_3x7(): y = np.fromstring(\"0.62 0.73 0.71", "parent = np.array([0, 4, 5, 0, 3, 4, 7, 8,", "4, 1], dtype='i4') - 1 assert (po == expect).all() def", "np.int32 prob.solve() assert abs(prob.x.mean() - prob.y.mean()) < 1e-15 assert len(np.unique(prob.x))", "1.6 1.16 0.38 0.9 0.32 \" + \"-0.48 0.95 1.08", "16, 19, 16, 17]) lam = 1.0 prob = TreeInstance(y,", "TreeInstance(y, parent, lam=lam) assert prob.root == 0 assert prob.parent.dtype ==", "1.0 prob = TreeInstance(y, parent, lam=lam) assert prob.root == 0", "= np.fromstring(\"0.62 0.73 0.71 1.5 1.17 0.43 1.08 0.62 \"", "17, 12, 15, 16, 19, 16, 17]) lam = 1.0", "0.71 1.5 1.17 0.43 1.08 0.62 \" + \"1.73 0.95", "14, 17, 12, 15, 16, 
19, 16, 17]) lam =", "- 1 assert (po == expect).all() def test_demo_3x7(): y =", "len(np.unique(prob.x)) == 2 assert max(np.abs(prob.dual[2:]) - lam) < 1e-12 assert", "prob.y.mean()) < 1e-15 assert len(np.unique(prob.x)) == 2 assert max(np.abs(prob.dual[2:]) -", "9, 14, 17, 12, 15, 16, 19, 16, 17]) po", "13, 10, 7, 8, 9, 3, 6, 2, 5, 4,", "15, 18, 17, 16, 13, 10, 7, 8, 9, 3,", "1 assert (po == expect).all() def test_demo_3x7(): y = np.fromstring(\"0.62", "np from treelas import post_order, TreeInstance def test_demo_3x7_postord(): parent =", "assert abs(prob.x.mean() - prob.y.mean()) < 1e-15 assert len(np.unique(prob.x)) == 2", "14, 15, 18, 17, 16, 13, 10, 7, 8, 9,", "7, 8, 9, 3, 6, 2, 5, 4, 1], dtype='i4')", "0 assert prob.parent.dtype == np.int32 prob.solve() assert abs(prob.x.mean() - prob.y.mean())", "2, 5, 4, 1], dtype='i4') - 1 assert (po ==", "\"1.73 0.95 1.46 1.6 1.16 0.38 0.9 0.32 \" +", "11, 19, 20, 21, 14, 15, 18, 17, 16, 13,", "post_order(parent, include_root=True) expect = np.array([12, 11, 19, 20, 21, 14,", "1.46 1.6 1.16 0.38 0.9 0.32 \" + \"-0.48 0.95", "abs(prob.x.mean() - prob.y.mean()) < 1e-15 assert len(np.unique(prob.x)) == 2 assert", "include_root=True) expect = np.array([12, 11, 19, 20, 21, 14, 15,", "1.08 0.62 \" + \"1.73 0.95 1.46 1.6 1.16 0.38", "0.02 0.4\", sep=\" \") parent = np.array([0, 4, 5, 0,", "y = np.fromstring(\"0.62 0.73 0.71 1.5 1.17 0.43 1.08 0.62" ]
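# For readers without treelas installed: a minimal pure-Python sketch of what
# post_order computes -- a post-order traversal of the tree encoded by the
# parent array (the root satisfies parent[v] == v). The name and child ordering
# here are illustrative only; the compiled treelas routine may order siblings
# differently, so this is for intuition rather than a drop-in replacement.
def post_order_sketch(parent):
    n = len(parent)
    children = [[] for _ in range(n)]
    root = 0
    for v, p in enumerate(parent):
        if v == p:
            root = v            # self-loop marks the root
        else:
            children[p].append(v)
    order = []

    def visit(v):
        for c in children[v]:
            visit(c)
        order.append(v)         # a node is emitted after all its children

    visit(root)
    return order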
[ "= input(\"Cargo (programador, operador): \") salario = float(input(\"Salário: \")) print(\"\\n\\n{}\".format(s.atualiza_salario(nome,", "= float(input(\"Salário: \")) print(\"\\n\\n{}\".format(s.atualiza_salario(nome, cargo, salario))) if __name__ == '__main__':", "\") cargo = input(\"Cargo (programador, operador): \") salario = float(input(\"Salário:", "cargo = input(\"Cargo (programador, operador): \") salario = float(input(\"Salário: \"))", "\") salario = float(input(\"Salário: \")) print(\"\\n\\n{}\".format(s.atualiza_salario(nome, cargo, salario))) if __name__", "nome = input(\"Nome: \") cargo = input(\"Cargo (programador, operador): \")", "= xmlrpc.client.ServerProxy('http://localhost:9991') nome = input(\"Nome: \") cargo = input(\"Cargo (programador,", "main(): s = xmlrpc.client.ServerProxy('http://localhost:9991') nome = input(\"Nome: \") cargo =", "xmlrpc.client def main(): s = xmlrpc.client.ServerProxy('http://localhost:9991') nome = input(\"Nome: \")", "input(\"Nome: \") cargo = input(\"Cargo (programador, operador): \") salario =", "input(\"Cargo (programador, operador): \") salario = float(input(\"Salário: \")) print(\"\\n\\n{}\".format(s.atualiza_salario(nome, cargo,", "float(input(\"Salário: \")) print(\"\\n\\n{}\".format(s.atualiza_salario(nome, cargo, salario))) if __name__ == '__main__': main()", "salario = float(input(\"Salário: \")) print(\"\\n\\n{}\".format(s.atualiza_salario(nome, cargo, salario))) if __name__ ==", "operador): \") salario = float(input(\"Salário: \")) print(\"\\n\\n{}\".format(s.atualiza_salario(nome, cargo, salario))) if", "= input(\"Nome: \") cargo = input(\"Cargo (programador, operador): \") salario", "def main(): s = xmlrpc.client.ServerProxy('http://localhost:9991') nome = input(\"Nome: \") cargo", "(programador, operador): \") salario = float(input(\"Salário: \")) print(\"\\n\\n{}\".format(s.atualiza_salario(nome, cargo, salario)))", "import xmlrpc.client def main(): s = xmlrpc.client.ServerProxy('http://localhost:9991') nome = input(\"Nome:", "<reponame>SD-CC-UFG/leonardo.fleury<gh_stars>0 import xmlrpc.client def main(): s = xmlrpc.client.ServerProxy('http://localhost:9991') nome =", "xmlrpc.client.ServerProxy('http://localhost:9991') nome = input(\"Nome: \") cargo = input(\"Cargo (programador, operador):", "s = xmlrpc.client.ServerProxy('http://localhost:9991') nome = input(\"Nome: \") cargo = input(\"Cargo" ]
[ "build.models from django.db import migrations, models class Migration(migrations.Migration): initial =", "on 2020-07-27 19:23 import build.models from django.db import migrations, models", "[ ] operations = [ migrations.CreateModel( name='AutoCompleteRecord', fields=[ ('updated_at', build.models.UnixTimestampField(auto_created=True,", "('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)), ('type', models.CharField(max_length=50)), ('value', models.CharField(max_length=300)), ], options={ 'db_table':", "<gh_stars>1-10 # Generated by Django 3.0.7 on 2020-07-27 19:23 import", "= [ ] operations = [ migrations.CreateModel( name='AutoCompleteRecord', fields=[ ('updated_at',", "build.models.UnixTimestampField(auto_created=True, null=True)), ('created_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)), ('type', models.CharField(max_length=50)),", "null=True)), ('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)), ('type', models.CharField(max_length=50)), ('value', models.CharField(max_length=300)), ], options={", "migrations, models class Migration(migrations.Migration): initial = True dependencies = [", "True dependencies = [ ] operations = [ migrations.CreateModel( name='AutoCompleteRecord',", "operations = [ migrations.CreateModel( name='AutoCompleteRecord', fields=[ ('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('created_at',", "= [ migrations.CreateModel( name='AutoCompleteRecord', fields=[ ('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('created_at', build.models.UnixTimestampField(auto_created=True,", "initial = True dependencies = [ ] operations = [", "dependencies = [ ] operations = [ migrations.CreateModel( name='AutoCompleteRecord', fields=[", "class Migration(migrations.Migration): initial = True dependencies = [ ] operations", "('type', models.CharField(max_length=50)), ('value', models.CharField(max_length=300)), ], options={ 'db_table': 'log_autocomplete_record', }, ),", "Generated by Django 3.0.7 on 2020-07-27 19:23 import build.models from", "models.CharField(max_length=50)), ('value', models.CharField(max_length=300)), ], options={ 'db_table': 'log_autocomplete_record', }, ), ]", "[ migrations.CreateModel( name='AutoCompleteRecord', fields=[ ('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('created_at', build.models.UnixTimestampField(auto_created=True, null=True)),", "3.0.7 on 2020-07-27 19:23 import build.models from django.db import migrations,", "# Generated by Django 3.0.7 on 2020-07-27 19:23 import build.models", "2020-07-27 19:23 import build.models from django.db import migrations, models class", "null=True)), ('created_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)), ('type', models.CharField(max_length=50)), ('value',", "name='AutoCompleteRecord', fields=[ ('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('created_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('log_autocomplete_record_id', models.AutoField(primary_key=True,", "19:23 import build.models from django.db import migrations, models class Migration(migrations.Migration):", "by Django 3.0.7 on 2020-07-27 19:23 import build.models from django.db", "import build.models from django.db import 
migrations, models class Migration(migrations.Migration): initial", "from django.db import migrations, models class Migration(migrations.Migration): initial = True", "] operations = [ migrations.CreateModel( name='AutoCompleteRecord', fields=[ ('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)),", "('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('created_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)), ('type',", "django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies", "serialize=False)), ('type', models.CharField(max_length=50)), ('value', models.CharField(max_length=300)), ], options={ 'db_table': 'log_autocomplete_record', },", "migrations.CreateModel( name='AutoCompleteRecord', fields=[ ('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('created_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('log_autocomplete_record_id',", "('created_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)), ('type', models.CharField(max_length=50)), ('value', models.CharField(max_length=300)),", "build.models.UnixTimestampField(auto_created=True, null=True)), ('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)), ('type', models.CharField(max_length=50)), ('value', models.CharField(max_length=300)), ],", "models class Migration(migrations.Migration): initial = True dependencies = [ ]", "Django 3.0.7 on 2020-07-27 19:23 import build.models from django.db import", "fields=[ ('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('created_at', build.models.UnixTimestampField(auto_created=True, null=True)), ('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)),", "= True dependencies = [ ] operations = [ migrations.CreateModel(", "Migration(migrations.Migration): initial = True dependencies = [ ] operations =", "models.AutoField(primary_key=True, serialize=False)), ('type', models.CharField(max_length=50)), ('value', models.CharField(max_length=300)), ], options={ 'db_table': 'log_autocomplete_record',", "import migrations, models class Migration(migrations.Migration): initial = True dependencies =" ]
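# The migration references build.models.UnixTimestampField, a custom field not
# shown here. A plausible (hypothetical) definition for context only -- the
# project's real field may well differ:
import time

from django.db import models


class UnixTimestampField(models.IntegerField):
    """Stores the current Unix time as an integer whenever the row is saved."""

    def pre_save(self, model_instance, add):
        value = int(time.time())
        setattr(model_instance, self.attname, value)
        return value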
[ "config file', required=True) args = parser.parse_args() config = _load_config_yaml(args.config) #", "argparse import os import torch import yaml DEFAULT_DEVICE = 'cuda:0'", "YAML config file', required=True) args = parser.parse_args() config = _load_config_yaml(args.config)", "to train on device = config.get('device', DEFAULT_DEVICE) config['device'] = torch.device(device", "DEFAULT_DEVICE = 'cuda:0' def load_config(): parser = argparse.ArgumentParser(description='UNet3D training') parser.add_argument('--config',", "import torch import yaml DEFAULT_DEVICE = 'cuda:0' def load_config(): parser", "yaml DEFAULT_DEVICE = 'cuda:0' def load_config(): parser = argparse.ArgumentParser(description='UNet3D training')", "device = config.get('device', DEFAULT_DEVICE) config['device'] = torch.device(device if torch.cuda.is_available() else", "Get a device to train on device = config.get('device', DEFAULT_DEVICE)", "on device = config.get('device', DEFAULT_DEVICE) config['device'] = torch.device(device if torch.cuda.is_available()", "_load_config_yaml(args.config) # Get a device to train on device =", "def load_config(): parser = argparse.ArgumentParser(description='UNet3D training') parser.add_argument('--config', type=str, help='Path to", "parser = argparse.ArgumentParser(description='UNet3D training') parser.add_argument('--config', type=str, help='Path to the YAML", "training') parser.add_argument('--config', type=str, help='Path to the YAML config file', required=True)", "torch.cuda.is_available() else \"cpu\") return config def _load_config_yaml(config_file): return yaml.load(open(config_file, 'r'),", "DEFAULT_DEVICE) config['device'] = torch.device(device if torch.cuda.is_available() else \"cpu\") return config", "torch.device(device if torch.cuda.is_available() else \"cpu\") return config def _load_config_yaml(config_file): return", "args = parser.parse_args() config = _load_config_yaml(args.config) # Get a device", "import argparse import os import torch import yaml DEFAULT_DEVICE =", "file', required=True) args = parser.parse_args() config = _load_config_yaml(args.config) # Get", "'cuda:0' def load_config(): parser = argparse.ArgumentParser(description='UNet3D training') parser.add_argument('--config', type=str, help='Path", "type=str, help='Path to the YAML config file', required=True) args =", "os import torch import yaml DEFAULT_DEVICE = 'cuda:0' def load_config():", "help='Path to the YAML config file', required=True) args = parser.parse_args()", "= config.get('device', DEFAULT_DEVICE) config['device'] = torch.device(device if torch.cuda.is_available() else \"cpu\")", "= parser.parse_args() config = _load_config_yaml(args.config) # Get a device to", "= _load_config_yaml(args.config) # Get a device to train on device", "# Get a device to train on device = config.get('device',", "train on device = config.get('device', DEFAULT_DEVICE) config['device'] = torch.device(device if", "else \"cpu\") return config def _load_config_yaml(config_file): return yaml.load(open(config_file, 'r'), Loader=yaml.FullLoader)", "argparse.ArgumentParser(description='UNet3D training') parser.add_argument('--config', type=str, help='Path to the YAML config file',", "device to train on device = config.get('device', DEFAULT_DEVICE) config['device'] =", "a device to train on device = config.get('device', DEFAULT_DEVICE) config['device']", "import os import torch import yaml DEFAULT_DEVICE = 'cuda:0' def", "to the YAML config file', required=True) args = parser.parse_args() config", "required=True) args = 
parser.parse_args() config = _load_config_yaml(args.config) # Get a", "parser.add_argument('--config', type=str, help='Path to the YAML config file', required=True) args", "= torch.device(device if torch.cuda.is_available() else \"cpu\") return config def _load_config_yaml(config_file):", "if torch.cuda.is_available() else \"cpu\") return config def _load_config_yaml(config_file): return yaml.load(open(config_file,", "config['device'] = torch.device(device if torch.cuda.is_available() else \"cpu\") return config def", "import yaml DEFAULT_DEVICE = 'cuda:0' def load_config(): parser = argparse.ArgumentParser(description='UNet3D", "= 'cuda:0' def load_config(): parser = argparse.ArgumentParser(description='UNet3D training') parser.add_argument('--config', type=str,", "torch import yaml DEFAULT_DEVICE = 'cuda:0' def load_config(): parser =", "config.get('device', DEFAULT_DEVICE) config['device'] = torch.device(device if torch.cuda.is_available() else \"cpu\") return", "the YAML config file', required=True) args = parser.parse_args() config =", "config = _load_config_yaml(args.config) # Get a device to train on", "parser.parse_args() config = _load_config_yaml(args.config) # Get a device to train", "= argparse.ArgumentParser(description='UNet3D training') parser.add_argument('--config', type=str, help='Path to the YAML config", "load_config(): parser = argparse.ArgumentParser(description='UNet3D training') parser.add_argument('--config', type=str, help='Path to the" ]
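# Usage sketch (assumed invocation, not part of the original file): write a
# minimal YAML config and simulate the CLI call `python train.py --config ...`.
# The config keys and file name below are invented for the demo; load_config()
# reads sys.argv, so config['device'] ends up as a torch.device ('cuda:0' if
# CUDA is available, otherwise 'cpu').
import sys


def demo_load_config(path="demo_config.yaml"):
    with open(path, "w") as f:
        f.write("device: cuda:0\nlearning_rate: 0.0002\n")
    sys.argv = ["train.py", "--config", path]
    config = load_config()
    print(config["device"], config["learning_rate"])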
[ "\\ SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule import OptimizeRuleGroup class SimplifyChannelModeConversion(OptimizeRuleGroup): def __init__(self):", "from webdnn.graph.optimize_rule import OptimizeRuleGroup class SimplifyChannelModeConversion(OptimizeRuleGroup): def __init__(self): super(SimplifyChannelModeConversion, self).__init__([", "SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule import OptimizeRuleGroup class SimplifyChannelModeConversion(OptimizeRuleGroup): def __init__(self): super(SimplifyChannelModeConversion,", "from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \\ SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule import OptimizeRuleGroup class", "\\ SimplifyNonsenseChannelModeConversion from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \\ SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule import", "webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \\ SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule import OptimizeRuleGroup class SimplifyChannelModeConversion(OptimizeRuleGroup):", "webdnn.graph.optimize_rule import OptimizeRuleGroup class SimplifyChannelModeConversion(OptimizeRuleGroup): def __init__(self): super(SimplifyChannelModeConversion, self).__init__([ SimplifyRedundantChannelModeConversion(),", "<filename>src/graph_transpiler/webdnn/backend/webgl/optimize_rules/simplify_channel_mode_conversion/simplify_channel_mode_conversion.py<gh_stars>1-10 from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_nonsense_channel_mode_conversion import \\ SimplifyNonsenseChannelModeConversion from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \\", "import OptimizeRuleGroup class SimplifyChannelModeConversion(OptimizeRuleGroup): def __init__(self): super(SimplifyChannelModeConversion, self).__init__([ SimplifyRedundantChannelModeConversion(), SimplifyNonsenseChannelModeConversion()", "import \\ SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule import OptimizeRuleGroup class SimplifyChannelModeConversion(OptimizeRuleGroup): def", "webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_nonsense_channel_mode_conversion import \\ SimplifyNonsenseChannelModeConversion from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \\ SimplifyRedundantChannelModeConversion from", "from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_nonsense_channel_mode_conversion import \\ SimplifyNonsenseChannelModeConversion from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \\ SimplifyRedundantChannelModeConversion", "import \\ SimplifyNonsenseChannelModeConversion from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \\ SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule", "OptimizeRuleGroup class SimplifyChannelModeConversion(OptimizeRuleGroup): def __init__(self): super(SimplifyChannelModeConversion, self).__init__([ SimplifyRedundantChannelModeConversion(), 
SimplifyNonsenseChannelModeConversion() ])", "SimplifyNonsenseChannelModeConversion from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \\ SimplifyRedundantChannelModeConversion from webdnn.graph.optimize_rule import OptimizeRuleGroup" ]
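# Not webdnn's actual API -- an illustrative sketch of the composite pattern an
# OptimizeRuleGroup embodies: keep running each sub-rule until no rule reports
# further changes (a fixpoint), so that one rule's rewrite can expose
# opportunities for another. All names here are invented for illustration.
class RuleGroupSketch:
    def __init__(self, rules):
        self.rules = rules

    def optimize(self, graph):
        changed = True
        while changed:
            changed = False
            for rule in self.rules:
                # Each rule returns the (possibly rewritten) graph and a flag
                # saying whether it modified anything.
                graph, flag = rule.optimize(graph)
                changed = changed or flag
        return graph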
[ "video\", CODEC_ID_VP6: \"On2 VP6\", CODEC_ID_VP6_WITH_ALPHA: \"On2 VP6 with alpha channel\",", "(SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2) sound_size_to_string = { SOUND_SIZE_8_BIT: \"snd8Bit\", SOUND_SIZE_16_BIT:", "CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8) codec_id_to_string = { CODEC_ID_JPEG:", "\"sndStereo\" } # AAC packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2)", "6) frame_type_to_string = { FRAME_TYPE_KEYFRAME: \"keyframe\", FRAME_TYPE_INTERFRAME: \"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable", "\"keyframe\", FRAME_TYPE_INTERFRAME: \"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable interframe\", FRAME_TYPE_GENERATED_KEYFRAME: \"generated keyframe\", FRAME_TYPE_INFO_FRAME:", "type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18) # Sound", "type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2) sound_type_to_string = { SOUND_TYPE_MONO: \"sndMono\",", "SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9)", "8-kHz mono\", SOUND_FORMAT_NELLYMOSER: \"Nellymoser\", SOUND_FORMAT_G711_A_LAW: \"G.711 A-law logarithmic PCM\", SOUND_FORMAT_G711_MU_LAW:", "# H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string", "SOUND_TYPE_STEREO) = range(2) sound_type_to_string = { SOUND_TYPE_MONO: \"sndMono\", SOUND_TYPE_STEREO: \"sndStereo\"", "H.263\", CODEC_ID_SCREEN_VIDEO: \"Screen video\", CODEC_ID_VP6: \"On2 VP6\", CODEC_ID_VP6_WITH_ALPHA: \"On2 VP6", "VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) =", "H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string = { H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", H264_PACKET_TYPE_NALU:", "h264_packet_type_to_string = { H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", H264_PACKET_TYPE_NALU: \"NAL unit\", H264_PACKET_TYPE_END_OF_SEQUENCE:", "(SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2) sound_type_to_string = { SOUND_TYPE_MONO: \"sndMono\", SOUND_TYPE_STEREO:", "= { VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object',", "16-kHz mono\", SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser 8-kHz mono\", SOUND_FORMAT_NELLYMOSER: \"Nellymoser\", SOUND_FORMAT_G711_A_LAW: \"G.711", "CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8) codec_id_to_string = {", "type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY)", "= range(10, 13) value_type_to_string = { VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean',", "= range(1, 6) frame_type_to_string = { FRAME_TYPE_KEYFRAME: \"keyframe\", FRAME_TYPE_INTERFRAME: \"interframe\",", "FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1, 6) frame_type_to_string = {", "type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence", "} # Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, 
FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) =", "\"Linear PCM, platform endian\", SOUND_FORMAT_ADPCM: \"ADPCM\", SOUND_FORMAT_MP3: \"MP3\", SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear", "little endian\", SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser 16-kHz mono\", SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser 8-kHz mono\",", "alpha channel\", CODEC_ID_SCREEN_VIDEO_V2: \"Screen video version 2\", CODEC_ID_H264: \"H.264\" }", "CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8) codec_id_to_string = { CODEC_ID_JPEG: \"JPEG\",", "= range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10, 13) value_type_to_string =", "range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10, 13) value_type_to_string = {", "FRAME_TYPE_GENERATED_KEYFRAME: \"generated keyframe\", FRAME_TYPE_INFO_FRAME: \"video info/command frame\" } # H.264", "FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1, 6) frame_type_to_string = { FRAME_TYPE_KEYFRAME:", "'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict Array', VALUE_TYPE_DATE: 'Date', VALUE_TYPE_LONGSTRING:", "SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear PCM, little endian\", SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser 16-kHz mono\", SOUND_FORMAT_NELLYMOSER_8KHZ:", "\"sequence header\", AAC_PACKET_TYPE_RAW: \"raw\" } # Codec ID (CODEC_ID_JPEG, CODEC_ID_H263,", "= (8, 9, 18) # Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3,", "SOUND_SIZE_16_BIT) = range(2) sound_size_to_string = { SOUND_SIZE_8_BIT: \"snd8Bit\", SOUND_SIZE_16_BIT: \"snd16Bit\"", "SOUND_SIZE_8_BIT: \"snd8Bit\", SOUND_SIZE_16_BIT: \"snd16Bit\" } # Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO)", "(SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string = { SOUND_RATE_5_5_KHZ:", "\"Screen video version 2\", CODEC_ID_H264: \"H.264\" } # Frame type", "{ SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear PCM, platform endian\", SOUND_FORMAT_ADPCM: \"ADPCM\", SOUND_FORMAT_MP3: \"MP3\",", "SOUND_FORMAT_NELLYMOSER: \"Nellymoser\", SOUND_FORMAT_G711_A_LAW: \"G.711 A-law logarithmic PCM\", SOUND_FORMAT_G711_MU_LAW: \"G.711 mu-law", "end\" } # Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP,", "\"Speex\", SOUND_FORMAT_MP3_8KHZ: \"MP3 8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific sound\" } # Sound", "\"H.264\" } # Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME)", "VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict Array', VALUE_TYPE_DATE: 'Date',", "(H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string = { H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence", "{ CODEC_ID_JPEG: \"JPEG\", CODEC_ID_H263: \"Sorenson H.263\", CODEC_ID_SCREEN_VIDEO: \"Screen video\", CODEC_ID_VP6:", "SOUND_FORMAT_G711_MU_LAW: \"G.711 mu-law logarithmic PCM\", SOUND_FORMAT_AAC: \"AAC\", SOUND_FORMAT_SPEEX: \"Speex\", SOUND_FORMAT_MP3_8KHZ:", "\"5.5-kHz\", SOUND_RATE_11_KHZ: \"11-kHz\", SOUND_RATE_22_KHZ: \"22-kHz\", SOUND_RATE_44_KHZ: \"44-kHz\" } # Sound", "= { SOUND_SIZE_8_BIT: \"snd8Bit\", SOUND_SIZE_16_BIT: \"snd16Bit\" } # Sound type", "SOUND_TYPE_MONO: \"sndMono\", SOUND_TYPE_STEREO: \"sndStereo\" } # AAC packet type 
(AAC_PACKET_TYPE_SEQUENCE_HEADER,", "packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string = {", "SOUND_RATE_44_KHZ: \"44-kHz\" } # Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2)", "# Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1,", "codec_id_to_string = { CODEC_ID_JPEG: \"JPEG\", CODEC_ID_H263: \"Sorenson H.263\", CODEC_ID_SCREEN_VIDEO: \"Screen", "type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1, 6) frame_type_to_string", "SOUND_FORMAT_SPEEX) = range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16) sound_format_to_string", "SOUND_SIZE_16_BIT: \"snd16Bit\" } # Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2)", "SOUND_TYPE_STEREO: \"sndStereo\" } # AAC packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) =", "TAG_TYPE_SCRIPT) = (8, 9, 18) # Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM,", "used in FLV files and their meanings. \"\"\" # Tag", "sound\" } # Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) =", "# Codec ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264)", "keyframe\", FRAME_TYPE_INFO_FRAME: \"video info/command frame\" } # H.264 packet type", "= { H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", H264_PACKET_TYPE_NALU: \"NAL unit\", H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence", "'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED:", "VP6 with alpha channel\", CODEC_ID_SCREEN_VIDEO_V2: \"Screen video version 2\", CODEC_ID_H264:", "range(1, 6) frame_type_to_string = { FRAME_TYPE_KEYFRAME: \"keyframe\", FRAME_TYPE_INTERFRAME: \"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME:", "(AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\",", "constants used in FLV files and their meanings. 
\"\"\" #", "mono\", SOUND_FORMAT_NELLYMOSER: \"Nellymoser\", SOUND_FORMAT_G711_A_LAW: \"G.711 A-law logarithmic PCM\", SOUND_FORMAT_G711_MU_LAW: \"G.711", "= range(14, 16) sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear PCM, platform", "Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW,", "CODEC_ID_VP6: \"On2 VP6\", CODEC_ID_VP6_WITH_ALPHA: \"On2 VP6 with alpha channel\", CODEC_ID_SCREEN_VIDEO_V2:", "VP6\", CODEC_ID_VP6_WITH_ALPHA: \"On2 VP6 with alpha channel\", CODEC_ID_SCREEN_VIDEO_V2: \"Screen video", "\"disposable interframe\", FRAME_TYPE_GENERATED_KEYFRAME: \"generated keyframe\", FRAME_TYPE_INFO_FRAME: \"video info/command frame\" }", "\"sequence header\", H264_PACKET_TYPE_NALU: \"NAL unit\", H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence end\" } #", "'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined', VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY:", "'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined', VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array',", "# Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2) sound_size_to_string = {", "FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1, 6) frame_type_to_string = { FRAME_TYPE_KEYFRAME: \"keyframe\",", "CODEC_ID_H263: \"Sorenson H.263\", CODEC_ID_SCREEN_VIDEO: \"Screen video\", CODEC_ID_VP6: \"On2 VP6\", CODEC_ID_VP6_WITH_ALPHA:", "CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8) codec_id_to_string =", "} # Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL,", "endian\", SOUND_FORMAT_ADPCM: \"ADPCM\", SOUND_FORMAT_MP3: \"MP3\", SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear PCM, little endian\",", "SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC)", "SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string = { SOUND_RATE_5_5_KHZ: \"5.5-kHz\", SOUND_RATE_11_KHZ: \"11-kHz\",", "} # Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4)", "= { SOUND_RATE_5_5_KHZ: \"5.5-kHz\", SOUND_RATE_11_KHZ: \"11-kHz\", SOUND_RATE_22_KHZ: \"22-kHz\", SOUND_RATE_44_KHZ: \"44-kHz\"", "SOUND_FORMAT_MP3_8KHZ: \"MP3 8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific sound\" } # Sound rate", "= range(4) sound_rate_to_string = { SOUND_RATE_5_5_KHZ: \"5.5-kHz\", SOUND_RATE_11_KHZ: \"11-kHz\", SOUND_RATE_22_KHZ:", "= { SOUND_TYPE_MONO: \"sndMono\", SOUND_TYPE_STEREO: \"sndStereo\" } # AAC packet", "\"AAC\", SOUND_FORMAT_SPEEX: \"Speex\", SOUND_FORMAT_MP3_8KHZ: \"MP3 8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific sound\" }", "VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null',", "range(2) aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", AAC_PACKET_TYPE_RAW: \"raw\" }", "VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip',", "H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string = { 
H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\",", "VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9)", "and their meanings. \"\"\" # Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT)", "'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL:", "VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10, 13)", "# Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER,", "CODEC_ID_SCREEN_VIDEO: \"Screen video\", CODEC_ID_VP6: \"On2 VP6\", CODEC_ID_VP6_WITH_ALPHA: \"On2 VP6 with", "18) # Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ,", "\"Device-specific sound\" } # Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ)", "\"Nellymoser 8-kHz mono\", SOUND_FORMAT_NELLYMOSER: \"Nellymoser\", SOUND_FORMAT_G711_A_LAW: \"G.711 A-law logarithmic PCM\",", "(SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16) sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear", "\"sndMono\", SOUND_TYPE_STEREO: \"sndStereo\" } # AAC packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW)", "\"generated keyframe\", FRAME_TYPE_INFO_FRAME: \"video info/command frame\" } # H.264 packet", "files and their meanings. \"\"\" # Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO,", "\"G.711 A-law logarithmic PCM\", SOUND_FORMAT_G711_MU_LAW: \"G.711 mu-law logarithmic PCM\", SOUND_FORMAT_AAC:", "type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string = { H264_PACKET_TYPE_SEQUENCE_HEADER:", "mono\", SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser 8-kHz mono\", SOUND_FORMAT_NELLYMOSER: \"Nellymoser\", SOUND_FORMAT_G711_A_LAW: \"G.711 A-law", "SOUND_FORMAT_MP3: \"MP3\", SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear PCM, little endian\", SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser 16-kHz", "logarithmic PCM\", SOUND_FORMAT_AAC: \"AAC\", SOUND_FORMAT_SPEEX: \"Speex\", SOUND_FORMAT_MP3_8KHZ: \"MP3 8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC:", "} # H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3)", "{ VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP:", "{ SOUND_RATE_5_5_KHZ: \"5.5-kHz\", SOUND_RATE_11_KHZ: \"11-kHz\", SOUND_RATE_22_KHZ: \"22-kHz\", SOUND_RATE_44_KHZ: \"44-kHz\" }", "range(4) sound_rate_to_string = { SOUND_RATE_5_5_KHZ: \"5.5-kHz\", SOUND_RATE_11_KHZ: \"11-kHz\", SOUND_RATE_22_KHZ: \"22-kHz\",", "interframe\", FRAME_TYPE_GENERATED_KEYFRAME: \"generated keyframe\", FRAME_TYPE_INFO_FRAME: \"video info/command frame\" } #", "FLV files and their meanings. 
\"\"\" # Tag type (TAG_TYPE_AUDIO,", "\"44-kHz\" } # Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2) sound_size_to_string", "CODEC_ID_H264: \"H.264\" } # Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME,", "\"Screen video\", CODEC_ID_VP6: \"On2 VP6\", CODEC_ID_VP6_WITH_ALPHA: \"On2 VP6 with alpha", "} # Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2) sound_size_to_string =", "H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", H264_PACKET_TYPE_NALU: \"NAL unit\", H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence end\" }", "SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) =", "VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10, 13) value_type_to_string", "range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14,", "8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific sound\" } # Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ,", "header\", AAC_PACKET_TYPE_RAW: \"raw\" } # Codec ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO,", "VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined', VALUE_TYPE_REFERENCE: 'Reference',", "\"JPEG\", CODEC_ID_H263: \"Sorenson H.263\", CODEC_ID_SCREEN_VIDEO: \"Screen video\", CODEC_ID_VP6: \"On2 VP6\",", "info/command frame\" } # H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE)", "} # AAC packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string", "VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict Array', VALUE_TYPE_DATE: 'Date', VALUE_TYPE_LONGSTRING: 'Longstring'", "SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16) sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear PCM,", "\"Nellymoser 16-kHz mono\", SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser 8-kHz mono\", SOUND_FORMAT_NELLYMOSER: \"Nellymoser\", SOUND_FORMAT_G711_A_LAW:", "VALUE_TYPE_UNDEFINED: 'Undefined', VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict Array',", "\"raw\" } # Codec ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA,", "{ SOUND_TYPE_MONO: \"sndMono\", SOUND_TYPE_STEREO: \"sndStereo\" } # AAC packet type", "13) value_type_to_string = { VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String',", "packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER:", "12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16) sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN:", "SOUND_FORMAT_AAC: \"AAC\", SOUND_FORMAT_SPEEX: \"Speex\", SOUND_FORMAT_MP3_8KHZ: \"MP3 8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific sound\"", "range(1, 8) codec_id_to_string = { CODEC_ID_JPEG: \"JPEG\", CODEC_ID_H263: \"Sorenson H.263\",", "VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined', VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY:", "size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2) sound_size_to_string = { SOUND_SIZE_8_BIT: \"snd8Bit\",", 
"SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string = { SOUND_RATE_5_5_KHZ: \"5.5-kHz\",", "= range(2) aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", AAC_PACKET_TYPE_RAW: \"raw\"", "\"snd8Bit\", SOUND_SIZE_16_BIT: \"snd16Bit\" } # Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) =", "video version 2\", CODEC_ID_H264: \"H.264\" } # Frame type (FRAME_TYPE_KEYFRAME,", "<filename>script.video.F4mProxy/lib/flvlib/constants.py \"\"\" The constants used in FLV files and their", "CODEC_ID_VP6_WITH_ALPHA: \"On2 VP6 with alpha channel\", CODEC_ID_SCREEN_VIDEO_V2: \"Screen video version", "\"Sorenson H.263\", CODEC_ID_SCREEN_VIDEO: \"Screen video\", CODEC_ID_VP6: \"On2 VP6\", CODEC_ID_VP6_WITH_ALPHA: \"On2", "= { AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", AAC_PACKET_TYPE_RAW: \"raw\" } # Codec", "FRAME_TYPE_INFO_FRAME) = range(1, 6) frame_type_to_string = { FRAME_TYPE_KEYFRAME: \"keyframe\", FRAME_TYPE_INTERFRAME:", "'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined', VALUE_TYPE_REFERENCE:", "SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string = { SOUND_RATE_5_5_KHZ: \"5.5-kHz\", SOUND_RATE_11_KHZ:", "sound_size_to_string = { SOUND_SIZE_8_BIT: \"snd8Bit\", SOUND_SIZE_16_BIT: \"snd16Bit\" } # Sound", "CODEC_ID_JPEG: \"JPEG\", CODEC_ID_H263: \"Sorenson H.263\", CODEC_ID_SCREEN_VIDEO: \"Screen video\", CODEC_ID_VP6: \"On2", "\"On2 VP6 with alpha channel\", CODEC_ID_SCREEN_VIDEO_V2: \"Screen video version 2\",", "= range(1, 8) codec_id_to_string = { CODEC_ID_JPEG: \"JPEG\", CODEC_ID_H263: \"Sorenson", "\"Nellymoser\", SOUND_FORMAT_G711_A_LAW: \"G.711 A-law logarithmic PCM\", SOUND_FORMAT_G711_MU_LAW: \"G.711 mu-law logarithmic", "\"On2 VP6\", CODEC_ID_VP6_WITH_ALPHA: \"On2 VP6 with alpha channel\", CODEC_ID_SCREEN_VIDEO_V2: \"Screen", "FRAME_TYPE_KEYFRAME: \"keyframe\", FRAME_TYPE_INTERFRAME: \"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable interframe\", FRAME_TYPE_GENERATED_KEYFRAME: \"generated keyframe\",", "SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser 8-kHz mono\", SOUND_FORMAT_NELLYMOSER: \"Nellymoser\", SOUND_FORMAT_G711_A_LAW: \"G.711 A-law logarithmic", "VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE,", "\"11-kHz\", SOUND_RATE_22_KHZ: \"22-kHz\", SOUND_RATE_44_KHZ: \"44-kHz\" } # Sound size (SOUND_SIZE_8_BIT,", "VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10,", "unit\", H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence end\" } # Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN,", "= range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) =", "\"MP3 8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific sound\" } # Sound rate (SOUND_RATE_5_5_KHZ,", "ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1,", "\"sequence end\" } # Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT,", "Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE,", "\"MP3\", 
SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear PCM, little endian\", SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser 16-kHz mono\",", "frame\" } # H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) =", "their meanings. \"\"\" # Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) =", "SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear PCM, platform endian\", SOUND_FORMAT_ADPCM: \"ADPCM\", SOUND_FORMAT_MP3: \"MP3\", SOUND_FORMAT_PCM_LITTLE_ENDIAN:", "range(3) h264_packet_type_to_string = { H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", H264_PACKET_TYPE_NALU: \"NAL unit\",", "in FLV files and their meanings. \"\"\" # Tag type", "(TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18) # Sound format", "TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18) # Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN,", "SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX)", "# Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2) sound_type_to_string = {", "\"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable interframe\", FRAME_TYPE_GENERATED_KEYFRAME: \"generated keyframe\", FRAME_TYPE_INFO_FRAME: \"video info/command", "SOUND_RATE_5_5_KHZ: \"5.5-kHz\", SOUND_RATE_11_KHZ: \"11-kHz\", SOUND_RATE_22_KHZ: \"22-kHz\", SOUND_RATE_44_KHZ: \"44-kHz\" } #", "{ AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", AAC_PACKET_TYPE_RAW: \"raw\" } # Codec ID", "'Null', VALUE_TYPE_UNDEFINED: 'Undefined', VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict", "H264_PACKET_TYPE_NALU: \"NAL unit\", H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence end\" } # Value type", "platform endian\", SOUND_FORMAT_ADPCM: \"ADPCM\", SOUND_FORMAT_MP3: \"MP3\", SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear PCM, little", "FRAME_TYPE_INFO_FRAME: \"video info/command frame\" } # H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER,", "SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12) (SOUND_FORMAT_MP3_8KHZ,", "AAC packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string = {", "= { CODEC_ID_JPEG: \"JPEG\", CODEC_ID_H263: \"Sorenson H.263\", CODEC_ID_SCREEN_VIDEO: \"Screen video\",", "FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable interframe\", FRAME_TYPE_GENERATED_KEYFRAME: \"generated keyframe\", FRAME_TYPE_INFO_FRAME: \"video info/command frame\"", "\"22-kHz\", SOUND_RATE_44_KHZ: \"44-kHz\" } # Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) =", "SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10,", "{ SOUND_SIZE_8_BIT: \"snd8Bit\", SOUND_SIZE_16_BIT: \"snd16Bit\" } # Sound type (SOUND_TYPE_MONO,", "'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict Array', VALUE_TYPE_DATE: 'Date', VALUE_TYPE_LONGSTRING: 'Longstring' }", "Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string =", "CODEC_ID_H264) = range(1, 8) codec_id_to_string = { CODEC_ID_JPEG: \"JPEG\", CODEC_ID_H263:", "H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence end\" } # Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING,", "PCM, little endian\", SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser 16-kHz 
mono\", SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser 8-kHz", "SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser 16-kHz mono\", SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser 8-kHz mono\", SOUND_FORMAT_NELLYMOSER: \"Nellymoser\",", "range(14, 16) sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear PCM, platform endian\",", "\"snd16Bit\" } # Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2) sound_type_to_string", "(SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16)", "PCM\", SOUND_FORMAT_AAC: \"AAC\", SOUND_FORMAT_SPEEX: \"Speex\", SOUND_FORMAT_MP3_8KHZ: \"MP3 8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific", "FRAME_TYPE_INTERFRAME: \"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable interframe\", FRAME_TYPE_GENERATED_KEYFRAME: \"generated keyframe\", FRAME_TYPE_INFO_FRAME: \"video", "= range(2) sound_type_to_string = { SOUND_TYPE_MONO: \"sndMono\", SOUND_TYPE_STEREO: \"sndStereo\" }", "H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU, H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3) h264_packet_type_to_string =", "mu-law logarithmic PCM\", SOUND_FORMAT_AAC: \"AAC\", SOUND_FORMAT_SPEEX: \"Speex\", SOUND_FORMAT_MP3_8KHZ: \"MP3 8-kHz\",", "9, 18) # Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ,", "SOUND_FORMAT_SPEEX: \"Speex\", SOUND_FORMAT_MP3_8KHZ: \"MP3 8-kHz\", SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific sound\" } #", "AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", AAC_PACKET_TYPE_RAW: \"raw\" } # Codec ID (CODEC_ID_JPEG,", "AAC_PACKET_TYPE_RAW: \"raw\" } # Codec ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6,", "endian\", SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser 16-kHz mono\", SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser 8-kHz mono\", SOUND_FORMAT_NELLYMOSER:", "8) codec_id_to_string = { CODEC_ID_JPEG: \"JPEG\", CODEC_ID_H263: \"Sorenson H.263\", CODEC_ID_SCREEN_VIDEO:", "Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2) sound_size_to_string = { SOUND_SIZE_8_BIT:", "meanings. 
\"\"\" # Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8,", "\"Linear PCM, little endian\", SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser 16-kHz mono\", SOUND_FORMAT_NELLYMOSER_8KHZ: \"Nellymoser", "with alpha channel\", CODEC_ID_SCREEN_VIDEO_V2: \"Screen video version 2\", CODEC_ID_H264: \"H.264\"", "Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1, 6)", "SOUND_FORMAT_DEVICE_SPECIFIC: \"Device-specific sound\" } # Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ,", "SOUND_FORMAT_G711_A_LAW: \"G.711 A-law logarithmic PCM\", SOUND_FORMAT_G711_MU_LAW: \"G.711 mu-law logarithmic PCM\",", "rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string = {", "A-law logarithmic PCM\", SOUND_FORMAT_G711_MU_LAW: \"G.711 mu-law logarithmic PCM\", SOUND_FORMAT_AAC: \"AAC\",", "= range(3) h264_packet_type_to_string = { H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", H264_PACKET_TYPE_NALU: \"NAL", "{ H264_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", H264_PACKET_TYPE_NALU: \"NAL unit\", H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence end\"", "SOUND_FORMAT_ADPCM: \"ADPCM\", SOUND_FORMAT_MP3: \"MP3\", SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear PCM, little endian\", SOUND_FORMAT_NELLYMOSER_16KHZ:", "VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined', VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA", "SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC, SOUND_FORMAT_SPEEX) = range(10, 12)", "range(2) sound_size_to_string = { SOUND_SIZE_8_BIT: \"snd8Bit\", SOUND_SIZE_16_BIT: \"snd16Bit\" } #", "(FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME, FRAME_TYPE_GENERATED_KEYFRAME, FRAME_TYPE_INFO_FRAME) = range(1, 6) frame_type_to_string =", "VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING)", "sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear PCM, platform endian\", SOUND_FORMAT_ADPCM: \"ADPCM\",", "= range(2) sound_size_to_string = { SOUND_SIZE_8_BIT: \"snd8Bit\", SOUND_SIZE_16_BIT: \"snd16Bit\" }", "CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8) codec_id_to_string", "SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) = range(9) (SOUND_FORMAT_AAC,", "PCM, platform endian\", SOUND_FORMAT_ADPCM: \"ADPCM\", SOUND_FORMAT_MP3: \"MP3\", SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear PCM,", "\"G.711 mu-law logarithmic PCM\", SOUND_FORMAT_AAC: \"AAC\", SOUND_FORMAT_SPEEX: \"Speex\", SOUND_FORMAT_MP3_8KHZ: \"MP3", "= { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear PCM, platform endian\", SOUND_FORMAT_ADPCM: \"ADPCM\", SOUND_FORMAT_MP3:", "version 2\", CODEC_ID_H264: \"H.264\" } # Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME,", "format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW)", "frame_type_to_string = { FRAME_TYPE_KEYFRAME: 
\"keyframe\", FRAME_TYPE_INTERFRAME: \"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable interframe\",", "header\", H264_PACKET_TYPE_NALU: \"NAL unit\", H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence end\" } # Value", "value_type_to_string = { VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT:", "# Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18)", "\"\"\" The constants used in FLV files and their meanings.", "(VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) =", "16) sound_format_to_string = { SOUND_FORMAT_PCM_PLATFORM_ENDIAN: \"Linear PCM, platform endian\", SOUND_FORMAT_ADPCM:", "= { FRAME_TYPE_KEYFRAME: \"keyframe\", FRAME_TYPE_INTERFRAME: \"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable interframe\", FRAME_TYPE_GENERATED_KEYFRAME:", "VALUE_TYPE_STRING: 'String', VALUE_TYPE_OBJECT: 'Object', VALUE_TYPE_MOVIECLIP: 'MovieClip', VALUE_TYPE_NULL: 'Null', VALUE_TYPE_UNDEFINED: 'Undefined',", "{ FRAME_TYPE_KEYFRAME: \"keyframe\", FRAME_TYPE_INTERFRAME: \"interframe\", FRAME_TYPE_DISPOSABLE_INTERFRAME: \"disposable interframe\", FRAME_TYPE_GENERATED_KEYFRAME: \"generated", "logarithmic PCM\", SOUND_FORMAT_G711_MU_LAW: \"G.711 mu-law logarithmic PCM\", SOUND_FORMAT_AAC: \"AAC\", SOUND_FORMAT_SPEEX:", "} # Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2) sound_type_to_string =", "(8, 9, 18) # Sound format (SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN,", "PCM\", SOUND_FORMAT_G711_MU_LAW: \"G.711 mu-law logarithmic PCM\", SOUND_FORMAT_AAC: \"AAC\", SOUND_FORMAT_SPEEX: \"Speex\",", "(CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) = range(1, 8)", "sound_type_to_string = { SOUND_TYPE_MONO: \"sndMono\", SOUND_TYPE_STEREO: \"sndStereo\" } # AAC", "Sound type (SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2) sound_type_to_string = { SOUND_TYPE_MONO:", "CODEC_ID_SCREEN_VIDEO_V2: \"Screen video version 2\", CODEC_ID_H264: \"H.264\" } # Frame", "\"ADPCM\", SOUND_FORMAT_MP3: \"MP3\", SOUND_FORMAT_PCM_LITTLE_ENDIAN: \"Linear PCM, little endian\", SOUND_FORMAT_NELLYMOSER_16KHZ: \"Nellymoser", "(VALUE_TYPE_STRICT_ARRAY, VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10, 13) value_type_to_string = { VALUE_TYPE_NUMBER:", "# Sound rate (SOUND_RATE_5_5_KHZ, SOUND_RATE_11_KHZ, SOUND_RATE_22_KHZ, SOUND_RATE_44_KHZ) = range(4) sound_rate_to_string", "= range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16) sound_format_to_string =", "\"video info/command frame\" } # H.264 packet type (H264_PACKET_TYPE_SEQUENCE_HEADER, H264_PACKET_TYPE_NALU,", "Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18) #", "SOUND_RATE_11_KHZ: \"11-kHz\", SOUND_RATE_22_KHZ: \"22-kHz\", SOUND_RATE_44_KHZ: \"44-kHz\" } # Sound size", "SOUND_RATE_22_KHZ: \"22-kHz\", SOUND_RATE_44_KHZ: \"44-kHz\" } # Sound size (SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT)", "VALUE_TYPE_DATE, VALUE_TYPE_LONGSTRING) = range(10, 13) value_type_to_string = { VALUE_TYPE_NUMBER: 'Number',", "range(2) sound_type_to_string = { SOUND_TYPE_MONO: \"sndMono\", SOUND_TYPE_STEREO: \"sndStereo\" } #", "} # Codec ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2,", "channel\", 
CODEC_ID_SCREEN_VIDEO_V2: \"Screen video version 2\", CODEC_ID_H264: \"H.264\" } #", "sound_rate_to_string = { SOUND_RATE_5_5_KHZ: \"5.5-kHz\", SOUND_RATE_11_KHZ: \"11-kHz\", SOUND_RATE_22_KHZ: \"22-kHz\", SOUND_RATE_44_KHZ:", "The constants used in FLV files and their meanings. \"\"\"", "aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", AAC_PACKET_TYPE_RAW: \"raw\" } #", "range(10, 12) (SOUND_FORMAT_MP3_8KHZ, SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16) sound_format_to_string = {", "VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED, VALUE_TYPE_REFERENCE, VALUE_TYPE_ECMA_ARRAY) = range(9) (VALUE_TYPE_STRICT_ARRAY,", "(SOUND_FORMAT_PCM_PLATFORM_ENDIAN, SOUND_FORMAT_ADPCM, SOUND_FORMAT_MP3, SOUND_FORMAT_PCM_LITTLE_ENDIAN, SOUND_FORMAT_NELLYMOSER_16KHZ, SOUND_FORMAT_NELLYMOSER_8KHZ, SOUND_FORMAT_NELLYMOSER, SOUND_FORMAT_G711_A_LAW, SOUND_FORMAT_G711_MU_LAW) =", "VALUE_TYPE_LONGSTRING) = range(10, 13) value_type_to_string = { VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN:", "2\", CODEC_ID_H264: \"H.264\" } # Frame type (FRAME_TYPE_KEYFRAME, FRAME_TYPE_INTERFRAME, FRAME_TYPE_DISPOSABLE_INTERFRAME,", "# AAC packet type (AAC_PACKET_TYPE_SEQUENCE_HEADER, AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string =", "AAC_PACKET_TYPE_RAW) = range(2) aac_packet_type_to_string = { AAC_PACKET_TYPE_SEQUENCE_HEADER: \"sequence header\", AAC_PACKET_TYPE_RAW:", "\"\"\" # Tag type (TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9,", "\"NAL unit\", H264_PACKET_TYPE_END_OF_SEQUENCE: \"sequence end\" } # Value type (VALUE_TYPE_NUMBER,", "# Value type (VALUE_TYPE_NUMBER, VALUE_TYPE_BOOLEAN, VALUE_TYPE_STRING, VALUE_TYPE_OBJECT, VALUE_TYPE_MOVIECLIP, VALUE_TYPE_NULL, VALUE_TYPE_UNDEFINED,", "range(10, 13) value_type_to_string = { VALUE_TYPE_NUMBER: 'Number', VALUE_TYPE_BOOLEAN: 'Boolean', VALUE_TYPE_STRING:", "'Undefined', VALUE_TYPE_REFERENCE: 'Reference', VALUE_TYPE_ECMA_ARRAY: 'ECMA Array', VALUE_TYPE_STRICT_ARRAY: 'Strict Array', VALUE_TYPE_DATE:", "Codec ID (CODEC_ID_JPEG, CODEC_ID_H263, CODEC_ID_SCREEN_VIDEO, CODEC_ID_VP6, CODEC_ID_VP6_WITH_ALPHA, CODEC_ID_SCREEN_VIDEO_V2, CODEC_ID_H264) =" ]
[ "= self.chunk.label().synset() return synset except AttributeError: try: synset = wn.synset(self.chunk.label())", "#returns the synset if applicable, otherwise returns None def get_syn_set(self):", "return synset except: return None #returns a list of the", "the chunk def get_words(self): try: return self.chunk.leaves() except AttributeError: return", "of the words in the chunk def get_words(self): try: return", "get_syn_set(self): try: synset = self.chunk.label().synset() return synset except AttributeError: try:", "try: return self.chunk.leaves() except AttributeError: return self.chunk # if __name__", "semcor.tagged_sents(tag='sem')[0] # for chunk in s: # a = semcor_chunk(chunk)", "if __name__ == \"__main__\": # s = semcor.tagged_sents(tag='sem')[0] # for", "chunk #returns the synset if applicable, otherwise returns None def", "AttributeError: return self.chunk # if __name__ == \"__main__\": # s", "# for chunk in s: # a = semcor_chunk(chunk) #", "semcor_chunk: def __init__(self, chunk): self.chunk = chunk #returns the synset", "= semcor_chunk(chunk) # print a.get_syn_set() # for chunk in s:", "synset except: return None #returns a list of the words", "# a = semcor_chunk(chunk) # print a.get_syn_set() # for chunk", "from nltk.corpus import semcor class semcor_chunk: def __init__(self, chunk): self.chunk", "= chunk #returns the synset if applicable, otherwise returns None", "synset if applicable, otherwise returns None def get_syn_set(self): try: synset", "applicable, otherwise returns None def get_syn_set(self): try: synset = self.chunk.label().synset()", "# s = semcor.tagged_sents(tag='sem')[0] # for chunk in s: #", "self.chunk = chunk #returns the synset if applicable, otherwise returns", "import semcor class semcor_chunk: def __init__(self, chunk): self.chunk = chunk", "return synset except AttributeError: try: synset = wn.synset(self.chunk.label()) return synset", "self.chunk # if __name__ == \"__main__\": # s = semcor.tagged_sents(tag='sem')[0]", "def get_words(self): try: return self.chunk.leaves() except AttributeError: return self.chunk #", "list of the words in the chunk def get_words(self): try:", "semcor class semcor_chunk: def __init__(self, chunk): self.chunk = chunk #returns", "returns None def get_syn_set(self): try: synset = self.chunk.label().synset() return synset", "== \"__main__\": # s = semcor.tagged_sents(tag='sem')[0] # for chunk in", "# print a.get_syn_set() # for chunk in s: # a", "the synset if applicable, otherwise returns None def get_syn_set(self): try:", "AttributeError: try: synset = wn.synset(self.chunk.label()) return synset except: return None", "= wn.synset(self.chunk.label()) return synset except: return None #returns a list", "print a.get_syn_set() # for chunk in s: # a =", "class semcor_chunk: def __init__(self, chunk): self.chunk = chunk #returns the", "otherwise returns None def get_syn_set(self): try: synset = self.chunk.label().synset() return", "def __init__(self, chunk): self.chunk = chunk #returns the synset if", "\"__main__\": # s = semcor.tagged_sents(tag='sem')[0] # for chunk in s:", "try: synset = wn.synset(self.chunk.label()) return synset except: return None #returns", "s: # a = semcor_chunk(chunk) # print a.get_syn_set() # for", "except AttributeError: try: synset = wn.synset(self.chunk.label()) return synset except: return", "semcor_chunk(chunk) # print a.get_syn_set() # for chunk in s: #", "a list of the words in the chunk def get_words(self):", "in the chunk def get_words(self): try: return self.chunk.leaves() except 
AttributeError:", "try: synset = self.chunk.label().synset() return synset except AttributeError: try: synset", "for chunk in s: # a = semcor_chunk(chunk) # print", "__name__ == \"__main__\": # s = semcor.tagged_sents(tag='sem')[0] # for chunk", "except: return None #returns a list of the words in", "#returns a list of the words in the chunk def", "chunk in s: # a = semcor_chunk(chunk) # print a.get_words()", "return self.chunk # if __name__ == \"__main__\": # s =", "def get_syn_set(self): try: synset = self.chunk.label().synset() return synset except AttributeError:", "nltk.corpus import semcor class semcor_chunk: def __init__(self, chunk): self.chunk =", "synset = self.chunk.label().synset() return synset except AttributeError: try: synset =", "self.chunk.leaves() except AttributeError: return self.chunk # if __name__ == \"__main__\":", "= semcor.tagged_sents(tag='sem')[0] # for chunk in s: # a =", "the words in the chunk def get_words(self): try: return self.chunk.leaves()", "synset = wn.synset(self.chunk.label()) return synset except: return None #returns a", "return self.chunk.leaves() except AttributeError: return self.chunk # if __name__ ==", "None def get_syn_set(self): try: synset = self.chunk.label().synset() return synset except", "wn.synset(self.chunk.label()) return synset except: return None #returns a list of", "if applicable, otherwise returns None def get_syn_set(self): try: synset =", "None #returns a list of the words in the chunk", "# if __name__ == \"__main__\": # s = semcor.tagged_sents(tag='sem')[0] #", "<filename>A2/semcor_chunk.py from nltk.corpus import semcor class semcor_chunk: def __init__(self, chunk):", "chunk): self.chunk = chunk #returns the synset if applicable, otherwise", "synset except AttributeError: try: synset = wn.synset(self.chunk.label()) return synset except:", "words in the chunk def get_words(self): try: return self.chunk.leaves() except", "in s: # a = semcor_chunk(chunk) # print a.get_syn_set() #", "__init__(self, chunk): self.chunk = chunk #returns the synset if applicable,", "get_words(self): try: return self.chunk.leaves() except AttributeError: return self.chunk # if", "chunk def get_words(self): try: return self.chunk.leaves() except AttributeError: return self.chunk", "a = semcor_chunk(chunk) # print a.get_syn_set() # for chunk in", "except AttributeError: return self.chunk # if __name__ == \"__main__\": #", "s = semcor.tagged_sents(tag='sem')[0] # for chunk in s: # a", "chunk in s: # a = semcor_chunk(chunk) # print a.get_syn_set()", "return None #returns a list of the words in the", "a.get_syn_set() # for chunk in s: # a = semcor_chunk(chunk)", "self.chunk.label().synset() return synset except AttributeError: try: synset = wn.synset(self.chunk.label()) return" ]
[ "Add node_representations and aggregated_messages h = node_representations + aggregated_messages else:", "Preprocess the node_features to produce node representations x = self._preprocess(self._node_features)", "_aggregate(self, node_indices, neighbour_messages): # node_indices shape is [num_edges] # neighbour_messages", "for the GRU layer h = tf.stack([node_respresentations, aggregated_messages], axis=1) elif", "if self._normalize: node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1) return node_embeddings def call(self,", "Create a compute logits layer self._compute_logits = layers.Dense(units=num_classes, name=\"logits\") def", "# node_representations shape is [num_edges, embedding_dim] messages = self._ffn_prepare(node_representations) if", "dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate ) else: self._update_fn = create_ffn(hidden_units, dropout_rate) def", "tuple of three elements: node_representations, edges, edge_weights. Returns: node_embeddings of", "embeddings for the input node_indices node_embeddings = tf.gather(x, input_node_indices) #", "= load_cora(verbose=1) num_features = len(feature_names) num_classes = len(class_idx) hidden_units =", "(default weights) edge_weights = tf.ones(shape=edges.shape[1]) # Create a node features", "raise ValueError(f\"Invalid combination type: {self._combinatino_type}.\") # Apply the processing function", "\"gru\": node_embeddings = tf.unstack(node_embeddings, axis=1)[-1] if self._normalize: node_embeddings = tf.nn.l2_normalize(node_embeddings,", "= edges self._edge_weights = edge_weights # Set edge_weights to ones", "name=\"graph_conv1\" ) # Create the 2nd GraphConv layer self._conv2 =", "= node_features self._edges = edges self._edge_weights = edge_weights # Set", "x = self._postprocess(x) # Fetch node embeddings for the input", "= 256 # Create an edges array (sparse adjacency matrix)", "neighbour_messages): # node_indices shape is [num_edges] # neighbour_messages shape: [num_edges,", "ValueError(f\"Invalid combination type: {self._combinatino_type}.\") # Apply the processing function node_embeddings", "edges, edge_weights = inputs # Get node_indices (source) and neighbour_indices", "/ tf.math.reduce_sum(self._edge_weights) # Create a process layer self._preprocess = create_ffn(hidden_units,", "= GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv2\" ) #", "curves display_learning_curves(history, figure_name=\"gnn.png\") # Evaluate on test data X_test =", "32] learning_rate = 0.01 dropout_rate = 0.5 epochs = 300", "create_ffn(hidden_units, dropout_rate, name=\"preprocess\") # Create the 1st GraphConv layer self._conv1", "neighbour_indices = edges[0], edges[1] # neighbour_representations shape is [num_edges, representation_dim]", "neighbour_messages = self._prepare(neighbour_representations, edge_weights) # Aggregate the neighbour messages aggregated_messages", "dropout_rate=0.2, aggregation_type=\"mean\", combination_type=\"concat\", normalize=False, *args, **kwargs ): super(GraphConvLayer, self).__init__(*args, **kwargs)", "X_train, y_train, batch_size, epochs, learning_rate) # Plot the learning curves", "layers from load_cora import load_cora from baseline_model import create_ffn from", "self._combination_type == \"gru\": # Create a sequence of two elements", "the input node_indices node_embeddings = tf.gather(x, input_node_indices) # Compute logits", "self._postprocess(x) # Fetch node embeddings for the input node_indices node_embeddings", "= 
combination_type self._normalize = normalize self._ffn_prepare = create_ffn(hidden_units, dropout_rate) if", "name=\"graph_conv2\" ) # Create a postprocess layer self._postprocess = create_ffn(hidden_units,", "# Add node_representations and aggregated_messages h = node_representations + aggregated_messages", "hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv1\" ) # Create the", "self._update(node_representations, aggregated_messages) class GNNNodeClassifier(tf.keras.Model): def __init__( self, graph_info, num_classes, hidden_units,", "h = node_representations + aggregated_messages else: raise ValueError(f\"Invalid combination type:", "of shape [2, num_edges] edges = citations[[\"source\", \"target\"]].to_numpy().T #print(edges) #", "# Concatenate the node_representations and aggregated_messages h = tf.concat([node_representations, aggregated_messages],", "Create the 1st GraphConv layer self._conv1 = GraphConvLayer( hidden_units, dropout_rate,", "else: self._update_fn = create_ffn(hidden_units, dropout_rate) def _prepare(self, node_representations, weights=None): #", "= create_ffn(hidden_units, dropout_rate) def _prepare(self, node_representations, weights=None): # node_representations shape", "papers, train_data, test_data, paper_idx, class_idx, citations, feature_names = load_cora(verbose=1) num_features", "elif self._aggregation_type == \"max\": aggregated_message = tf.math.unsorted_segment_max( neighbour_messages, node_indices, num_segments=num_nodes", "def _update(self, node_representations, aggregated_messages): # node_representations shape is [num_nodes, representation_dim]", "# node_representations shape is [num_nodes, representation_dim] # aggregated_messages shape is", "def _aggregate(self, node_indices, neighbour_messages): # node_indices shape is [num_edges] #", "keras from tensorflow.keras import layers from load_cora import load_cora from", "combination type: {self._combinatino_type}.\") # Apply the processing function node_embeddings =", "hidden_units = [32, 32] learning_rate = 0.01 dropout_rate = 0.5", "import create_ffn from utils import run_experiment from utils import display_learning_curves", "len(feature_names) num_classes = len(class_idx) hidden_units = [32, 32] learning_rate =", "representation_dim]. \"\"\" node_representations, edges, edge_weights = inputs # Get node_indices", "aggregated_messages): # node_representations shape is [num_nodes, representation_dim] # aggregated_messages shape", "data X_test = test_data.paper_id.to_numpy() y_test = test_data.subject _, test_accuracy =", "from load_cora import load_cora from baseline_model import create_ffn from utils", "shape is [num_edges, representation_dim] neighbour_representations = tf.gather(node_representations, neighbour_indices) # Prepare", "the GNN model X_train = train_data.paper_id.to_numpy() y_train = train_data.subject history", "= create_ffn(hidden_units, dropout_rate, name=\"postprocess\") # Create a compute logits layer", "\"\"\"Process the inputs to produce the node_embeddings. 
Args: Inputs: A", "self._update_fn = layers.GRU( units=hidden_units, activation=\"tanh\", recurrent_activation=\"sigmoid\", dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate )", "baseline_model import create_ffn from utils import run_experiment from utils import", "representation_dim] if self._combination_type == \"gru\": # Create a sequence of", "= test_data.paper_id.to_numpy() y_test = test_data.subject _, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test,", "with node_features, edges, and edge_weights graph_info = (node_features, edges, edge_weights)", "produce the node_embeddings. Args: Inputs: A tuple of three elements:", "from edges node_indices, neighbour_indices = edges[0], edges[1] # neighbour_representations shape", "Inputs: A tuple of three elements: node_representations, edges, edge_weights. Returns:", "# neighbour_representations shape is [num_edges, representation_dim] neighbour_representations = tf.gather(node_representations, neighbour_indices)", "# Postprocess node embedding x = self._postprocess(x) # Fetch node", "{self._combinatino_type}.\") # Apply the processing function node_embeddings = self._update_fn(h) if", "from utils import run_experiment from utils import display_learning_curves # Graph", "0.01 dropout_rate = 0.5 epochs = 300 batch_size = 256", "1st GraphConv layer self._conv1 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type,", "edges, edge_weights = graph_info self._node_features = node_features self._edges = edges", "neighbour_representations shape is [num_edges, representation_dim] neighbour_representations = tf.gather(node_representations, neighbour_indices) #", "weights=None): # node_representations shape is [num_edges, embedding_dim] messages = self._ffn_prepare(node_representations)", "x # Postprocess node embedding x = self._postprocess(x) # Fetch", "else: raise ValueError(f\"Invalid aggregation type: {self._aggregation_type}.\") return aggregated_message def _update(self,", "self._edge_weights = edge_weights # Set edge_weights to ones if not", "not provided if self._edge_weights is None: self._edge_weights = tf.ones(shape=edges.shape[1]) #", "tuple with node_features, edges, and edge_weights graph_info = (node_features, edges,", "self._combination_type == \"add\": # Add node_representations and aggregated_messages h =", "test_data.paper_id.to_numpy() y_test = test_data.subject _, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1)", "edges[1] # neighbour_representations shape is [num_edges, representation_dim] neighbour_representations = tf.gather(node_representations,", "{self._aggregation_type}.\") return aggregated_message def _update(self, node_representations, aggregated_messages): # node_representations shape", "return aggregated_message def _update(self, node_representations, aggregated_messages): # node_representations shape is", "layer self._preprocess = create_ffn(hidden_units, dropout_rate, name=\"preprocess\") # Create the 1st", "node_embeddings def call(self, inputs): \"\"\"Process the inputs to produce the", "return node_embeddings def call(self, inputs): \"\"\"Process the inputs to produce", "to produce the node_embeddings. 
Args: Inputs: A tuple of three", "# Create an edge weights array of ones (default weights)", "and edge_weights graph_info = (node_features, edges, edge_weights) print(\"Edges shape: \",", "node_representations + aggregated_messages else: raise ValueError(f\"Invalid combination type: {self._combinatino_type}.\") #", "self._edge_weights = self._edge_weights / tf.math.reduce_sum(self._edge_weights) # Create a process layer", "Apply the 2nd graph conv layer x2 = self._conv2((x, self._edges,", "the messages of the neighbours neighbour_messages = self._prepare(neighbour_representations, edge_weights) #", "if not provided if self._edge_weights is None: self._edge_weights = tf.ones(shape=edges.shape[1])", "Postprocess node embedding x = self._postprocess(x) # Fetch node embeddings", "= tf.stack([node_respresentations, aggregated_messages], axis=1) elif self._combination_type == \"concat\": # Concatenate", "aggregated_messages shape is [num_nodes, representation_dim] if self._combination_type == \"gru\": #", "x # Apply the 2nd graph conv layer x2 =", "= x2 + x # Postprocess node embedding x =", "create_ffn(hidden_units, dropout_rate) def _prepare(self, node_representations, weights=None): # node_representations shape is", "[num_edges, embedding_dim] messages = self._ffn_prepare(node_representations) if weights is not None:", "inputs): \"\"\"Process the inputs to produce the node_embeddings. Args: Inputs:", "Apply the 1st graph conv layer x1 = self._conv1((x, self._edges,", "tf.nn.l2_normalize(node_embeddings, axis=-1) return node_embeddings def call(self, inputs): \"\"\"Process the inputs", "if self._edge_weights is None: self._edge_weights = tf.ones(shape=edges.shape[1]) # Scale edge_weights", "= graph_info self._node_features = node_features self._edges = edges self._edge_weights =", "num_edges] edges = citations[[\"source\", \"target\"]].to_numpy().T #print(edges) # Create an edge", "h = tf.concat([node_representations, aggregated_messages], axis=1) elif self._combination_type == \"add\": #", "print(\"GNN output shape: \", gnn_model([1, 10, 100])) gnn_model.summary() # Train", "[num_nodes, num_features] node_features = tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32) # Create graph", "test data X_test = test_data.paper_id.to_numpy() y_test = test_data.subject _, test_accuracy", "== \"gru\": node_embeddings = tf.unstack(node_embeddings, axis=1)[-1] if self._normalize: node_embeddings =", "node_features self._edges = edges self._edge_weights = edge_weights # Set edge_weights", "combination_type self._normalize = normalize self._ffn_prepare = create_ffn(hidden_units, dropout_rate) if self._combination_type", "# Create graph info tuple with node_features, edges, and edge_weights", "from tensorflow import keras from tensorflow.keras import layers from load_cora", "Get node_indices (source) and neighbour_indices (target) from edges node_indices, neighbour_indices", "\"target\"]].to_numpy().T #print(edges) # Create an edge weights array of ones", "node_representations, aggregated_messages): # node_representations shape is [num_nodes, representation_dim] # aggregated_messages", ") # Create a postprocess layer self._postprocess = create_ffn(hidden_units, dropout_rate,", "= create_ffn(hidden_units, dropout_rate, name=\"preprocess\") # Create the 1st GraphConv layer", "aggregation type: {self._aggregation_type}.\") return aggregated_message def _update(self, node_representations, aggregated_messages): #", "gnn_model = GNNNodeClassifier( graph_info=graph_info, 
num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name=\"gnn_model\" ) print(\"GNN", "\"\"\" node_representations, edges, edge_weights = inputs # Get node_indices (source)", "node features array of shape [num_nodes, num_features] node_features = tf.cast(", "neighbour messages aggregated_messages = self._aggregate(node_indices, neighbour_messages) # Update the node", "create_ffn(hidden_units, dropout_rate) if self._combination_type == \"gated\": self._update_fn = layers.GRU( units=hidden_units,", "is not None: messages = messages * tf.expand_dims(weights, -1) return", "== \"mean\": aggregated_message = tf.math.unsorted_segment_mean( neighbour_messages, node_indices, num_segments=num_nodes ) elif", "\"gated\": self._update_fn = layers.GRU( units=hidden_units, activation=\"tanh\", recurrent_activation=\"sigmoid\", dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate", "A tuple of three elements: node_representations, edges, edge_weights. Returns: node_embeddings", "# Graph convolution layer class GraphConvLayer(layers.Layer): def __init__( self, hidden_units,", "three elements: node_representations, edges, edge_weights. Returns: node_embeddings of shape [num_nodes,", "the node_representations and aggregated_messages h = tf.concat([node_representations, aggregated_messages], axis=1) elif", "+ x # Apply the 2nd graph conv layer x2", "Plot the learning curves display_learning_curves(history, figure_name=\"gnn.png\") # Evaluate on test", "inputs to produce the node_embeddings. Args: Inputs: A tuple of", "convolution layer class GraphConvLayer(layers.Layer): def __init__( self, hidden_units, dropout_rate=0.2, aggregation_type=\"mean\",", "def __init__( self, hidden_units, dropout_rate=0.2, aggregation_type=\"mean\", combination_type=\"concat\", normalize=False, *args, **kwargs", "return self._compute_logits(node_embeddings) if __name__ == '__main__': papers, train_data, test_data, paper_idx,", "weights is not None: messages = messages * tf.expand_dims(weights, -1)", "== \"add\": # Add node_representations and aggregated_messages h = node_representations", "recurrent_activation=\"sigmoid\", dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate ) else: self._update_fn = create_ffn(hidden_units, dropout_rate)", "[num_edges] # neighbour_messages shape: [num_edges, representation_dim] num_nodes = tf.math.reduce_max(node_indices) +", "= tf.unstack(node_embeddings, axis=1)[-1] if self._normalize: node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1) return", "\", gnn_model([1, 10, 100])) gnn_model.summary() # Train the GNN model", "== \"gru\": # Create a sequence of two elements for", "run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate) # Plot the learning", "Evaluate on test data X_test = test_data.paper_id.to_numpy() y_test = test_data.subject", "from utils import display_learning_curves # Graph convolution layer class GraphConvLayer(layers.Layer):", "node_representations and aggregated_messages h = tf.concat([node_representations, aggregated_messages], axis=1) elif self._combination_type", "aggregation_type, combination_type, normalize, name=\"graph_conv1\" ) # Create the 2nd GraphConv", "graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name=\"gnn_model\" ) print(\"GNN output shape: \",", "*args, **kwargs ): super(GraphConvLayer, self).__init__(*args, **kwargs) self._aggregation_type = aggregation_type self._combination_type", "X_train = 
train_data.paper_id.to_numpy() y_train = train_data.subject history = run_experiment(gnn_model, X_train,", "aggregated_messages = self._aggregate(node_indices, neighbour_messages) # Update the node embedding with", "# Scale edge_weights to sum to 1 self._edge_weights = self._edge_weights", "= layers.Dense(units=num_classes, name=\"logits\") def call(self, input_node_indices): # Preprocess the node_features", "# Compute logits return self._compute_logits(node_embeddings) if __name__ == '__main__': papers,", "self._combination_type = combination_type self._normalize = normalize self._ffn_prepare = create_ffn(hidden_units, dropout_rate)", "logits layer self._compute_logits = layers.Dense(units=num_classes, name=\"logits\") def call(self, input_node_indices): #", "return messages def _aggregate(self, node_indices, neighbour_messages): # node_indices shape is", "the neighbour messages return self._update(node_representations, aggregated_messages) class GNNNodeClassifier(tf.keras.Model): def __init__(", "self._edges = edges self._edge_weights = edge_weights # Set edge_weights to", "= [32, 32] learning_rate = 0.01 dropout_rate = 0.5 epochs", "messages = messages * tf.expand_dims(weights, -1) return messages def _aggregate(self,", "layers.Dense(units=num_classes, name=\"logits\") def call(self, input_node_indices): # Preprocess the node_features to", "self._compute_logits = layers.Dense(units=num_classes, name=\"logits\") def call(self, input_node_indices): # Preprocess the", "aggregated_messages h = tf.concat([node_representations, aggregated_messages], axis=1) elif self._combination_type == \"add\":", "y_train, batch_size, epochs, learning_rate) # Plot the learning curves display_learning_curves(history,", "self).__init__(*args, **kwargs) # Unpack graph_info node_features, edges, edge_weights = graph_info", "dropout_rate) def _prepare(self, node_representations, weights=None): # node_representations shape is [num_edges,", "GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv1\" ) # Create", "with the neighbour messages return self._update(node_representations, aggregated_messages) class GNNNodeClassifier(tf.keras.Model): def", "combination_type, normalize, name=\"graph_conv1\" ) # Create the 2nd GraphConv layer", "GNN model X_train = train_data.paper_id.to_numpy() y_train = train_data.subject history =", "node_features.shape) gnn_model = GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name=\"gnn_model\" )", "# Create a compute logits layer self._compute_logits = layers.Dense(units=num_classes, name=\"logits\")", "display_learning_curves(history, figure_name=\"gnn.png\") # Evaluate on test data X_test = test_data.paper_id.to_numpy()", "train_data.subject history = run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate) #", "utils import display_learning_curves # Graph convolution layer class GraphConvLayer(layers.Layer): def", "import layers from load_cora import load_cora from baseline_model import create_ffn", "provided if self._edge_weights is None: self._edge_weights = tf.ones(shape=edges.shape[1]) # Scale", "== \"concat\": # Concatenate the node_representations and aggregated_messages h =", "representation_dim] # aggregated_messages shape is [num_nodes, representation_dim] if self._combination_type ==", "self._preprocess = create_ffn(hidden_units, dropout_rate, name=\"preprocess\") # Create the 1st GraphConv", "compute logits layer 
self._compute_logits = layers.Dense(units=num_classes, name=\"logits\") def call(self, input_node_indices):", "input_node_indices): # Preprocess the node_features to produce node representations x", "def _prepare(self, node_representations, weights=None): # node_representations shape is [num_edges, embedding_dim]", "display_learning_curves # Graph convolution layer class GraphConvLayer(layers.Layer): def __init__( self,", "= self._update_fn(h) if self._combination_type == \"gru\": node_embeddings = tf.unstack(node_embeddings, axis=1)[-1]", "self._preprocess(self._node_features) # Apply the 1st graph conv layer x1 =", "of shape [num_nodes, num_features] node_features = tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32) #", "elements for the GRU layer h = tf.stack([node_respresentations, aggregated_messages], axis=1)", "aggregated_messages], axis=1) elif self._combination_type == \"add\": # Add node_representations and", "self).__init__(*args, **kwargs) self._aggregation_type = aggregation_type self._combination_type = combination_type self._normalize =", "figure_name=\"gnn.png\") # Evaluate on test data X_test = test_data.paper_id.to_numpy() y_test", "dropout_rate, name=\"postprocess\") # Create a compute logits layer self._compute_logits =", "model X_train = train_data.paper_id.to_numpy() y_train = train_data.subject history = run_experiment(gnn_model,", "node_indices node_embeddings = tf.gather(x, input_node_indices) # Compute logits return self._compute_logits(node_embeddings)", "x = x2 + x # Postprocess node embedding x", "normalize, name=\"graph_conv1\" ) # Create the 2nd GraphConv layer self._conv2", "embedding x = self._postprocess(x) # Fetch node embeddings for the", "Graph convolution layer class GraphConvLayer(layers.Layer): def __init__( self, hidden_units, dropout_rate=0.2,", "self._normalize = normalize self._ffn_prepare = create_ffn(hidden_units, dropout_rate) if self._combination_type ==", "Returns: node_embeddings of shape [num_nodes, representation_dim]. 
\"\"\" node_representations, edges, edge_weights", "tf.ones(shape=edges.shape[1]) # Scale edge_weights to sum to 1 self._edge_weights =", "# Apply the 1st graph conv layer x1 = self._conv1((x,", "Compute logits return self._compute_logits(node_embeddings) if __name__ == '__main__': papers, train_data,", "function node_embeddings = self._update_fn(h) if self._combination_type == \"gru\": node_embeddings =", "(source) and neighbour_indices (target) from edges node_indices, neighbour_indices = edges[0],", "matrix) of shape [2, num_edges] edges = citations[[\"source\", \"target\"]].to_numpy().T #print(edges)", ") else: raise ValueError(f\"Invalid aggregation type: {self._aggregation_type}.\") return aggregated_message def", "node_representations, weights=None): # node_representations shape is [num_edges, embedding_dim] messages =", "call(self, input_node_indices): # Preprocess the node_features to produce node representations", "graph_info self._node_features = node_features self._edges = edges self._edge_weights = edge_weights", "node_features to produce node representations x = self._preprocess(self._node_features) # Apply", "name=\"gnn_model\" ) print(\"GNN output shape: \", gnn_model([1, 10, 100])) gnn_model.summary()", "Create a postprocess layer self._postprocess = create_ffn(hidden_units, dropout_rate, name=\"postprocess\") #", "shape is [num_nodes, representation_dim] if self._combination_type == \"gru\": # Create", "= tf.gather(node_representations, neighbour_indices) # Prepare the messages of the neighbours", "self._aggregation_type == \"sum\": aggregated_message = tf.math.unsorted_segment_sum( neighbour_messages, node_indices, num_segments=num_nodes )", "else: raise ValueError(f\"Invalid combination type: {self._combinatino_type}.\") # Apply the processing", "the node embedding with the neighbour messages return self._update(node_representations, aggregated_messages)", "self._conv1 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv1\" )", "node_indices (source) and neighbour_indices (target) from edges node_indices, neighbour_indices =", "# Preprocess the node_features to produce node representations x =", "graph_info node_features, edges, edge_weights = graph_info self._node_features = node_features self._edges", "dropout_rate, name=\"preprocess\") # Create the 1st GraphConv layer self._conv1 =", "learning curves display_learning_curves(history, figure_name=\"gnn.png\") # Evaluate on test data X_test", "# Create a process layer self._preprocess = create_ffn(hidden_units, dropout_rate, name=\"preprocess\")", "the GRU layer h = tf.stack([node_respresentations, aggregated_messages], axis=1) elif self._combination_type", "self._ffn_prepare(node_representations) if weights is not None: messages = messages *", "import run_experiment from utils import display_learning_curves # Graph convolution layer", "graph conv layer x2 = self._conv2((x, self._edges, self._edge_weights)) # Skip", "sum to 1 self._edge_weights = self._edge_weights / tf.math.reduce_sum(self._edge_weights) # Create", "self._aggregation_type == \"mean\": aggregated_message = tf.math.unsorted_segment_mean( neighbour_messages, node_indices, num_segments=num_nodes )", "the node_embeddings. 
Args: Inputs: A tuple of three elements: node_representations,", "node_indices, num_segments=num_nodes ) elif self._aggregation_type == \"max\": aggregated_message = tf.math.unsorted_segment_max(", "self._compute_logits(node_embeddings) if __name__ == '__main__': papers, train_data, test_data, paper_idx, class_idx,", "Update the node embedding with the neighbour messages return self._update(node_representations,", "is [num_edges, representation_dim] neighbour_representations = tf.gather(node_representations, neighbour_indices) # Prepare the", "\", node_features.shape) gnn_model = GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name=\"gnn_model\"", "gnn_model.summary() # Train the GNN model X_train = train_data.paper_id.to_numpy() y_train", "= run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate) # Plot the", "# Plot the learning curves display_learning_curves(history, figure_name=\"gnn.png\") # Evaluate on", "= aggregation_type self._combination_type = combination_type self._normalize = normalize self._ffn_prepare =", "aggregated_message = tf.math.unsorted_segment_mean( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type ==", "self._combination_type == \"concat\": # Concatenate the node_representations and aggregated_messages h", "papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32) # Create graph info tuple with node_features, edges,", "= inputs # Get node_indices (source) and neighbour_indices (target) from", "if self._combination_type == \"gru\": # Create a sequence of two", "= gnn_model.evaluate(x=X_test, y=y_test, verbose=1) print(f\"Test accuracy: {round(test_accuracy * 100, 2)}%\")", "= node_representations + aggregated_messages else: raise ValueError(f\"Invalid combination type: {self._combinatino_type}.\")", "messages def _aggregate(self, node_indices, neighbour_messages): # node_indices shape is [num_edges]", "dropout_rate=0.2, normalize=True, *args, **kwargs ): super(GNNNodeClassifier, self).__init__(*args, **kwargs) # Unpack", "aggregated_messages h = node_representations + aggregated_messages else: raise ValueError(f\"Invalid combination", "1 if self._aggregation_type == \"sum\": aggregated_message = tf.math.unsorted_segment_sum( neighbour_messages, node_indices,", "node_indices, neighbour_indices = edges[0], edges[1] # neighbour_representations shape is [num_edges,", "= edges[0], edges[1] # neighbour_representations shape is [num_edges, representation_dim] neighbour_representations", "300 batch_size = 256 # Create an edges array (sparse", "[2, num_edges] edges = citations[[\"source\", \"target\"]].to_numpy().T #print(edges) # Create an", "load_cora(verbose=1) num_features = len(feature_names) num_classes = len(class_idx) hidden_units = [32,", "self._combination_type == \"gru\": node_embeddings = tf.unstack(node_embeddings, axis=1)[-1] if self._normalize: node_embeddings", "representation_dim] num_nodes = tf.math.reduce_max(node_indices) + 1 if self._aggregation_type == \"sum\":", "normalize, name=\"graph_conv2\" ) # Create a postprocess layer self._postprocess =", "an edge weights array of ones (default weights) edge_weights =", "# aggregated_messages shape is [num_nodes, representation_dim] if self._combination_type == \"gru\":", "X_test = test_data.paper_id.to_numpy() y_test = test_data.subject _, test_accuracy = gnn_model.evaluate(x=X_test,", "# neighbour_messages shape: [num_edges, representation_dim] num_nodes = 
tf.math.reduce_max(node_indices) + 1", "run_experiment from utils import display_learning_curves # Graph convolution layer class", "Train the GNN model X_train = train_data.paper_id.to_numpy() y_train = train_data.subject", "__name__ == '__main__': papers, train_data, test_data, paper_idx, class_idx, citations, feature_names", "tensorflow import keras from tensorflow.keras import layers from load_cora import", "Aggregate the neighbour messages aggregated_messages = self._aggregate(node_indices, neighbour_messages) # Update", "combination_type=\"concat\", normalize=False, *args, **kwargs ): super(GraphConvLayer, self).__init__(*args, **kwargs) self._aggregation_type =", "layer self._conv2 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv2\"", "graph conv layer x1 = self._conv1((x, self._edges, self._edge_weights)) # Skip", "#print(edges) # Create an edge weights array of ones (default", "shape [num_nodes, representation_dim]. \"\"\" node_representations, edges, edge_weights = inputs #", "= self._conv1((x, self._edges, self._edge_weights)) # Skip connection x = x1", "GraphConv layer self._conv2 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize,", "self._conv2 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv2\" )", "= tf.nn.l2_normalize(node_embeddings, axis=-1) return node_embeddings def call(self, inputs): \"\"\"Process the", "to ones if not provided if self._edge_weights is None: self._edge_weights", "neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type == \"max\": aggregated_message =", "an edges array (sparse adjacency matrix) of shape [2, num_edges]", "shape is [num_nodes, representation_dim] # aggregated_messages shape is [num_nodes, representation_dim]", "call(self, inputs): \"\"\"Process the inputs to produce the node_embeddings. 
Args:", "__init__( self, hidden_units, dropout_rate=0.2, aggregation_type=\"mean\", combination_type=\"concat\", normalize=False, *args, **kwargs ):", "self._aggregation_type == \"max\": aggregated_message = tf.math.unsorted_segment_max( neighbour_messages, node_indices, num_segments=num_nodes )", ") # Create the 2nd GraphConv layer self._conv2 = GraphConvLayer(", "# Fetch node embeddings for the input node_indices node_embeddings =", "tf.ones(shape=edges.shape[1]) # Create a node features array of shape [num_nodes,", "test_data.subject _, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1) print(f\"Test accuracy: {round(test_accuracy", "num_classes, hidden_units, aggregation_type=\"sum\", combination_type=\"concat\", dropout_rate=0.2, normalize=True, *args, **kwargs ): super(GNNNodeClassifier,", "neighbour_messages) # Update the node embedding with the neighbour messages", "tf from tensorflow import keras from tensorflow.keras import layers from", "graph info tuple with node_features, edges, and edge_weights graph_info =", "-1) return messages def _aggregate(self, node_indices, neighbour_messages): # node_indices shape", "messages * tf.expand_dims(weights, -1) return messages def _aggregate(self, node_indices, neighbour_messages):", "utils import run_experiment from utils import display_learning_curves # Graph convolution", "= self._prepare(neighbour_representations, edge_weights) # Aggregate the neighbour messages aggregated_messages =", "the 2nd graph conv layer x2 = self._conv2((x, self._edges, self._edge_weights))", "num_segments=num_nodes ) elif self._aggregation_type == \"mean\": aggregated_message = tf.math.unsorted_segment_mean( neighbour_messages,", "== \"max\": aggregated_message = tf.math.unsorted_segment_max( neighbour_messages, node_indices, num_segments=num_nodes ) else:", "2nd GraphConv layer self._conv2 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type,", "def call(self, input_node_indices): # Preprocess the node_features to produce node", "layer self._postprocess = create_ffn(hidden_units, dropout_rate, name=\"postprocess\") # Create a compute", "of three elements: node_representations, edges, edge_weights. Returns: node_embeddings of shape", "the inputs to produce the node_embeddings. 
Args: Inputs: A tuple", "elif self._combination_type == \"add\": # Add node_representations and aggregated_messages h", "(sparse adjacency matrix) of shape [2, num_edges] edges = citations[[\"source\",", "gnn_model([1, 10, 100])) gnn_model.summary() # Train the GNN model X_train", "connection x = x1 + x # Apply the 2nd", "def call(self, inputs): \"\"\"Process the inputs to produce the node_embeddings.", "*args, **kwargs ): super(GNNNodeClassifier, self).__init__(*args, **kwargs) # Unpack graph_info node_features,", "combination_type=\"concat\", dropout_rate=0.2, normalize=True, *args, **kwargs ): super(GNNNodeClassifier, self).__init__(*args, **kwargs) #", "node_indices, neighbour_messages): # node_indices shape is [num_edges] # neighbour_messages shape:", "neighbour_messages shape: [num_edges, representation_dim] num_nodes = tf.math.reduce_max(node_indices) + 1 if", "self._prepare(neighbour_representations, edge_weights) # Aggregate the neighbour messages aggregated_messages = self._aggregate(node_indices,", "hidden_units, aggregation_type=\"sum\", combination_type=\"concat\", dropout_rate=0.2, normalize=True, *args, **kwargs ): super(GNNNodeClassifier, self).__init__(*args,", "citations[[\"source\", \"target\"]].to_numpy().T #print(edges) # Create an edge weights array of", "node_indices, num_segments=num_nodes ) elif self._aggregation_type == \"mean\": aggregated_message = tf.math.unsorted_segment_mean(", "aggregated_messages else: raise ValueError(f\"Invalid combination type: {self._combinatino_type}.\") # Apply the", "to sum to 1 self._edge_weights = self._edge_weights / tf.math.reduce_sum(self._edge_weights) #", "to produce node representations x = self._preprocess(self._node_features) # Apply the", "= 300 batch_size = 256 # Create an edges array", "return_state=True, recurrent_dropout=dropout_rate ) else: self._update_fn = create_ffn(hidden_units, dropout_rate) def _prepare(self,", "connection x = x2 + x # Postprocess node embedding", "None: messages = messages * tf.expand_dims(weights, -1) return messages def", "= self._postprocess(x) # Fetch node embeddings for the input node_indices", "self._update_fn(h) if self._combination_type == \"gru\": node_embeddings = tf.unstack(node_embeddings, axis=1)[-1] if", "Create an edge weights array of ones (default weights) edge_weights", "array of ones (default weights) edge_weights = tf.ones(shape=edges.shape[1]) # Create", "node_representations shape is [num_nodes, representation_dim] # aggregated_messages shape is [num_nodes,", "node_embeddings = tf.gather(x, input_node_indices) # Compute logits return self._compute_logits(node_embeddings) if", "dropout_rate) if self._combination_type == \"gated\": self._update_fn = layers.GRU( units=hidden_units, activation=\"tanh\",", "print(\"Nodes shape: \", node_features.shape) gnn_model = GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units,", "aggregated_message = tf.math.unsorted_segment_sum( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type ==", "axis=1) elif self._combination_type == \"add\": # Add node_representations and aggregated_messages", "# Aggregate the neighbour messages aggregated_messages = self._aggregate(node_indices, neighbour_messages) #", "tf.stack([node_respresentations, aggregated_messages], axis=1) elif self._combination_type == \"concat\": # Concatenate the", "self._combination_type == \"gated\": self._update_fn = layers.GRU( units=hidden_units, activation=\"tanh\", recurrent_activation=\"sigmoid\", 
dropout=dropout_rate,", "type: {self._combinatino_type}.\") # Apply the processing function node_embeddings = self._update_fn(h)", "Concatenate the node_representations and aggregated_messages h = tf.concat([node_representations, aggregated_messages], axis=1)", "if weights is not None: messages = messages * tf.expand_dims(weights,", "aggregation_type=\"sum\", combination_type=\"concat\", dropout_rate=0.2, normalize=True, *args, **kwargs ): super(GNNNodeClassifier, self).__init__(*args, **kwargs)", "= len(class_idx) hidden_units = [32, 32] learning_rate = 0.01 dropout_rate", "Create an edges array (sparse adjacency matrix) of shape [2,", "= tf.ones(shape=edges.shape[1]) # Create a node features array of shape", "= create_ffn(hidden_units, dropout_rate) if self._combination_type == \"gated\": self._update_fn = layers.GRU(", "representation_dim] neighbour_representations = tf.gather(node_representations, neighbour_indices) # Prepare the messages of", "edges array (sparse adjacency matrix) of shape [2, num_edges] edges", "# node_indices shape is [num_edges] # neighbour_messages shape: [num_edges, representation_dim]", "# Evaluate on test data X_test = test_data.paper_id.to_numpy() y_test =", "shape is [num_edges] # neighbour_messages shape: [num_edges, representation_dim] num_nodes =", "super(GNNNodeClassifier, self).__init__(*args, **kwargs) # Unpack graph_info node_features, edges, edge_weights =", "x2 = self._conv2((x, self._edges, self._edge_weights)) # Skip connection x =", "representations x = self._preprocess(self._node_features) # Apply the 1st graph conv", "node embedding with the neighbour messages return self._update(node_representations, aggregated_messages) class", "x = self._preprocess(self._node_features) # Apply the 1st graph conv layer", "shape [num_nodes, num_features] node_features = tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32) # Create", "= train_data.subject history = run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate)", "# Apply the 2nd graph conv layer x2 = self._conv2((x,", "produce node representations x = self._preprocess(self._node_features) # Apply the 1st", "Fetch node embeddings for the input node_indices node_embeddings = tf.gather(x,", "_, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1) print(f\"Test accuracy: {round(test_accuracy *", "\"max\": aggregated_message = tf.math.unsorted_segment_max( neighbour_messages, node_indices, num_segments=num_nodes ) else: raise", "aggregation_type, combination_type, normalize, name=\"graph_conv2\" ) # Create a postprocess layer", "embedding with the neighbour messages return self._update(node_representations, aggregated_messages) class GNNNodeClassifier(tf.keras.Model):", "= self._ffn_prepare(node_representations) if weights is not None: messages = messages", "edges, and edge_weights graph_info = (node_features, edges, edge_weights) print(\"Edges shape:", "0.5 epochs = 300 batch_size = 256 # Create an", "class GraphConvLayer(layers.Layer): def __init__( self, hidden_units, dropout_rate=0.2, aggregation_type=\"mean\", combination_type=\"concat\", normalize=False,", "tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32) # Create graph info tuple with node_features,", "Scale edge_weights to sum to 1 self._edge_weights = self._edge_weights /", "create_ffn from utils import run_experiment from utils import display_learning_curves #", "self._conv2((x, self._edges, self._edge_weights)) # Skip connection x = x2 +", 
"create_ffn(hidden_units, dropout_rate, name=\"postprocess\") # Create a compute logits layer self._compute_logits", "= tf.gather(x, input_node_indices) # Compute logits return self._compute_logits(node_embeddings) if __name__", "# Train the GNN model X_train = train_data.paper_id.to_numpy() y_train =", "aggregated_message = tf.math.unsorted_segment_max( neighbour_messages, node_indices, num_segments=num_nodes ) else: raise ValueError(f\"Invalid", "256 # Create an edges array (sparse adjacency matrix) of", "load_cora from baseline_model import create_ffn from utils import run_experiment from", ") print(\"GNN output shape: \", gnn_model([1, 10, 100])) gnn_model.summary() #", "process layer self._preprocess = create_ffn(hidden_units, dropout_rate, name=\"preprocess\") # Create the", "node representations x = self._preprocess(self._node_features) # Apply the 1st graph", "the 1st GraphConv layer self._conv1 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type,", "num_classes = len(class_idx) hidden_units = [32, 32] learning_rate = 0.01", "Create graph info tuple with node_features, edges, and edge_weights graph_info", "train_data, test_data, paper_idx, class_idx, citations, feature_names = load_cora(verbose=1) num_features =", "num_segments=num_nodes ) else: raise ValueError(f\"Invalid aggregation type: {self._aggregation_type}.\") return aggregated_message", "conv layer x1 = self._conv1((x, self._edges, self._edge_weights)) # Skip connection", "edge_weights to ones if not provided if self._edge_weights is None:", "self._postprocess = create_ffn(hidden_units, dropout_rate, name=\"postprocess\") # Create a compute logits", "= layers.GRU( units=hidden_units, activation=\"tanh\", recurrent_activation=\"sigmoid\", dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate ) else:", "features array of shape [num_nodes, num_features] node_features = tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(),", "combination_type, normalize, name=\"graph_conv2\" ) # Create a postprocess layer self._postprocess", "load_cora import load_cora from baseline_model import create_ffn from utils import", "# Get node_indices (source) and neighbour_indices (target) from edges node_indices,", "elements: node_representations, edges, edge_weights. 
Returns: node_embeddings of shape [num_nodes, representation_dim].", "__init__( self, graph_info, num_classes, hidden_units, aggregation_type=\"sum\", combination_type=\"concat\", dropout_rate=0.2, normalize=True, *args,", "messages return self._update(node_representations, aggregated_messages) class GNNNodeClassifier(tf.keras.Model): def __init__( self, graph_info,", "self._ffn_prepare = create_ffn(hidden_units, dropout_rate) if self._combination_type == \"gated\": self._update_fn =", "layer self._compute_logits = layers.Dense(units=num_classes, name=\"logits\") def call(self, input_node_indices): # Preprocess", "= x1 + x # Apply the 2nd graph conv", "dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv2\" ) # Create a postprocess", "# Skip connection x = x1 + x # Apply", "**kwargs) self._aggregation_type = aggregation_type self._combination_type = combination_type self._normalize = normalize", "self._edge_weights / tf.math.reduce_sum(self._edge_weights) # Create a process layer self._preprocess =", "shape [2, num_edges] edges = citations[[\"source\", \"target\"]].to_numpy().T #print(edges) # Create", "name=\"logits\") def call(self, input_node_indices): # Preprocess the node_features to produce", "# Skip connection x = x2 + x # Postprocess", "aggregation_type=\"mean\", combination_type=\"concat\", normalize=False, *args, **kwargs ): super(GraphConvLayer, self).__init__(*args, **kwargs) self._aggregation_type", "Apply the processing function node_embeddings = self._update_fn(h) if self._combination_type ==", "the 1st graph conv layer x1 = self._conv1((x, self._edges, self._edge_weights))", "edge_weights graph_info = (node_features, edges, edge_weights) print(\"Edges shape: \", edges.shape)", "messages aggregated_messages = self._aggregate(node_indices, neighbour_messages) # Update the node embedding", "h = tf.stack([node_respresentations, aggregated_messages], axis=1) elif self._combination_type == \"concat\": #", "y_train = train_data.subject history = run_experiment(gnn_model, X_train, y_train, batch_size, epochs,", "neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type == \"mean\": aggregated_message =", "layers.GRU( units=hidden_units, activation=\"tanh\", recurrent_activation=\"sigmoid\", dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate ) else: self._update_fn", "[num_nodes, representation_dim] # aggregated_messages shape is [num_nodes, representation_dim] if self._combination_type", "import load_cora from baseline_model import create_ffn from utils import run_experiment", "def __init__( self, graph_info, num_classes, hidden_units, aggregation_type=\"sum\", combination_type=\"concat\", dropout_rate=0.2, normalize=True,", "= test_data.subject _, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1) print(f\"Test accuracy:", "edge weights array of ones (default weights) edge_weights = tf.ones(shape=edges.shape[1])", "# Create the 2nd GraphConv layer self._conv2 = GraphConvLayer( hidden_units,", "\", edges.shape) print(\"Nodes shape: \", node_features.shape) gnn_model = GNNNodeClassifier( graph_info=graph_info,", "[num_edges, representation_dim] num_nodes = tf.math.reduce_max(node_indices) + 1 if self._aggregation_type ==", "self._aggregation_type = aggregation_type self._combination_type = combination_type self._normalize = normalize self._ffn_prepare", "edge_weights = tf.ones(shape=edges.shape[1]) # Create a node features array of", "edge_weights) print(\"Edges shape: \", edges.shape) 
print(\"Nodes shape: \", node_features.shape) gnn_model", "a sequence of two elements for the GRU layer h", "edges, edge_weights) print(\"Edges shape: \", edges.shape) print(\"Nodes shape: \", node_features.shape)", "= self._conv2((x, self._edges, self._edge_weights)) # Skip connection x = x2", "and neighbour_indices (target) from edges node_indices, neighbour_indices = edges[0], edges[1]", "10, 100])) gnn_model.summary() # Train the GNN model X_train =", "shape: \", node_features.shape) gnn_model = GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate,", "messages of the neighbours neighbour_messages = self._prepare(neighbour_representations, edge_weights) # Aggregate", "tf.math.reduce_sum(self._edge_weights) # Create a process layer self._preprocess = create_ffn(hidden_units, dropout_rate,", ") elif self._aggregation_type == \"mean\": aggregated_message = tf.math.unsorted_segment_mean( neighbour_messages, node_indices,", "self._conv1((x, self._edges, self._edge_weights)) # Skip connection x = x1 +", "input_node_indices) # Compute logits return self._compute_logits(node_embeddings) if __name__ == '__main__':", "Set edge_weights to ones if not provided if self._edge_weights is", "and aggregated_messages h = tf.concat([node_representations, aggregated_messages], axis=1) elif self._combination_type ==", "None: self._edge_weights = tf.ones(shape=edges.shape[1]) # Scale edge_weights to sum to", "ValueError(f\"Invalid aggregation type: {self._aggregation_type}.\") return aggregated_message def _update(self, node_representations, aggregated_messages):", "node_indices shape is [num_edges] # neighbour_messages shape: [num_edges, representation_dim] num_nodes", "activation=\"tanh\", recurrent_activation=\"sigmoid\", dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate ) else: self._update_fn = create_ffn(hidden_units,", "axis=-1) return node_embeddings def call(self, inputs): \"\"\"Process the inputs to", "* tf.expand_dims(weights, -1) return messages def _aggregate(self, node_indices, neighbour_messages): #", "layer h = tf.stack([node_respresentations, aggregated_messages], axis=1) elif self._combination_type == \"concat\":", "epochs = 300 batch_size = 256 # Create an edges", "# Create a postprocess layer self._postprocess = create_ffn(hidden_units, dropout_rate, name=\"postprocess\")", "return self._update(node_representations, aggregated_messages) class GNNNodeClassifier(tf.keras.Model): def __init__( self, graph_info, num_classes,", "edge_weights to sum to 1 self._edge_weights = self._edge_weights / tf.math.reduce_sum(self._edge_weights)", "node_embeddings = self._update_fn(h) if self._combination_type == \"gru\": node_embeddings = tf.unstack(node_embeddings,", "a process layer self._preprocess = create_ffn(hidden_units, dropout_rate, name=\"preprocess\") # Create", "is [num_nodes, representation_dim] if self._combination_type == \"gru\": # Create a", "x1 = self._conv1((x, self._edges, self._edge_weights)) # Skip connection x =", "is [num_edges] # neighbour_messages shape: [num_edges, representation_dim] num_nodes = tf.math.reduce_max(node_indices)", "num_nodes = tf.math.reduce_max(node_indices) + 1 if self._aggregation_type == \"sum\": aggregated_message", "2nd graph conv layer x2 = self._conv2((x, self._edges, self._edge_weights)) #", "# Create a node features array of shape [num_nodes, num_features]", "tf.unstack(node_embeddings, axis=1)[-1] if self._normalize: node_embeddings = 
tf.nn.l2_normalize(node_embeddings, axis=-1) return node_embeddings", "edge_weights = inputs # Get node_indices (source) and neighbour_indices (target)", "self._edge_weights = tf.ones(shape=edges.shape[1]) # Scale edge_weights to sum to 1", "tf.gather(node_representations, neighbour_indices) # Prepare the messages of the neighbours neighbour_messages", "[num_nodes, representation_dim]. \"\"\" node_representations, edges, edge_weights = inputs # Get", "# Create a sequence of two elements for the GRU", "GraphConvLayer(layers.Layer): def __init__( self, hidden_units, dropout_rate=0.2, aggregation_type=\"mean\", combination_type=\"concat\", normalize=False, *args,", "test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1) print(f\"Test accuracy: {round(test_accuracy * 100,", "output shape: \", gnn_model([1, 10, 100])) gnn_model.summary() # Train the", "citations, feature_names = load_cora(verbose=1) num_features = len(feature_names) num_classes = len(class_idx)", "tf.gather(x, input_node_indices) # Compute logits return self._compute_logits(node_embeddings) if __name__ ==", "tf.math.reduce_max(node_indices) + 1 if self._aggregation_type == \"sum\": aggregated_message = tf.math.unsorted_segment_sum(", "): super(GNNNodeClassifier, self).__init__(*args, **kwargs) # Unpack graph_info node_features, edges, edge_weights", "Prepare the messages of the neighbours neighbour_messages = self._prepare(neighbour_representations, edge_weights)", "Args: Inputs: A tuple of three elements: node_representations, edges, edge_weights.", "learning_rate) # Plot the learning curves display_learning_curves(history, figure_name=\"gnn.png\") # Evaluate", "adjacency matrix) of shape [2, num_edges] edges = citations[[\"source\", \"target\"]].to_numpy().T", "hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv2\" ) # Create a", "num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name=\"gnn_model\" ) print(\"GNN output shape: \", gnn_model([1,", "as tf from tensorflow import keras from tensorflow.keras import layers", "the node_features to produce node representations x = self._preprocess(self._node_features) #", "ones (default weights) edge_weights = tf.ones(shape=edges.shape[1]) # Create a node", "node_representations, edges, edge_weights. Returns: node_embeddings of shape [num_nodes, representation_dim]. 
\"\"\"", "num_features = len(feature_names) num_classes = len(class_idx) hidden_units = [32, 32]", "Create a sequence of two elements for the GRU layer", "== '__main__': papers, train_data, test_data, paper_idx, class_idx, citations, feature_names =", "GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name=\"gnn_model\" ) print(\"GNN output shape:", "(target) from edges node_indices, neighbour_indices = edges[0], edges[1] # neighbour_representations", "= GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name=\"gnn_model\" ) print(\"GNN output", "= (node_features, edges, edge_weights) print(\"Edges shape: \", edges.shape) print(\"Nodes shape:", "Create a process layer self._preprocess = create_ffn(hidden_units, dropout_rate, name=\"preprocess\") #", "import display_learning_curves # Graph convolution layer class GraphConvLayer(layers.Layer): def __init__(", "self._normalize: node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1) return node_embeddings def call(self, inputs):", "\"sum\": aggregated_message = tf.math.unsorted_segment_sum( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type", "\"gru\": # Create a sequence of two elements for the", "of shape [num_nodes, representation_dim]. \"\"\" node_representations, edges, edge_weights = inputs", "of the neighbours neighbour_messages = self._prepare(neighbour_representations, edge_weights) # Aggregate the", "1 self._edge_weights = self._edge_weights / tf.math.reduce_sum(self._edge_weights) # Create a process", "graph_info = (node_features, edges, edge_weights) print(\"Edges shape: \", edges.shape) print(\"Nodes", "# Create the 1st GraphConv layer self._conv1 = GraphConvLayer( hidden_units,", "Skip connection x = x1 + x # Apply the", "aggregation_type self._combination_type = combination_type self._normalize = normalize self._ffn_prepare = create_ffn(hidden_units,", "layer class GraphConvLayer(layers.Layer): def __init__( self, hidden_units, dropout_rate=0.2, aggregation_type=\"mean\", combination_type=\"concat\",", "Skip connection x = x2 + x # Postprocess node", "epochs, learning_rate) # Plot the learning curves display_learning_curves(history, figure_name=\"gnn.png\") #", "postprocess layer self._postprocess = create_ffn(hidden_units, dropout_rate, name=\"postprocess\") # Create a", "edge_weights. Returns: node_embeddings of shape [num_nodes, representation_dim]. \"\"\" node_representations, edges,", "= tf.math.unsorted_segment_sum( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type == \"mean\":", "node_indices, num_segments=num_nodes ) else: raise ValueError(f\"Invalid aggregation type: {self._aggregation_type}.\") return", "layer x2 = self._conv2((x, self._edges, self._edge_weights)) # Skip connection x", "print(\"Edges shape: \", edges.shape) print(\"Nodes shape: \", node_features.shape) gnn_model =", "shape: \", edges.shape) print(\"Nodes shape: \", node_features.shape) gnn_model = GNNNodeClassifier(", "GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv2\" ) # Create", "node_embeddings of shape [num_nodes, representation_dim]. 
\"\"\" node_representations, edges, edge_weights =", "sequence of two elements for the GRU layer h =", "self, hidden_units, dropout_rate=0.2, aggregation_type=\"mean\", combination_type=\"concat\", normalize=False, *args, **kwargs ): super(GraphConvLayer,", "shape is [num_edges, embedding_dim] messages = self._ffn_prepare(node_representations) if weights is", "[32, 32] learning_rate = 0.01 dropout_rate = 0.5 epochs =", "node embeddings for the input node_indices node_embeddings = tf.gather(x, input_node_indices)", "axis=1) elif self._combination_type == \"concat\": # Concatenate the node_representations and", "= tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32) # Create graph info tuple with", "[num_edges, representation_dim] neighbour_representations = tf.gather(node_representations, neighbour_indices) # Prepare the messages", "= tf.math.unsorted_segment_mean( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type == \"max\":", "_update(self, node_representations, aggregated_messages): # node_representations shape is [num_nodes, representation_dim] #", "shape: [num_edges, representation_dim] num_nodes = tf.math.reduce_max(node_indices) + 1 if self._aggregation_type", "paper_idx, class_idx, citations, feature_names = load_cora(verbose=1) num_features = len(feature_names) num_classes", "node_representations and aggregated_messages h = node_representations + aggregated_messages else: raise", "elif self._combination_type == \"concat\": # Concatenate the node_representations and aggregated_messages", "= citations[[\"source\", \"target\"]].to_numpy().T #print(edges) # Create an edge weights array", "node_embeddings. Args: Inputs: A tuple of three elements: node_representations, edges,", "node_features = tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32) # Create graph info tuple", "= train_data.paper_id.to_numpy() y_train = train_data.subject history = run_experiment(gnn_model, X_train, y_train,", "1st graph conv layer x1 = self._conv1((x, self._edges, self._edge_weights)) #", "import tensorflow as tf from tensorflow import keras from tensorflow.keras", "_prepare(self, node_representations, weights=None): # node_representations shape is [num_edges, embedding_dim] messages", "normalize=False, *args, **kwargs ): super(GraphConvLayer, self).__init__(*args, **kwargs) self._aggregation_type = aggregation_type", "type: {self._aggregation_type}.\") return aggregated_message def _update(self, node_representations, aggregated_messages): # node_representations", "ones if not provided if self._edge_weights is None: self._edge_weights =", "node embedding x = self._postprocess(x) # Fetch node embeddings for", "**kwargs ): super(GNNNodeClassifier, self).__init__(*args, **kwargs) # Unpack graph_info node_features, edges,", "weights) edge_weights = tf.ones(shape=edges.shape[1]) # Create a node features array", "self._edge_weights)) # Skip connection x = x1 + x #", "normalize self._ffn_prepare = create_ffn(hidden_units, dropout_rate) if self._combination_type == \"gated\": self._update_fn", "processing function node_embeddings = self._update_fn(h) if self._combination_type == \"gru\": node_embeddings", "Unpack graph_info node_features, edges, edge_weights = graph_info self._node_features = node_features", "dropout_rate = 0.5 epochs = 300 batch_size = 256 #", "if self._combination_type == \"gru\": node_embeddings = tf.unstack(node_embeddings, axis=1)[-1] if self._normalize:", "a compute logits layer 
self._compute_logits = layers.Dense(units=num_classes, name=\"logits\") def call(self,", "= tf.math.reduce_max(node_indices) + 1 if self._aggregation_type == \"sum\": aggregated_message =", "self._edges, self._edge_weights)) # Skip connection x = x2 + x", "(node_features, edges, edge_weights) print(\"Edges shape: \", edges.shape) print(\"Nodes shape: \",", "# Update the node embedding with the neighbour messages return", "recurrent_dropout=dropout_rate ) else: self._update_fn = create_ffn(hidden_units, dropout_rate) def _prepare(self, node_representations,", "aggregated_messages) class GNNNodeClassifier(tf.keras.Model): def __init__( self, graph_info, num_classes, hidden_units, aggregation_type=\"sum\",", "edges.shape) print(\"Nodes shape: \", node_features.shape) gnn_model = GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes,", "= 0.01 dropout_rate = 0.5 epochs = 300 batch_size =", "tf.math.unsorted_segment_max( neighbour_messages, node_indices, num_segments=num_nodes ) else: raise ValueError(f\"Invalid aggregation type:", "two elements for the GRU layer h = tf.stack([node_respresentations, aggregated_messages],", "y_test = test_data.subject _, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1) print(f\"Test", "neighbours neighbour_messages = self._prepare(neighbour_representations, edge_weights) # Aggregate the neighbour messages", "neighbour_indices (target) from edges node_indices, neighbour_indices = edges[0], edges[1] #", "node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1) return node_embeddings def call(self, inputs): \"\"\"Process", "if self._combination_type == \"gated\": self._update_fn = layers.GRU( units=hidden_units, activation=\"tanh\", recurrent_activation=\"sigmoid\",", "neighbour_representations = tf.gather(node_representations, neighbour_indices) # Prepare the messages of the", "hidden_units, dropout_rate=0.2, aggregation_type=\"mean\", combination_type=\"concat\", normalize=False, *args, **kwargs ): super(GraphConvLayer, self).__init__(*args,", "a postprocess layer self._postprocess = create_ffn(hidden_units, dropout_rate, name=\"postprocess\") # Create", "and aggregated_messages h = node_representations + aggregated_messages else: raise ValueError(f\"Invalid", "node_features, edges, and edge_weights graph_info = (node_features, edges, edge_weights) print(\"Edges", "len(class_idx) hidden_units = [32, 32] learning_rate = 0.01 dropout_rate =", "super(GraphConvLayer, self).__init__(*args, **kwargs) self._aggregation_type = aggregation_type self._combination_type = combination_type self._normalize", "\"mean\": aggregated_message = tf.math.unsorted_segment_mean( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type", "logits return self._compute_logits(node_embeddings) if __name__ == '__main__': papers, train_data, test_data,", "of two elements for the GRU layer h = tf.stack([node_respresentations,", "conv layer x2 = self._conv2((x, self._edges, self._edge_weights)) # Skip connection", "edges, edge_weights. Returns: node_embeddings of shape [num_nodes, representation_dim]. 
\"\"\" node_representations,", "edges node_indices, neighbour_indices = edges[0], edges[1] # neighbour_representations shape is", "aggregated_messages], axis=1) elif self._combination_type == \"concat\": # Concatenate the node_representations", "from baseline_model import create_ffn from utils import run_experiment from utils", "GNNNodeClassifier(tf.keras.Model): def __init__( self, graph_info, num_classes, hidden_units, aggregation_type=\"sum\", combination_type=\"concat\", dropout_rate=0.2,", "neighbour_messages, node_indices, num_segments=num_nodes ) else: raise ValueError(f\"Invalid aggregation type: {self._aggregation_type}.\")", "if __name__ == '__main__': papers, train_data, test_data, paper_idx, class_idx, citations,", "shape: \", gnn_model([1, 10, 100])) gnn_model.summary() # Train the GNN", "edges[0], edges[1] # neighbour_representations shape is [num_edges, representation_dim] neighbour_representations =", "tf.expand_dims(weights, -1) return messages def _aggregate(self, node_indices, neighbour_messages): # node_indices", "= tf.ones(shape=edges.shape[1]) # Scale edge_weights to sum to 1 self._edge_weights", "batch_size = 256 # Create an edges array (sparse adjacency", "self._aggregate(node_indices, neighbour_messages) # Update the node embedding with the neighbour", "edge_weights = graph_info self._node_features = node_features self._edges = edges self._edge_weights", "): super(GraphConvLayer, self).__init__(*args, **kwargs) self._aggregation_type = aggregation_type self._combination_type = combination_type", "from tensorflow.keras import layers from load_cora import load_cora from baseline_model", "the neighbour messages aggregated_messages = self._aggregate(node_indices, neighbour_messages) # Update the", "train_data.paper_id.to_numpy() y_train = train_data.subject history = run_experiment(gnn_model, X_train, y_train, batch_size,", "# Create an edges array (sparse adjacency matrix) of shape", "the learning curves display_learning_curves(history, figure_name=\"gnn.png\") # Evaluate on test data", "= GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv1\" ) #", "**kwargs) # Unpack graph_info node_features, edges, edge_weights = graph_info self._node_features", "+ x # Postprocess node embedding x = self._postprocess(x) #", "neighbour_indices) # Prepare the messages of the neighbours neighbour_messages =", "raise ValueError(f\"Invalid aggregation type: {self._aggregation_type}.\") return aggregated_message def _update(self, node_representations,", "tf.concat([node_representations, aggregated_messages], axis=1) elif self._combination_type == \"add\": # Add node_representations", "self._update_fn = create_ffn(hidden_units, dropout_rate) def _prepare(self, node_representations, weights=None): # node_representations", "input node_indices node_embeddings = tf.gather(x, input_node_indices) # Compute logits return", "self._edge_weights is None: self._edge_weights = tf.ones(shape=edges.shape[1]) # Scale edge_weights to", "dtype=tf.float32) # Create graph info tuple with node_features, edges, and", "+ 1 if self._aggregation_type == \"sum\": aggregated_message = tf.math.unsorted_segment_sum( neighbour_messages,", "== \"gated\": self._update_fn = layers.GRU( units=hidden_units, activation=\"tanh\", recurrent_activation=\"sigmoid\", dropout=dropout_rate, return_state=True,", "the processing function node_embeddings = self._update_fn(h) if self._combination_type == \"gru\":", "hidden_units=hidden_units, dropout_rate=dropout_rate, 
name=\"gnn_model\" ) print(\"GNN output shape: \", gnn_model([1, 10,", "GRU layer h = tf.stack([node_respresentations, aggregated_messages], axis=1) elif self._combination_type ==", "test_data, paper_idx, class_idx, citations, feature_names = load_cora(verbose=1) num_features = len(feature_names)", "'__main__': papers, train_data, test_data, paper_idx, class_idx, citations, feature_names = load_cora(verbose=1)", "info tuple with node_features, edges, and edge_weights graph_info = (node_features,", "x = x1 + x # Apply the 2nd graph", "layer self._conv1 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv1\"", "GraphConv layer self._conv1 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize,", "# Unpack graph_info node_features, edges, edge_weights = graph_info self._node_features =", "history = run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate) # Plot", "inputs # Get node_indices (source) and neighbour_indices (target) from edges", "the neighbours neighbour_messages = self._prepare(neighbour_representations, edge_weights) # Aggregate the neighbour", ") elif self._aggregation_type == \"max\": aggregated_message = tf.math.unsorted_segment_max( neighbour_messages, node_indices,", "# Apply the processing function node_embeddings = self._update_fn(h) if self._combination_type", "dropout_rate=dropout_rate, name=\"gnn_model\" ) print(\"GNN output shape: \", gnn_model([1, 10, 100]))", "= messages * tf.expand_dims(weights, -1) return messages def _aggregate(self, node_indices,", "normalize=True, *args, **kwargs ): super(GNNNodeClassifier, self).__init__(*args, **kwargs) # Unpack graph_info", "is [num_edges, embedding_dim] messages = self._ffn_prepare(node_representations) if weights is not", "axis=1)[-1] if self._normalize: node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1) return node_embeddings def", "to 1 self._edge_weights = self._edge_weights / tf.math.reduce_sum(self._edge_weights) # Create a", "array (sparse adjacency matrix) of shape [2, num_edges] edges =", "== \"sum\": aggregated_message = tf.math.unsorted_segment_sum( neighbour_messages, node_indices, num_segments=num_nodes ) elif", "= len(feature_names) num_classes = len(class_idx) hidden_units = [32, 32] learning_rate", "name=\"postprocess\") # Create a compute logits layer self._compute_logits = layers.Dense(units=num_classes,", "\"concat\": # Concatenate the node_representations and aggregated_messages h = tf.concat([node_representations,", "tf.math.unsorted_segment_mean( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type == \"max\": aggregated_message", "[num_nodes, representation_dim] if self._combination_type == \"gru\": # Create a sequence", "tensorflow as tf from tensorflow import keras from tensorflow.keras import", "# Prepare the messages of the neighbours neighbour_messages = self._prepare(neighbour_representations,", "of ones (default weights) edge_weights = tf.ones(shape=edges.shape[1]) # Create a", "for the input node_indices node_embeddings = tf.gather(x, input_node_indices) # Compute", "the 2nd GraphConv layer self._conv2 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type,", "**kwargs ): super(GraphConvLayer, self).__init__(*args, **kwargs) self._aggregation_type = aggregation_type self._combination_type =", "node_embeddings = tf.unstack(node_embeddings, axis=1)[-1] if self._normalize: node_embeddings = tf.nn.l2_normalize(node_embeddings, 
axis=-1)", "self, graph_info, num_classes, hidden_units, aggregation_type=\"sum\", combination_type=\"concat\", dropout_rate=0.2, normalize=True, *args, **kwargs", "tensorflow.keras import layers from load_cora import load_cora from baseline_model import", "is None: self._edge_weights = tf.ones(shape=edges.shape[1]) # Scale edge_weights to sum", "self._node_features = node_features self._edges = edges self._edge_weights = edge_weights #", "a node features array of shape [num_nodes, num_features] node_features =", "graph_info, num_classes, hidden_units, aggregation_type=\"sum\", combination_type=\"concat\", dropout_rate=0.2, normalize=True, *args, **kwargs ):", "class_idx, citations, feature_names = load_cora(verbose=1) num_features = len(feature_names) num_classes =", "= tf.math.unsorted_segment_max( neighbour_messages, node_indices, num_segments=num_nodes ) else: raise ValueError(f\"Invalid aggregation", "edges = citations[[\"source\", \"target\"]].to_numpy().T #print(edges) # Create an edge weights", "self._edges, self._edge_weights)) # Skip connection x = x1 + x", "num_segments=num_nodes ) elif self._aggregation_type == \"max\": aggregated_message = tf.math.unsorted_segment_max( neighbour_messages,", ") else: self._update_fn = create_ffn(hidden_units, dropout_rate) def _prepare(self, node_representations, weights=None):", "x2 + x # Postprocess node embedding x = self._postprocess(x)", "= 0.5 epochs = 300 batch_size = 256 # Create", "batch_size, epochs, learning_rate) # Plot the learning curves display_learning_curves(history, figure_name=\"gnn.png\")", "= self._aggregate(node_indices, neighbour_messages) # Update the node embedding with the", "= normalize self._ffn_prepare = create_ffn(hidden_units, dropout_rate) if self._combination_type == \"gated\":", "name=\"preprocess\") # Create the 1st GraphConv layer self._conv1 = GraphConvLayer(", "is [num_nodes, representation_dim] # aggregated_messages shape is [num_nodes, representation_dim] if", "Create the 2nd GraphConv layer self._conv2 = GraphConvLayer( hidden_units, dropout_rate,", "learning_rate = 0.01 dropout_rate = 0.5 epochs = 300 batch_size", "neighbour messages return self._update(node_representations, aggregated_messages) class GNNNodeClassifier(tf.keras.Model): def __init__( self,", "node_features, edges, edge_weights = graph_info self._node_features = node_features self._edges =", "layer x1 = self._conv1((x, self._edges, self._edge_weights)) # Skip connection x", "not None: messages = messages * tf.expand_dims(weights, -1) return messages", "node_representations shape is [num_edges, embedding_dim] messages = self._ffn_prepare(node_representations) if weights", "if self._aggregation_type == \"sum\": aggregated_message = tf.math.unsorted_segment_sum( neighbour_messages, node_indices, num_segments=num_nodes", "dropout_rate, aggregation_type, combination_type, normalize, name=\"graph_conv1\" ) # Create the 2nd", "edge_weights # Set edge_weights to ones if not provided if", "edge_weights) # Aggregate the neighbour messages aggregated_messages = self._aggregate(node_indices, neighbour_messages)", "array of shape [num_nodes, num_features] node_features = tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32)", "100])) gnn_model.summary() # Train the GNN model X_train = train_data.paper_id.to_numpy()", "Create a node features array of shape [num_nodes, num_features] node_features", "node_representations, edges, edge_weights = inputs # Get node_indices (source) and", "import keras from tensorflow.keras 
import layers from load_cora import load_cora", "# Set edge_weights to ones if not provided if self._edge_weights", "= tf.concat([node_representations, aggregated_messages], axis=1) elif self._combination_type == \"add\": # Add", "tf.math.unsorted_segment_sum( neighbour_messages, node_indices, num_segments=num_nodes ) elif self._aggregation_type == \"mean\": aggregated_message", "= self._preprocess(self._node_features) # Apply the 1st graph conv layer x1", "units=hidden_units, activation=\"tanh\", recurrent_activation=\"sigmoid\", dropout=dropout_rate, return_state=True, recurrent_dropout=dropout_rate ) else: self._update_fn =", "aggregated_message def _update(self, node_representations, aggregated_messages): # node_representations shape is [num_nodes,", "messages = self._ffn_prepare(node_representations) if weights is not None: messages =", "= self._edge_weights / tf.math.reduce_sum(self._edge_weights) # Create a process layer self._preprocess", "feature_names = load_cora(verbose=1) num_features = len(feature_names) num_classes = len(class_idx) hidden_units", "+ aggregated_messages else: raise ValueError(f\"Invalid combination type: {self._combinatino_type}.\") # Apply", "num_features] node_features = tf.cast( papers.sort_values(\"paper_id\")[feature_names].to_numpy(), dtype=tf.float32) # Create graph info", "= edge_weights # Set edge_weights to ones if not provided", "embedding_dim] messages = self._ffn_prepare(node_representations) if weights is not None: messages", "weights array of ones (default weights) edge_weights = tf.ones(shape=edges.shape[1]) #", "elif self._aggregation_type == \"mean\": aggregated_message = tf.math.unsorted_segment_mean( neighbour_messages, node_indices, num_segments=num_nodes", "edges self._edge_weights = edge_weights # Set edge_weights to ones if", "self._edge_weights)) # Skip connection x = x2 + x #", "x1 + x # Apply the 2nd graph conv layer", "\"add\": # Add node_representations and aggregated_messages h = node_representations +", "on test data X_test = test_data.paper_id.to_numpy() y_test = test_data.subject _,", "class GNNNodeClassifier(tf.keras.Model): def __init__( self, graph_info, num_classes, hidden_units, aggregation_type=\"sum\", combination_type=\"concat\"," ]
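A minimal sketch of the segment-based aggregation that GraphConvLayer._aggregate relies on, using only TensorFlow ops; the toy edge list and per-edge messages below are invented for illustration, not taken from the Cora data. Each edge's message is reduced into the slot of its target node index:

import tensorflow as tf

# Toy graph: 3 nodes, 4 directed edges; each edge carries one scalar message
# destined for the node given in node_indices.
node_indices = tf.constant([0, 0, 1, 2])
neighbour_messages = tf.constant([[1.0], [2.0], [3.0], [4.0]])
num_nodes = tf.math.reduce_max(node_indices) + 1

# "sum" aggregation: node 0 receives 1.0 + 2.0, node 1 gets 3.0, node 2 gets 4.0
aggregated = tf.math.unsorted_segment_sum(
    neighbour_messages, node_indices, num_segments=num_nodes
)
print(aggregated.numpy())  # [[3.], [3.], [4.]]

Swapping in tf.math.unsorted_segment_mean or tf.math.unsorted_segment_max reproduces the "mean" and "max" branches of the layer.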
[ "def get_entries(self): \"\"\"Get all the Samsung RemoteControlReceiver entries.\"\"\" return self.find_by_st(", "description location.\"\"\" info = super().info_from_entry(entry) # Strip the forced prefix,", "For some models, Samsung forces a [TV] prefix to the", "'[TV]' class Discoverable(SSDPDiscoverable): \"\"\"Add support for discovering Samsung Smart TV", "by default the description location.\"\"\" info = super().info_from_entry(entry) # Strip", "Discoverable(SSDPDiscoverable): \"\"\"Add support for discovering Samsung Smart TV services.\"\"\" def", "forced prefix, if present if info[ATTR_NAME].startswith(FORCED_NAME_PREFIX): info[ATTR_NAME] = info[ATTR_NAME][len(FORCED_NAME_PREFIX):].strip() return", "= '[TV]' class Discoverable(SSDPDiscoverable): \"\"\"Add support for discovering Samsung Smart", "# For some models, Samsung forces a [TV] prefix to", "return self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\") def info_from_entry(self, entry): \"\"\"Get most important info,", "discovering Samsung Smart TV services.\"\"\" def get_entries(self): \"\"\"Get all the", "# Strip the forced prefix, if present if info[ATTR_NAME].startswith(FORCED_NAME_PREFIX): info[ATTR_NAME]", "Samsung RemoteControlReceiver entries.\"\"\" return self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\") def info_from_entry(self, entry): \"\"\"Get", "all the Samsung RemoteControlReceiver entries.\"\"\" return self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\") def info_from_entry(self,", "the user-specified name. FORCED_NAME_PREFIX = '[TV]' class Discoverable(SSDPDiscoverable): \"\"\"Add support", "for discovering Samsung Smart TV services.\"\"\" def get_entries(self): \"\"\"Get all", "the Samsung RemoteControlReceiver entries.\"\"\" return self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\") def info_from_entry(self, entry):", "..const import ATTR_NAME # For some models, Samsung forces a", "\"\"\"Get all the Samsung RemoteControlReceiver entries.\"\"\" return self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\") def", "class Discoverable(SSDPDiscoverable): \"\"\"Add support for discovering Samsung Smart TV services.\"\"\"", "[TV] prefix to the user-specified name. FORCED_NAME_PREFIX = '[TV]' class", "name. FORCED_NAME_PREFIX = '[TV]' class Discoverable(SSDPDiscoverable): \"\"\"Add support for discovering", "default the description location.\"\"\" info = super().info_from_entry(entry) # Strip the", "entry): \"\"\"Get most important info, by default the description location.\"\"\"", "= super().info_from_entry(entry) # Strip the forced prefix, if present if", "import SSDPDiscoverable from ..const import ATTR_NAME # For some models,", "user-specified name. FORCED_NAME_PREFIX = '[TV]' class Discoverable(SSDPDiscoverable): \"\"\"Add support for", "\"\"\"Get most important info, by default the description location.\"\"\" info", "\"\"\"Discover Samsung Smart TV services.\"\"\" from . 
import SSDPDiscoverable from", "support for discovering Samsung Smart TV services.\"\"\" def get_entries(self): \"\"\"Get", "get_entries(self): \"\"\"Get all the Samsung RemoteControlReceiver entries.\"\"\" return self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\")", "FORCED_NAME_PREFIX = '[TV]' class Discoverable(SSDPDiscoverable): \"\"\"Add support for discovering Samsung", "info = super().info_from_entry(entry) # Strip the forced prefix, if present", "super().info_from_entry(entry) # Strip the forced prefix, if present if info[ATTR_NAME].startswith(FORCED_NAME_PREFIX):", "the forced prefix, if present if info[ATTR_NAME].startswith(FORCED_NAME_PREFIX): info[ATTR_NAME] = info[ATTR_NAME][len(FORCED_NAME_PREFIX):].strip()", "models, Samsung forces a [TV] prefix to the user-specified name.", "from . import SSDPDiscoverable from ..const import ATTR_NAME # For", "Smart TV services.\"\"\" from . import SSDPDiscoverable from ..const import", "from ..const import ATTR_NAME # For some models, Samsung forces", "TV services.\"\"\" def get_entries(self): \"\"\"Get all the Samsung RemoteControlReceiver entries.\"\"\"", "def info_from_entry(self, entry): \"\"\"Get most important info, by default the", "location.\"\"\" info = super().info_from_entry(entry) # Strip the forced prefix, if", "the description location.\"\"\" info = super().info_from_entry(entry) # Strip the forced", "TV services.\"\"\" from . import SSDPDiscoverable from ..const import ATTR_NAME", "most important info, by default the description location.\"\"\" info =", "services.\"\"\" def get_entries(self): \"\"\"Get all the Samsung RemoteControlReceiver entries.\"\"\" return", "self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\") def info_from_entry(self, entry): \"\"\"Get most important info, by", "a [TV] prefix to the user-specified name. FORCED_NAME_PREFIX = '[TV]'", "\"\"\"Add support for discovering Samsung Smart TV services.\"\"\" def get_entries(self):", "SSDPDiscoverable from ..const import ATTR_NAME # For some models, Samsung", "\"urn:samsung.com:device:RemoteControlReceiver:1\") def info_from_entry(self, entry): \"\"\"Get most important info, by default", ". import SSDPDiscoverable from ..const import ATTR_NAME # For some", "Samsung Smart TV services.\"\"\" def get_entries(self): \"\"\"Get all the Samsung", "important info, by default the description location.\"\"\" info = super().info_from_entry(entry)", "info_from_entry(self, entry): \"\"\"Get most important info, by default the description", "Smart TV services.\"\"\" def get_entries(self): \"\"\"Get all the Samsung RemoteControlReceiver", "entries.\"\"\" return self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\") def info_from_entry(self, entry): \"\"\"Get most important", "Strip the forced prefix, if present if info[ATTR_NAME].startswith(FORCED_NAME_PREFIX): info[ATTR_NAME] =", "forces a [TV] prefix to the user-specified name. FORCED_NAME_PREFIX =", "ATTR_NAME # For some models, Samsung forces a [TV] prefix", "import ATTR_NAME # For some models, Samsung forces a [TV]", "Samsung Smart TV services.\"\"\" from . import SSDPDiscoverable from ..const", "Samsung forces a [TV] prefix to the user-specified name. FORCED_NAME_PREFIX", "prefix to the user-specified name. FORCED_NAME_PREFIX = '[TV]' class Discoverable(SSDPDiscoverable):", "to the user-specified name. 
FORCED_NAME_PREFIX = '[TV]' class Discoverable(SSDPDiscoverable): \"\"\"Add", "info, by default the description location.\"\"\" info = super().info_from_entry(entry) #", "some models, Samsung forces a [TV] prefix to the user-specified", "RemoteControlReceiver entries.\"\"\" return self.find_by_st( \"urn:samsung.com:device:RemoteControlReceiver:1\") def info_from_entry(self, entry): \"\"\"Get most", "services.\"\"\" from . import SSDPDiscoverable from ..const import ATTR_NAME #", "prefix, if present if info[ATTR_NAME].startswith(FORCED_NAME_PREFIX): info[ATTR_NAME] = info[ATTR_NAME][len(FORCED_NAME_PREFIX):].strip() return info" ]
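A quick illustration of the prefix stripping performed in info_from_entry; the device name below is a made-up sample, not a value from a real SSDP response:

# Hypothetical sample name, for illustration only.
name = "[TV] Living Room"
if name.startswith(FORCED_NAME_PREFIX):
    name = name[len(FORCED_NAME_PREFIX):].strip()
print(name)  # -> "Living Room"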
[ "<reponame>Juhanostby/django-apotek-sapmi # Generated by Django 3.2.5 on 2021-12-21 19:42 from", "Django 3.2.5 on 2021-12-21 19:42 from django.db import migrations class", "3.2.5 on 2021-12-21 19:42 from django.db import migrations class Migration(migrations.Migration):", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('employees',", "import migrations class Migration(migrations.Migration): dependencies = [ ('employees', '0001_initial'), ]", "Migration(migrations.Migration): dependencies = [ ('employees', '0001_initial'), ] operations = [", "= [ ('employees', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='employee',", "2021-12-21 19:42 from django.db import migrations class Migration(migrations.Migration): dependencies =", "class Migration(migrations.Migration): dependencies = [ ('employees', '0001_initial'), ] operations =", "on 2021-12-21 19:42 from django.db import migrations class Migration(migrations.Migration): dependencies", "19:42 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "[ ('employees', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='employee', name='phone_alt',", "('employees', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='employee', name='phone_alt', ),", "'0001_initial'), ] operations = [ migrations.RemoveField( model_name='employee', name='phone_alt', ), ]", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('employees', '0001_initial'),", "migrations class Migration(migrations.Migration): dependencies = [ ('employees', '0001_initial'), ] operations", "# Generated by Django 3.2.5 on 2021-12-21 19:42 from django.db", "Generated by Django 3.2.5 on 2021-12-21 19:42 from django.db import", "dependencies = [ ('employees', '0001_initial'), ] operations = [ migrations.RemoveField(", "by Django 3.2.5 on 2021-12-21 19:42 from django.db import migrations" ]
[ "there was a problem while fetching emotes from a source.'''", "class EmoteFetchError(Exception): '''Exception stating that there was a problem while", "'''Exception stating that there was a problem while fetching emotes", "stating that there was a problem while fetching emotes from", "EmoteFetchError(Exception): '''Exception stating that there was a problem while fetching", "that there was a problem while fetching emotes from a" ]
[ "print(f\"\\t\\tShow Loops: {m.showLoops(time_loops)}\") print() print (f\"\\t\\tTOTAL messages not transmitted:\" f\"", "time import json import random import logging.config import networkx as", "ROUTING algorithm to define how path messages in the topology", "\"\"\" logging.info(\" Performing simulation: %i \" % it) s.run(stop_time) #", "nx.generators.binomial_tree(size) # In NX-lib there are a lot of Graphs", "files. Check out examples folder size = 3 t.G =", "some users and a set of apps with only one", "yafs.distribution import deterministic_distribution from yafs.stats import Stats RANDOM_SEED = 1", "print(t.G.nodes()) # nodes id can be str or int print()", "pos = nx.spring_layout(t.G) nx.draw(t.G, pos, with_labels=True, edge_color='black', width=1, alpha=0.7) print(t.G.nodes())", "it=iteration) print(\"\\n--- %s seconds ---\" % (time.time() - start_time)) print(\"Simulation", "of mandatory attributes of a Topology ## Attr. on edges", "SERVICE PLACEMENT \"\"\" placementJson = json.load(open('data/allocDefinition.json')) placement = JSONPlacement(name=\"Placement\", json=placementJson)", "import networkx as nx import numpy as np from pathlib", "can have a different routing algorithm \"\"\" Deploy users \"\"\"", "it: - %i\" % iteration) start_time = time.time() main(stop_time=simulationDuration, it=iteration)", "JSONPlacement(name=\"Placement\", json=placementJson) \"\"\" Defining ROUTING algorithm to define how path", "in t.G.edges()} nx.set_edge_attributes(t.G, name=\"PR\", values=attPR_BW) nx.set_edge_attributes(t.G, name=\"BW\", values=attPR_BW) ## Attr.", "\"\"\" userJSON = json.load(open('data/usersDefinition.json')) for user in userJSON[\"sources\"]: app_name =", "\" f\"{m.average_messages_not_transmitted()}\") print() print (\"\\t\\tPeak of waiting messages :\" f\"{m.peak_messages_not_transmitted()}\")", "are a lot of Graphs generators # Definition of mandatory", "it): folder_results = Path(\"results/\") folder_results.mkdir(parents=True, exist_ok=True) folder_results = str(folder_results)+\"/\" \"\"\"", "of Graphs generators # Definition of mandatory attributes of a", "JSONPlacement from yafs.path_routing import DeviceSpeedAwareRouting from yafs.distribution import deterministic_distribution from", "\"\"\" selectorPath = DeviceSpeedAwareRouting() \"\"\" SIMULATION ENGINE \"\"\" s =", "# IPT attIPT = {x: random.randrange(100, 900, 100) for x", "This is the most simple scenario with a basic topology,", "Attr. on edges # PR (link propagation) and BW (bandwith)", "from Stats class time_loops = [[\"M.USER.APP.0\", \"M.USER.APP.1\", \"M.USER.APP.2\", \"M.USER.APP.3\"]] m.showResults2(10000,", "- last step \"\"\" logging.info(\" Performing simulation: %i \" %", "it) s.run(stop_time) # To test deployments put test_initial_deploy a TRUE", "messages : \" f\"{m.average_messages_not_transmitted()}\") print() print (\"\\t\\tPeak of waiting messages", "a different routing algorithm \"\"\" Deploy users \"\"\" userJSON =", "= user[\"id_resource\"] dist = deterministic_distribution(100, name=\"Deterministic\") idDES = s.deploy_source(app_name, id_node=node,", "name=\"PR\", values=attPR_BW) nx.set_edge_attributes(t.G, name=\"BW\", values=attPR_BW) ## Attr. on nodes #", "print (\"\\tNetwork bytes transmitted:\") # print (f\"\\t\\t{m.bytes_transmitted():.1f}\") # m.df_link.head(15) #", "= deterministic_distribution(100, name=\"Deterministic\") idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist) \"\"\"", "Topology ## Attr. 
on edges # PR (link propagation) and", "\" % it) s.run(stop_time) # To test deployments put test_initial_deploy", "experiment simulationDuration = 1000 # Iteration for each experiment changing", "transmitted:\") # print (f\"\\t\\t{m.bytes_transmitted():.1f}\") # m.df_link.head(15) # from Stats class", "import Sim from yafs.application import create_applications_from_json from yafs.topology import Topology", "(\"\\tNetwork bytes transmitted:\") # print (f\"\\t\\t{m.bytes_transmitted():.1f}\") # m.df_link.head(15) # from", "1 for x in t.G.edges()} nx.set_edge_attributes(t.G, name=\"PR\", values=attPR_BW) nx.set_edge_attributes(t.G, name=\"BW\",", "for aName in apps.keys(): s.deploy_app(apps[aName], placement, selectorPath) # Note: each", "import DeviceSpeedAwareRouting from yafs.distribution import deterministic_distribution from yafs.stats import Stats", "from yafs.distribution import deterministic_distribution from yafs.stats import Stats RANDOM_SEED =", "{x: random.randrange(100, 900, 100) for x in t.G.nodes()} nx.set_node_attributes(t.G, name=\"IPT\",", "Note: each app can have a different routing algorithm \"\"\"", "-\") print() print (\"\\t\\tAverage waiting messages : \" f\"{m.average_messages_not_transmitted()}\") print()", "\"\"\" placementJson = json.load(open('data/allocDefinition.json')) placement = JSONPlacement(name=\"Placement\", json=placementJson) \"\"\" Defining", "= nx.generators.binomial_tree(size) # In NX-lib there are a lot of", "in apps.keys(): s.deploy_app(apps[aName], placement, selectorPath) # Note: each app can", "t.G = nx.generators.binomial_tree(size) # In NX-lib there are a lot", "from yafs.placement import JSONPlacement from yafs.path_routing import DeviceSpeedAwareRouting from yafs.distribution", "idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist) \"\"\" RUNNING - last", "import Topology from yafs.placement import JSONPlacement from yafs.path_routing import DeviceSpeedAwareRouting", "to define how path messages in the topology among modules", "# you can export the Graph in multiples format to", "msg = app.get_message(user[\"message\"]) node = user[\"id_resource\"] dist = deterministic_distribution(100, name=\"Deterministic\")", "- %i\" % iteration) start_time = time.time() main(stop_time=simulationDuration, it=iteration) print(\"\\n---", "= json.load(open('data/allocDefinition.json')) placement = JSONPlacement(name=\"Placement\", json=placementJson) \"\"\" Defining ROUTING algorithm", "= Path(\"results/\") folder_results.mkdir(parents=True, exist_ok=True) folder_results = str(folder_results)+\"/\" \"\"\" TOPOLOGY \"\"\"", "algorithm to define how path messages in the topology among", "np.random.seed(RANDOM_SEED) t = Topology() # You also can create a", "last step \"\"\" logging.info(\" Performing simulation: %i \" % it)", "logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations = 1 # iteration for each", "Fix position of nodes for drawing random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) t =", "os import time import json import random import logging.config import", "= Topology() # You also can create a topology using", "different routing algorithm \"\"\" Deploy users \"\"\" userJSON = json.load(open('data/usersDefinition.json'))", "% iteration) start_time = time.time() main(stop_time=simulationDuration, it=iteration) print(\"\\n--- %s seconds", "apps with only one service. 
@author: <NAME> \"\"\" import os", "Graph visualization pos = nx.spring_layout(t.G) nx.draw(t.G, pos, with_labels=True, edge_color='black', width=1,", "## Attr. on nodes # IPT attIPT = {x: random.randrange(100,", "start_time = time.time() main(stop_time=simulationDuration, it=iteration) print(\"\\n--- %s seconds ---\" %", "topology among modules \"\"\" selectorPath = DeviceSpeedAwareRouting() \"\"\" SIMULATION ENGINE", "m.showResults2(10000, time_loops=time_loops) m.compute_times_df() print (\"\\t- Network saturation -\") print() print", "yafs.application import create_applications_from_json from yafs.topology import Topology from yafs.placement import", "__name__ == '__main__': logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations = 1 #", "time_loops=time_loops) m.compute_times_df() print (\"\\t- Network saturation -\") print() print (\"\\t\\tAverage", "id_node=node, msg=msg, distribution=dist) \"\"\" RUNNING - last step \"\"\" logging.info(\"", "Sim(t, default_results_path=folder_results+\"sim_trace\") \"\"\" Deploy services == APP's modules \"\"\" for", "s.print_debug_assignaments() if __name__ == '__main__': logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations =", "s = Sim(t, default_results_path=folder_results+\"sim_trace\") \"\"\" Deploy services == APP's modules", "# In NX-lib there are a lot of Graphs generators", "attributes of a Topology ## Attr. on edges # PR", "BW (bandwith) are 1 unit attPR_BW = {x: 1 for", "put test_initial_deploy a TRUE s.print_debug_assignaments() if __name__ == '__main__': logging.config.fileConfig(os.getcwd()", "deterministic_distribution(100, name=\"Deterministic\") idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist) \"\"\" RUNNING", "'/logging.ini') nIterations = 1 # iteration for each experiment simulationDuration", "%i\" % iteration) start_time = time.time() main(stop_time=simulationDuration, it=iteration) print(\"\\n--- %s", "also can create a topology using JSONs files. Check out", "<NAME> \"\"\" import os import time import json import random", "nodes # IPT attIPT = {x: random.randrange(100, 900, 100) for", "print() \"\"\" APPLICATION or SERVICES \"\"\" dataApp = json.load(open('data/appDefinition.json')) apps", "and a set of apps with only one service. @author:", "format to view in tools like Gephi, and so on.", "print (\"\\t\\tAverage waiting messages : \" f\"{m.average_messages_not_transmitted()}\") print() print (\"\\t\\tPeak", "= 1 # iteration for each experiment simulationDuration = 1000", "= nx.spring_layout(t.G) nx.draw(t.G, pos, with_labels=True, edge_color='black', width=1, alpha=0.7) print(t.G.nodes()) #", "and so on. nx.write_graphml(t.G,folder_results+\"graph_binomial_tree_%i.graphml\"%size) # Graph visualization pos = nx.spring_layout(t.G)", "import os import time import json import random import logging.config", "= json.load(open('data/usersDefinition.json')) for user in userJSON[\"sources\"]: app_name = user[\"app\"] app", "\"\"\" SERVICE PLACEMENT \"\"\" placementJson = json.load(open('data/allocDefinition.json')) placement = JSONPlacement(name=\"Placement\",", "Path(\"results/\") folder_results.mkdir(parents=True, exist_ok=True) folder_results = str(folder_results)+\"/\" \"\"\" TOPOLOGY \"\"\" #", "s.deploy_app(apps[aName], placement, selectorPath) # Note: each app can have a", "on. 
nx.write_graphml(t.G,folder_results+\"graph_binomial_tree_%i.graphml\"%size) # Graph visualization pos = nx.spring_layout(t.G) nx.draw(t.G, pos,", "{m.showLoops(time_loops)}\") print() print (f\"\\t\\tTOTAL messages not transmitted:\" f\" {m.messages_not_transmitted()}\") print()", "yafs.path_routing import DeviceSpeedAwareRouting from yafs.distribution import deterministic_distribution from yafs.stats import", "pathlib import Path from yafs.core import Sim from yafs.application import", "distribution=dist) \"\"\" RUNNING - last step \"\"\" logging.info(\" Performing simulation:", "how path messages in the topology among modules \"\"\" selectorPath", "modules \"\"\" selectorPath = DeviceSpeedAwareRouting() \"\"\" SIMULATION ENGINE \"\"\" s", "t = Topology() # You also can create a topology", "Network saturation -\") print() print (\"\\t\\tAverage waiting messages : \"", "m.compute_times_df() print (\"\\t- Network saturation -\") print() print (\"\\t\\tAverage waiting", "using JSONs files. Check out examples folder size = 3", "= DeviceSpeedAwareRouting() \"\"\" SIMULATION ENGINE \"\"\" s = Sim(t, default_results_path=folder_results+\"sim_trace\")", "a set of apps with only one service. @author: <NAME>", "# Fix position of nodes for drawing random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) t", "exist_ok=True) folder_results = str(folder_results)+\"/\" \"\"\" TOPOLOGY \"\"\" # Fix position", "only one service. @author: <NAME> \"\"\" import os import time", "are 1 unit attPR_BW = {x: 1 for x in", "width=1, alpha=0.7) print(t.G.nodes()) # nodes id can be str or", "= json.load(open('data/appDefinition.json')) apps = create_applications_from_json(dataApp) # print(apps) \"\"\" SERVICE PLACEMENT", "routing algorithm \"\"\" Deploy users \"\"\" userJSON = json.load(open('data/usersDefinition.json')) for", "import logging.config import networkx as nx import numpy as np", "{x: 1 for x in t.G.edges()} nx.set_edge_attributes(t.G, name=\"PR\", values=attPR_BW) nx.set_edge_attributes(t.G,", "\"\"\" SIMULATION ENGINE \"\"\" s = Sim(t, default_results_path=folder_results+\"sim_trace\") \"\"\" Deploy", "import time import json import random import logging.config import networkx", "most simple scenario with a basic topology, some users and", "Deploy services == APP's modules \"\"\" for aName in apps.keys():", "f\"{m.peak_messages_not_transmitted()}\") print() print(f\"\\t\\tShow Loops: {m.showLoops(time_loops)}\") print() print (f\"\\t\\tTOTAL messages not", "userJSON = json.load(open('data/usersDefinition.json')) for user in userJSON[\"sources\"]: app_name = user[\"app\"]", "Sim from yafs.application import create_applications_from_json from yafs.topology import Topology from", "# from Stats class time_loops = [[\"M.USER.APP.0\", \"M.USER.APP.1\", \"M.USER.APP.2\", \"M.USER.APP.3\"]]", "import numpy as np from pathlib import Path from yafs.core", "= s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist) \"\"\" RUNNING - last step", "simple scenario with a basic topology, some users and a", "= {x: 1 for x in t.G.edges()} nx.set_edge_attributes(t.G, name=\"PR\", values=attPR_BW)", "t.G.nodes()} nx.set_node_attributes(t.G, name=\"IPT\", values=attIPT) # nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size) # you can export", "Topology from yafs.placement import JSONPlacement from yafs.path_routing import DeviceSpeedAwareRouting from", "APP's modules \"\"\" for aName in apps.keys(): s.deploy_app(apps[aName], placement, selectorPath)", "from pathlib import Path from yafs.core import Sim 
from yafs.application", "tools like Gephi, and so on. nx.write_graphml(t.G,folder_results+\"graph_binomial_tree_%i.graphml\"%size) # Graph visualization", "with a basic topology, some users and a set of", "t.G.edges()} nx.set_edge_attributes(t.G, name=\"PR\", values=attPR_BW) nx.set_edge_attributes(t.G, name=\"BW\", values=attPR_BW) ## Attr. on", "= {x: random.randrange(100, 900, 100) for x in t.G.nodes()} nx.set_node_attributes(t.G,", "nx.set_node_attributes(t.G, name=\"IPT\", values=attIPT) # nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size) # you can export the", "test deployments put test_initial_deploy a TRUE s.print_debug_assignaments() if __name__ ==", "\"M.USER.APP.1\", \"M.USER.APP.2\", \"M.USER.APP.3\"]] m.showResults2(10000, time_loops=time_loops) m.compute_times_df() print (\"\\t- Network saturation", "for each experiment changing the seed of randoms for iteration", "values=attIPT) # nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size) # you can export the Graph in", ": \" f\"{m.average_messages_not_transmitted()}\") print() print (\"\\t\\tPeak of waiting messages :\"", "\"\"\" for aName in apps.keys(): s.deploy_app(apps[aName], placement, selectorPath) # Note:", "yafs.placement import JSONPlacement from yafs.path_routing import DeviceSpeedAwareRouting from yafs.distribution import", "nx.write_graphml(t.G,folder_results+\"graph_binomial_tree_%i.graphml\"%size) # Graph visualization pos = nx.spring_layout(t.G) nx.draw(t.G, pos, with_labels=True,", "import JSONPlacement from yafs.path_routing import DeviceSpeedAwareRouting from yafs.distribution import deterministic_distribution", "edge_color='black', width=1, alpha=0.7) print(t.G.nodes()) # nodes id can be str", "nx.draw(t.G, pos, with_labels=True, edge_color='black', width=1, alpha=0.7) print(t.G.nodes()) # nodes id", "among modules \"\"\" selectorPath = DeviceSpeedAwareRouting() \"\"\" SIMULATION ENGINE \"\"\"", "\"\"\" # Fix position of nodes for drawing random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED)", "for x in t.G.edges()} nx.set_edge_attributes(t.G, name=\"PR\", values=attPR_BW) nx.set_edge_attributes(t.G, name=\"BW\", values=attPR_BW)", "scenario with a basic topology, some users and a set", "m = Stats(defaultPath=\"results/sim_trace\") # print (\"\\tNetwork bytes transmitted:\") # print", "# nodes id can be str or int print() print(nx.get_node_attributes(t.G,", "can export the Graph in multiples format to view in", "DeviceSpeedAwareRouting from yafs.distribution import deterministic_distribution from yafs.stats import Stats RANDOM_SEED", "\"\"\" Deploy services == APP's modules \"\"\" for aName in", "the topology among modules \"\"\" selectorPath = DeviceSpeedAwareRouting() \"\"\" SIMULATION", "visualization pos = nx.spring_layout(t.G) nx.draw(t.G, pos, with_labels=True, edge_color='black', width=1, alpha=0.7)", "[[\"M.USER.APP.0\", \"M.USER.APP.1\", \"M.USER.APP.2\", \"M.USER.APP.3\"]] m.showResults2(10000, time_loops=time_loops) m.compute_times_df() print (\"\\t- Network", "(f\"\\t\\t{m.bytes_transmitted():.1f}\") # m.df_link.head(15) # from Stats class time_loops = [[\"M.USER.APP.0\",", "Stats class time_loops = [[\"M.USER.APP.0\", \"M.USER.APP.1\", \"M.USER.APP.2\", \"M.USER.APP.3\"]] m.showResults2(10000, time_loops=time_loops)", "in multiples format to view in tools like Gephi, and", "nx import numpy as np from pathlib import Path from", "generators # Definition of mandatory attributes of a Topology ##", "1000 # Iteration for each experiment changing the seed of", "== '__main__': 
logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations = 1 # iteration", "messages :\" f\"{m.peak_messages_not_transmitted()}\") print() print(f\"\\t\\tShow Loops: {m.showLoops(time_loops)}\") print() print (f\"\\t\\tTOTAL", "(bandwith) are 1 unit attPR_BW = {x: 1 for x", "there are a lot of Graphs generators # Definition of", "# iteration for each experiment simulationDuration = 1000 # Iteration", "import Stats RANDOM_SEED = 1 def main(stop_time, it): folder_results =", "NX-lib there are a lot of Graphs generators # Definition", "deployments put test_initial_deploy a TRUE s.print_debug_assignaments() if __name__ == '__main__':", "int print() print(nx.get_node_attributes(t.G, \"IPT\")) print() \"\"\" APPLICATION or SERVICES \"\"\"", "Done!\") m = Stats(defaultPath=\"results/sim_trace\") # print (\"\\tNetwork bytes transmitted:\") #", "for x in t.G.nodes()} nx.set_node_attributes(t.G, name=\"IPT\", values=attIPT) # nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size) #", "dist = deterministic_distribution(100, name=\"Deterministic\") idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist)", "print(\"Simulation Done!\") m = Stats(defaultPath=\"results/sim_trace\") # print (\"\\tNetwork bytes transmitted:\")", "time_loops = [[\"M.USER.APP.0\", \"M.USER.APP.1\", \"M.USER.APP.2\", \"M.USER.APP.3\"]] m.showResults2(10000, time_loops=time_loops) m.compute_times_df() print", "dataApp = json.load(open('data/appDefinition.json')) apps = create_applications_from_json(dataApp) # print(apps) \"\"\" SERVICE", "the Graph in multiples format to view in tools like", "print(apps) \"\"\" SERVICE PLACEMENT \"\"\" placementJson = json.load(open('data/allocDefinition.json')) placement =", "100) for x in t.G.nodes()} nx.set_node_attributes(t.G, name=\"IPT\", values=attIPT) # nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size)", "placement = JSONPlacement(name=\"Placement\", json=placementJson) \"\"\" Defining ROUTING algorithm to define", "print() print (\"\\t\\tPeak of waiting messages :\" f\"{m.peak_messages_not_transmitted()}\") print() print(f\"\\t\\tShow", "To test deployments put test_initial_deploy a TRUE s.print_debug_assignaments() if __name__", "a TRUE s.print_debug_assignaments() if __name__ == '__main__': logging.config.fileConfig(os.getcwd() + '/logging.ini')", "== APP's modules \"\"\" for aName in apps.keys(): s.deploy_app(apps[aName], placement,", "In NX-lib there are a lot of Graphs generators #", "saturation -\") print() print (\"\\t\\tAverage waiting messages : \" f\"{m.average_messages_not_transmitted()}\")", "from yafs.path_routing import DeviceSpeedAwareRouting from yafs.distribution import deterministic_distribution from yafs.stats", "Defining ROUTING algorithm to define how path messages in the", "x in t.G.edges()} nx.set_edge_attributes(t.G, name=\"PR\", values=attPR_BW) nx.set_edge_attributes(t.G, name=\"BW\", values=attPR_BW) ##", "create a topology using JSONs files. Check out examples folder", "str(folder_results)+\"/\" \"\"\" TOPOLOGY \"\"\" # Fix position of nodes for", "Attr. 
on nodes # IPT attIPT = {x: random.randrange(100, 900,", "alpha=0.7) print(t.G.nodes()) # nodes id can be str or int", "user[\"app\"] app = s.apps[app_name] msg = app.get_message(user[\"message\"]) node = user[\"id_resource\"]", "= s.apps[app_name] msg = app.get_message(user[\"message\"]) node = user[\"id_resource\"] dist =", "app = s.apps[app_name] msg = app.get_message(user[\"message\"]) node = user[\"id_resource\"] dist", "\"\"\" TOPOLOGY \"\"\" # Fix position of nodes for drawing", "print (f\"\\t\\tTOTAL messages not transmitted:\" f\" {m.messages_not_transmitted()}\") print() #print(m.df.head()) #print(m.df['time_latency'])", "users and a set of apps with only one service.", "each experiment changing the seed of randoms for iteration in", "in the topology among modules \"\"\" selectorPath = DeviceSpeedAwareRouting() \"\"\"", "ENGINE \"\"\" s = Sim(t, default_results_path=folder_results+\"sim_trace\") \"\"\" Deploy services ==", "app_name = user[\"app\"] app = s.apps[app_name] msg = app.get_message(user[\"message\"]) node", "main(stop_time=simulationDuration, it=iteration) print(\"\\n--- %s seconds ---\" % (time.time() - start_time))", "the most simple scenario with a basic topology, some users", "of nodes for drawing random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) t = Topology() #", "s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist) \"\"\" RUNNING - last step \"\"\"", "apps = create_applications_from_json(dataApp) # print(apps) \"\"\" SERVICE PLACEMENT \"\"\" placementJson", "@author: <NAME> \"\"\" import os import time import json import", "so on. nx.write_graphml(t.G,folder_results+\"graph_binomial_tree_%i.graphml\"%size) # Graph visualization pos = nx.spring_layout(t.G) nx.draw(t.G,", "folder_results = str(folder_results)+\"/\" \"\"\" TOPOLOGY \"\"\" # Fix position of", "(\"\\t\\tPeak of waiting messages :\" f\"{m.peak_messages_not_transmitted()}\") print() print(f\"\\t\\tShow Loops: {m.showLoops(time_loops)}\")", "\"\"\" s = Sim(t, default_results_path=folder_results+\"sim_trace\") \"\"\" Deploy services == APP's", "---\" % (time.time() - start_time)) print(\"Simulation Done!\") m = Stats(defaultPath=\"results/sim_trace\")", "logging.config import networkx as nx import numpy as np from", "attIPT = {x: random.randrange(100, 900, 100) for x in t.G.nodes()}", "iteration) start_time = time.time() main(stop_time=simulationDuration, it=iteration) print(\"\\n--- %s seconds ---\"", "\"\"\" import os import time import json import random import", "out examples folder size = 3 t.G = nx.generators.binomial_tree(size) #", "have a different routing algorithm \"\"\" Deploy users \"\"\" userJSON", "seed of randoms for iteration in range(nIterations): random.seed(iteration) logging.info(\"Running experiment", "nx.spring_layout(t.G) nx.draw(t.G, pos, with_labels=True, edge_color='black', width=1, alpha=0.7) print(t.G.nodes()) # nodes", "Performing simulation: %i \" % it) s.run(stop_time) # To test", "3 t.G = nx.generators.binomial_tree(size) # In NX-lib there are a", "# Definition of mandatory attributes of a Topology ## Attr.", "name=\"BW\", values=attPR_BW) ## Attr. 
on nodes # IPT attIPT =", "algorithm \"\"\" Deploy users \"\"\" userJSON = json.load(open('data/usersDefinition.json')) for user", "pos, with_labels=True, edge_color='black', width=1, alpha=0.7) print(t.G.nodes()) # nodes id can", "# print (\"\\tNetwork bytes transmitted:\") # print (f\"\\t\\t{m.bytes_transmitted():.1f}\") # m.df_link.head(15)", "simulation: %i \" % it) s.run(stop_time) # To test deployments", "# Iteration for each experiment changing the seed of randoms", "name=\"IPT\", values=attIPT) # nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size) # you can export the Graph", "define how path messages in the topology among modules \"\"\"", "yafs.topology import Topology from yafs.placement import JSONPlacement from yafs.path_routing import", "changing the seed of randoms for iteration in range(nIterations): random.seed(iteration)", "like Gephi, and so on. nx.write_graphml(t.G,folder_results+\"graph_binomial_tree_%i.graphml\"%size) # Graph visualization pos", "placementJson = json.load(open('data/allocDefinition.json')) placement = JSONPlacement(name=\"Placement\", json=placementJson) \"\"\" Defining ROUTING", "(f\"\\t\\tTOTAL messages not transmitted:\" f\" {m.messages_not_transmitted()}\") print() #print(m.df.head()) #print(m.df['time_latency']) #print(m.df_link.head())", "IPT attIPT = {x: random.randrange(100, 900, 100) for x in", "values=attPR_BW) ## Attr. on nodes # IPT attIPT = {x:", "topology, some users and a set of apps with only", "path messages in the topology among modules \"\"\" selectorPath =", "np from pathlib import Path from yafs.core import Sim from", "%s seconds ---\" % (time.time() - start_time)) print(\"Simulation Done!\") m", "# Graph visualization pos = nx.spring_layout(t.G) nx.draw(t.G, pos, with_labels=True, edge_color='black',", "json=placementJson) \"\"\" Defining ROUTING algorithm to define how path messages", "in userJSON[\"sources\"]: app_name = user[\"app\"] app = s.apps[app_name] msg =", "Topology() # You also can create a topology using JSONs", "as np from pathlib import Path from yafs.core import Sim", "'__main__': logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations = 1 # iteration for", "mandatory attributes of a Topology ## Attr. on edges #", "print() print(nx.get_node_attributes(t.G, \"IPT\")) print() \"\"\" APPLICATION or SERVICES \"\"\" dataApp", "Loops: {m.showLoops(time_loops)}\") print() print (f\"\\t\\tTOTAL messages not transmitted:\" f\" {m.messages_not_transmitted()}\")", "Gephi, and so on. nx.write_graphml(t.G,folder_results+\"graph_binomial_tree_%i.graphml\"%size) # Graph visualization pos =", "set of apps with only one service. @author: <NAME> \"\"\"", "1 # iteration for each experiment simulationDuration = 1000 #", "start_time)) print(\"Simulation Done!\") m = Stats(defaultPath=\"results/sim_trace\") # print (\"\\tNetwork bytes", "\"M.USER.APP.3\"]] m.showResults2(10000, time_loops=time_loops) m.compute_times_df() print (\"\\t- Network saturation -\") print()", "service. @author: <NAME> \"\"\" import os import time import json", "f\"{m.average_messages_not_transmitted()}\") print() print (\"\\t\\tPeak of waiting messages :\" f\"{m.peak_messages_not_transmitted()}\") print()", "import create_applications_from_json from yafs.topology import Topology from yafs.placement import JSONPlacement", "for drawing random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) t = Topology() # You also", "a topology using JSONs files. 
Check out examples folder size", "examples folder size = 3 t.G = nx.generators.binomial_tree(size) # In", "messages not transmitted:\" f\" {m.messages_not_transmitted()}\") print() #print(m.df.head()) #print(m.df['time_latency']) #print(m.df_link.head()) print(m.get_df_modules())", "# nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size) # you can export the Graph in multiples", "print (f\"\\t\\t{m.bytes_transmitted():.1f}\") # m.df_link.head(15) # from Stats class time_loops =", "iteration in range(nIterations): random.seed(iteration) logging.info(\"Running experiment it: - %i\" %", "nx.set_edge_attributes(t.G, name=\"BW\", values=attPR_BW) ## Attr. on nodes # IPT attIPT", "PR (link propagation) and BW (bandwith) are 1 unit attPR_BW", "from yafs.core import Sim from yafs.application import create_applications_from_json from yafs.topology", "print() print (\"\\t\\tAverage waiting messages : \" f\"{m.average_messages_not_transmitted()}\") print() print", "logging.info(\" Performing simulation: %i \" % it) s.run(stop_time) # To", "the seed of randoms for iteration in range(nIterations): random.seed(iteration) logging.info(\"Running", "json import random import logging.config import networkx as nx import", "= str(folder_results)+\"/\" \"\"\" TOPOLOGY \"\"\" # Fix position of nodes", "nx.set_edge_attributes(t.G, name=\"PR\", values=attPR_BW) nx.set_edge_attributes(t.G, name=\"BW\", values=attPR_BW) ## Attr. on nodes", "aName in apps.keys(): s.deploy_app(apps[aName], placement, selectorPath) # Note: each app", "= app.get_message(user[\"message\"]) node = user[\"id_resource\"] dist = deterministic_distribution(100, name=\"Deterministic\") idDES", "a lot of Graphs generators # Definition of mandatory attributes", "json.load(open('data/allocDefinition.json')) placement = JSONPlacement(name=\"Placement\", json=placementJson) \"\"\" Defining ROUTING algorithm to", "# print (f\"\\t\\t{m.bytes_transmitted():.1f}\") # m.df_link.head(15) # from Stats class time_loops", "TOPOLOGY \"\"\" # Fix position of nodes for drawing random.seed(RANDOM_SEED)", "users \"\"\" userJSON = json.load(open('data/usersDefinition.json')) for user in userJSON[\"sources\"]: app_name", "of waiting messages :\" f\"{m.peak_messages_not_transmitted()}\") print() print(f\"\\t\\tShow Loops: {m.showLoops(time_loops)}\") print()", "name=\"Deterministic\") idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist) \"\"\" RUNNING -", "# To test deployments put test_initial_deploy a TRUE s.print_debug_assignaments() if", "+ '/logging.ini') nIterations = 1 # iteration for each experiment", "# Note: each app can have a different routing algorithm", "randoms for iteration in range(nIterations): random.seed(iteration) logging.info(\"Running experiment it: -", "random.randrange(100, 900, 100) for x in t.G.nodes()} nx.set_node_attributes(t.G, name=\"IPT\", values=attIPT)", "\"\"\" Defining ROUTING algorithm to define how path messages in", "from yafs.stats import Stats RANDOM_SEED = 1 def main(stop_time, it):", "def main(stop_time, it): folder_results = Path(\"results/\") folder_results.mkdir(parents=True, exist_ok=True) folder_results =", "size = 3 t.G = nx.generators.binomial_tree(size) # In NX-lib there", "% it) s.run(stop_time) # To test deployments put test_initial_deploy a", "Iteration for each experiment changing the seed of randoms for", "default_results_path=folder_results+\"sim_trace\") \"\"\" Deploy services == APP's modules \"\"\" for aName", "(\"\\t- Network saturation -\") print() print 
(\"\\t\\tAverage waiting messages :", "on edges # PR (link propagation) and BW (bandwith) are", "TRUE s.print_debug_assignaments() if __name__ == '__main__': logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations", "Definition of mandatory attributes of a Topology ## Attr. on", "create_applications_from_json(dataApp) # print(apps) \"\"\" SERVICE PLACEMENT \"\"\" placementJson = json.load(open('data/allocDefinition.json'))", "selectorPath = DeviceSpeedAwareRouting() \"\"\" SIMULATION ENGINE \"\"\" s = Sim(t,", "edges # PR (link propagation) and BW (bandwith) are 1", "user in userJSON[\"sources\"]: app_name = user[\"app\"] app = s.apps[app_name] msg", "attPR_BW = {x: 1 for x in t.G.edges()} nx.set_edge_attributes(t.G, name=\"PR\",", "RUNNING - last step \"\"\" logging.info(\" Performing simulation: %i \"", "str or int print() print(nx.get_node_attributes(t.G, \"IPT\")) print() \"\"\" APPLICATION or", "experiment it: - %i\" % iteration) start_time = time.time() main(stop_time=simulationDuration,", "one service. @author: <NAME> \"\"\" import os import time import", "bytes transmitted:\") # print (f\"\\t\\t{m.bytes_transmitted():.1f}\") # m.df_link.head(15) # from Stats", "= Sim(t, default_results_path=folder_results+\"sim_trace\") \"\"\" Deploy services == APP's modules \"\"\"", "\"\"\" RUNNING - last step \"\"\" logging.info(\" Performing simulation: %i", "nIterations = 1 # iteration for each experiment simulationDuration =", "is the most simple scenario with a basic topology, some", "range(nIterations): random.seed(iteration) logging.info(\"Running experiment it: - %i\" % iteration) start_time", "DeviceSpeedAwareRouting() \"\"\" SIMULATION ENGINE \"\"\" s = Sim(t, default_results_path=folder_results+\"sim_trace\") \"\"\"", "values=attPR_BW) nx.set_edge_attributes(t.G, name=\"BW\", values=attPR_BW) ## Attr. 
on nodes # IPT", "basic topology, some users and a set of apps with", "and BW (bandwith) are 1 unit attPR_BW = {x: 1", "multiples format to view in tools like Gephi, and so", "folder_results = Path(\"results/\") folder_results.mkdir(parents=True, exist_ok=True) folder_results = str(folder_results)+\"/\" \"\"\" TOPOLOGY", "import random import logging.config import networkx as nx import numpy", "msg=msg, distribution=dist) \"\"\" RUNNING - last step \"\"\" logging.info(\" Performing", "iteration for each experiment simulationDuration = 1000 # Iteration for", "apps.keys(): s.deploy_app(apps[aName], placement, selectorPath) # Note: each app can have", "as nx import numpy as np from pathlib import Path", "waiting messages :\" f\"{m.peak_messages_not_transmitted()}\") print() print(f\"\\t\\tShow Loops: {m.showLoops(time_loops)}\") print() print", "drawing random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) t = Topology() # You also can", "modules \"\"\" for aName in apps.keys(): s.deploy_app(apps[aName], placement, selectorPath) #", "print(nx.get_node_attributes(t.G, \"IPT\")) print() \"\"\" APPLICATION or SERVICES \"\"\" dataApp =", "node = user[\"id_resource\"] dist = deterministic_distribution(100, name=\"Deterministic\") idDES = s.deploy_source(app_name,", "simulationDuration = 1000 # Iteration for each experiment changing the", "print() print (f\"\\t\\tTOTAL messages not transmitted:\" f\" {m.messages_not_transmitted()}\") print() #print(m.df.head())", "Stats(defaultPath=\"results/sim_trace\") # print (\"\\tNetwork bytes transmitted:\") # print (f\"\\t\\t{m.bytes_transmitted():.1f}\") #", "yafs.stats import Stats RANDOM_SEED = 1 def main(stop_time, it): folder_results", "PLACEMENT \"\"\" placementJson = json.load(open('data/allocDefinition.json')) placement = JSONPlacement(name=\"Placement\", json=placementJson) \"\"\"", "services == APP's modules \"\"\" for aName in apps.keys(): s.deploy_app(apps[aName],", "RANDOM_SEED = 1 def main(stop_time, it): folder_results = Path(\"results/\") folder_results.mkdir(parents=True,", "Stats RANDOM_SEED = 1 def main(stop_time, it): folder_results = Path(\"results/\")", "random.seed(RANDOM_SEED) np.random.seed(RANDOM_SEED) t = Topology() # You also can create", "json.load(open('data/appDefinition.json')) apps = create_applications_from_json(dataApp) # print(apps) \"\"\" SERVICE PLACEMENT \"\"\"", "m.df_link.head(15) # from Stats class time_loops = [[\"M.USER.APP.0\", \"M.USER.APP.1\", \"M.USER.APP.2\",", "print() print(f\"\\t\\tShow Loops: {m.showLoops(time_loops)}\") print() print (f\"\\t\\tTOTAL messages not transmitted:\"", "(time.time() - start_time)) print(\"Simulation Done!\") m = Stats(defaultPath=\"results/sim_trace\") # print", "logging.info(\"Running experiment it: - %i\" % iteration) start_time = time.time()", "from yafs.topology import Topology from yafs.placement import JSONPlacement from yafs.path_routing", "print(\"\\n--- %s seconds ---\" % (time.time() - start_time)) print(\"Simulation Done!\")", "= time.time() main(stop_time=simulationDuration, it=iteration) print(\"\\n--- %s seconds ---\" % (time.time()", "random.seed(iteration) logging.info(\"Running experiment it: - %i\" % iteration) start_time =", "propagation) and BW (bandwith) are 1 unit attPR_BW = {x:", "seconds ---\" % (time.time() - start_time)) print(\"Simulation Done!\") m =", "folder size = 3 t.G = nx.generators.binomial_tree(size) # In NX-lib", "import deterministic_distribution from yafs.stats import Stats RANDOM_SEED = 1 def", "# print(apps) \"\"\" SERVICE PLACEMENT \"\"\" 
placementJson = json.load(open('data/allocDefinition.json')) placement", "be str or int print() print(nx.get_node_attributes(t.G, \"IPT\")) print() \"\"\" APPLICATION", "= JSONPlacement(name=\"Placement\", json=placementJson) \"\"\" Defining ROUTING algorithm to define how", "if __name__ == '__main__': logging.config.fileConfig(os.getcwd() + '/logging.ini') nIterations = 1", "in t.G.nodes()} nx.set_node_attributes(t.G, name=\"IPT\", values=attIPT) # nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size) # you can", "time.time() main(stop_time=simulationDuration, it=iteration) print(\"\\n--- %s seconds ---\" % (time.time() -", "unit attPR_BW = {x: 1 for x in t.G.edges()} nx.set_edge_attributes(t.G,", "each app can have a different routing algorithm \"\"\" Deploy", "\"\"\" Deploy users \"\"\" userJSON = json.load(open('data/usersDefinition.json')) for user in", "class time_loops = [[\"M.USER.APP.0\", \"M.USER.APP.1\", \"M.USER.APP.2\", \"M.USER.APP.3\"]] m.showResults2(10000, time_loops=time_loops) m.compute_times_df()", "nx.write_gexf(t.G,folder_results+\"graph_binomial_tree_%i\"%size) # you can export the Graph in multiples format", "%i \" % it) s.run(stop_time) # To test deployments put", "export the Graph in multiples format to view in tools", "with_labels=True, edge_color='black', width=1, alpha=0.7) print(t.G.nodes()) # nodes id can be", "import json import random import logging.config import networkx as nx", ":\" f\"{m.peak_messages_not_transmitted()}\") print() print(f\"\\t\\tShow Loops: {m.showLoops(time_loops)}\") print() print (f\"\\t\\tTOTAL messages", "= [[\"M.USER.APP.0\", \"M.USER.APP.1\", \"M.USER.APP.2\", \"M.USER.APP.3\"]] m.showResults2(10000, time_loops=time_loops) m.compute_times_df() print (\"\\t-", "topology using JSONs files. 
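# A minimal sketch, assuming matplotlib is installed (nx.draw() already depends
# on it), of persisting the figure that nx.draw() renders inside main() above,
# next to the GraphML export:
#
#     import matplotlib.pyplot as plt
#     plt.savefig(folder_results + "graph_binomial_tree_%i.png" % size, dpi=150)
#     plt.close()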
if __name__ == '__main__':
    logging.config.fileConfig(os.getcwd() + '/logging.ini')

    nIterations = 1  # number of iterations for each experiment
    simulationDuration = 1000

    # Run each iteration of the experiment with a different random seed
    for iteration in range(nIterations):
        random.seed(iteration)
        logging.info("Running experiment it: - %i" % iteration)

        start_time = time.time()
        main(stop_time=simulationDuration, it=iteration)
        print("\n--- %s seconds ---" % (time.time() - start_time))

    print("Simulation Done!")

    m = Stats(defaultPath="results/sim_trace")
    # print("\tNetwork bytes transmitted:")
    # print(f"\t\t{m.bytes_transmitted():.1f}")
    # m.df_link.head(15)  # from the Stats class

    time_loops = [["M.USER.APP.0", "M.USER.APP.1", "M.USER.APP.2", "M.USER.APP.3"]]
    m.showResults2(10000, time_loops=time_loops)
    m.compute_times_df()

    print("\t- Network saturation -")
    print()
    print(f"\t\tAverage waiting messages : {m.average_messages_not_transmitted()}")
    print()
    print(f"\t\tPeak of waiting messages : {m.peak_messages_not_transmitted()}")
    print()
    print(f"\t\tShow Loops: {m.showLoops(time_loops)}")
    print()
    print(f"\t\tTOTAL messages not transmitted: {m.messages_not_transmitted()}")
    print()
    # print(m.df.head())
    # print(m.df['time_latency'])
    # print(m.df_link.head())
    print(m.get_df_modules())
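# A quick way to look at the raw event trace that Stats consumes (a sketch,
# assuming YAFS writes the trace as CSV at results/sim_trace.csv and pandas is
# installed):
#
#     import pandas as pd
#     df = pd.read_csv("results/sim_trace.csv")
#     print(df.head())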
[ "prognosis = db.Column(db.Float, nullable=False) # class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class Meta:", "= RisklayerPrognosis # # timestamp = fields.Timestamp(data_key=\"datenbestand\") # prognosis =", "import db class RisklayerPrognosis(db.Model): __tablename__ = 'risklayer_prognosis' datenbestand = db.Column(db.TIMESTAMP,", "'risklayer_prognosis' datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False) prognosis = db.Column(db.Float, nullable=False)", "class RisklayerPrognosis(db.Model): __tablename__ = 'risklayer_prognosis' datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False)", "Meta: # strict = True # model = RisklayerPrognosis #", "= True # model = RisklayerPrognosis # # timestamp =", "nullable=False) # class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class Meta: # strict =", "__tablename__ = 'risklayer_prognosis' datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False) prognosis =", "strict = True # model = RisklayerPrognosis # # timestamp", "# model = RisklayerPrognosis # # timestamp = fields.Timestamp(data_key=\"datenbestand\") #", "# class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class Meta: # strict = True", "db.Column(db.TIMESTAMP, primary_key=True, nullable=False) prognosis = db.Column(db.Float, nullable=False) # class RisklayerPrognosisSchema(SQLAlchemyAutoSchema):", "class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class Meta: # strict = True #", "from db import db class RisklayerPrognosis(db.Model): __tablename__ = 'risklayer_prognosis' datenbestand", "db class RisklayerPrognosis(db.Model): __tablename__ = 'risklayer_prognosis' datenbestand = db.Column(db.TIMESTAMP, primary_key=True,", "RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class Meta: # strict = True # model", "db.Column(db.Float, nullable=False) # class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class Meta: # strict", "RisklayerPrognosis(db.Model): __tablename__ = 'risklayer_prognosis' datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False) prognosis", "= db.Column(db.TIMESTAMP, primary_key=True, nullable=False) prognosis = db.Column(db.Float, nullable=False) # class", "= 'risklayer_prognosis' datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False) prognosis = db.Column(db.Float,", "model = RisklayerPrognosis # # timestamp = fields.Timestamp(data_key=\"datenbestand\") # prognosis", "# class Meta: # strict = True # model =", "= db.Column(db.Float, nullable=False) # class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class Meta: #", "# strict = True # model = RisklayerPrognosis # #", "datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False) prognosis = db.Column(db.Float, nullable=False) #", "RisklayerPrognosis # # timestamp = fields.Timestamp(data_key=\"datenbestand\") # prognosis = fields.Number(data_key=\"prognosis\")", "nullable=False) prognosis = db.Column(db.Float, nullable=False) # class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): # class", "True # model = RisklayerPrognosis # # timestamp = fields.Timestamp(data_key=\"datenbestand\")", "class Meta: # strict = True # model = RisklayerPrognosis", "db import db class RisklayerPrognosis(db.Model): __tablename__ = 'risklayer_prognosis' datenbestand =", "primary_key=True, nullable=False) prognosis = db.Column(db.Float, nullable=False) # class RisklayerPrognosisSchema(SQLAlchemyAutoSchema): #" ]
[ "in iterable, '%s did not occur in %s' % (value,", "name = forms.CharField(required=True, max_length=16) class FormTestCase(unittest.TestCase): klass = BasicForm def", "django import forms from django.db import models from django.forms.forms import", "self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected value in form field.', post._errors[NON_FIELD_ERRORS]) if", "test_missing(self): post = self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name', post._errors) def test_replay(self): post", "form[form._meta.secure_field_name].value() class BasicForm(SecureForm): name = forms.CharField(required=True, max_length=16) class FormTestCase(unittest.TestCase): klass", "BasicForm(SecureForm): name = forms.CharField(required=True, max_length=16) class FormTestCase(unittest.TestCase): klass = BasicForm", "dict((get_form_secure_data(self.form), )) for n, v in kwargs.items(): data[get_form_sname(self.form, n)] =", "= 'settings' import django if django.VERSION >= (1, 7): django.setup()", "import unittest os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import django if django.VERSION >=", "from django_secureform.forms import SecureForm def get_form_sname(form, name): for sname, v", "sname, v in form._secure_field_map.items(): if v is None: return sname", "django if django.VERSION >= (1, 7): django.setup() from django import", "None: return sname raise Exception('No honeypots found.') def get_form_secure_data(form): #", "self.form.secure_data() def assertIn(self, value, iterable): self.assertTrue(value in iterable, '%s did", "BasicTestCase(FormTestCase): def test_valid(self): post = self.getForm(name='foobar') self.assertTrue(post.is_valid()) def test_missing(self): post", "# We must copy over the security data. 
return form._meta.secure_field_name,", "value, iterable): self.assertTrue(value in iterable, '%s did not occur in", "raise Exception('No honeypots found.') def get_form_secure_data(form): # We must copy", "post = self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This form has already", "return sname raise Exception('No honeypots found.') def get_form_secure_data(form): # We", ")) for n, v in kwargs.items(): data[get_form_sname(self.form, n)] = v", "assertIn(self, value, iterable): self.assertTrue(value in iterable, '%s did not occur", "for n, v in kwargs.items(): data[get_form_sname(self.form, n)] = v return", "django.setup() from django import forms from django.db import models from", "name): for sname, v in form._secure_field_map.items(): if v and v", "self.form = self.klass() self.form.secure_data() def assertIn(self, value, iterable): self.assertTrue(value in", "getForm(self, **kwargs): data = dict((get_form_secure_data(self.form), )) for n, v in", "self.assertTrue(value in iterable, '%s did not occur in %s' %", "from django.db import models from django.forms.forms import NON_FIELD_ERRORS from django_secureform.forms", "name: return sname raise KeyError(name) def get_form_honeypot(form): for sname, v", "models from django.forms.forms import NON_FIELD_ERRORS from django_secureform.forms import SecureForm def", "== name: return sname raise KeyError(name) def get_form_honeypot(form): for sname,", "= self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name', post._errors) def test_replay(self): post = self.getForm(name='foobar')", "self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name', post._errors) def test_replay(self): post = self.getForm(name='foobar') post.is_valid()", "has already been submitted.', post._errors[NON_FIELD_ERRORS]) def test_honeypot(self): honeypot = get_form_honeypot(self.form)", "data[honeypot] = 'mmm, hunny!' 
data[get_form_sname(self.form, 'name')] = 'foobar' post =", "= self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected value in form field.',", "= self.klass() self.form.secure_data() def assertIn(self, value, iterable): self.assertTrue(value in iterable,", "class BasicForm(SecureForm): name = forms.CharField(required=True, max_length=16) class FormTestCase(unittest.TestCase): klass =", "in form._secure_field_map.items(): if v and v == name: return sname", "v and v == name: return sname raise KeyError(name) def", "self.klass() self.form.secure_data() def assertIn(self, value, iterable): self.assertTrue(value in iterable, '%s", "post._errors) self.assertIn('Unexpected value in form field.', post._errors[NON_FIELD_ERRORS]) if __name__ ==", "(1, 7): django.setup() from django import forms from django.db import", "def get_form_secure_data(form): # We must copy over the security data.", "not occur in %s' % (value, iterable)) def getForm(self, **kwargs):", "def get_form_honeypot(form): for sname, v in form._secure_field_map.items(): if v is", "post = self.getForm(name='foobar') self.assertTrue(post.is_valid()) def test_missing(self): post = self.getForm() self.assertFalse(post.is_valid())", "if django.VERSION >= (1, 7): django.setup() from django import forms", "honeypots found.') def get_form_secure_data(form): # We must copy over the", "django_secureform.forms import SecureForm def get_form_sname(form, name): for sname, v in", "%s' % (value, iterable)) def getForm(self, **kwargs): data = dict((get_form_secure_data(self.form),", "form._secure_field_map.items(): if v and v == name: return sname raise", "self.assertTrue(post.is_valid()) def test_missing(self): post = self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name', post._errors) def", "forms.CharField(required=True, max_length=16) class FormTestCase(unittest.TestCase): klass = BasicForm def setUp(self): self.form", "self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This form has already been submitted.',", "= dict((get_form_secure_data(self.form), )) data[honeypot] = 'mmm, hunny!' data[get_form_sname(self.form, 'name')] =", "forms from django.db import models from django.forms.forms import NON_FIELD_ERRORS from", "NON_FIELD_ERRORS from django_secureform.forms import SecureForm def get_form_sname(form, name): for sname,", "the security data. 
return form._meta.secure_field_name, form[form._meta.secure_field_name].value() class BasicForm(SecureForm): name =", "(value, iterable)) def getForm(self, **kwargs): data = dict((get_form_secure_data(self.form), )) for", "return form._meta.secure_field_name, form[form._meta.secure_field_name].value() class BasicForm(SecureForm): name = forms.CharField(required=True, max_length=16) class", "import django if django.VERSION >= (1, 7): django.setup() from django", "self.getForm(name='foobar') self.assertTrue(post.is_valid()) def test_missing(self): post = self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name', post._errors)", "sname, v in form._secure_field_map.items(): if v and v == name:", "already been submitted.', post._errors[NON_FIELD_ERRORS]) def test_honeypot(self): honeypot = get_form_honeypot(self.form) data", "been submitted.', post._errors[NON_FIELD_ERRORS]) def test_honeypot(self): honeypot = get_form_honeypot(self.form) data =", "import SecureForm def get_form_sname(form, name): for sname, v in form._secure_field_map.items():", "self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected value in form field.', post._errors[NON_FIELD_ERRORS]) if __name__", "SecureForm def get_form_sname(form, name): for sname, v in form._secure_field_map.items(): if", "occur in %s' % (value, iterable)) def getForm(self, **kwargs): data", "if v and v == name: return sname raise KeyError(name)", "if v is None: return sname raise Exception('No honeypots found.')", "<reponame>smartfile/django-secureform import os import unittest os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import django", "return sname raise KeyError(name) def get_form_honeypot(form): for sname, v in", "self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This form has already been submitted.', post._errors[NON_FIELD_ERRORS]) def", "in %s' % (value, iterable)) def getForm(self, **kwargs): data =", "v in kwargs.items(): data[get_form_sname(self.form, n)] = v return self.klass(data=data) class", "self.klass(data=data) class BasicTestCase(FormTestCase): def test_valid(self): post = self.getForm(name='foobar') self.assertTrue(post.is_valid()) def", "test_honeypot(self): honeypot = get_form_honeypot(self.form) data = dict((get_form_secure_data(self.form), )) data[honeypot] =", "Exception('No honeypots found.') def get_form_secure_data(form): # We must copy over", "% (value, iterable)) def getForm(self, **kwargs): data = dict((get_form_secure_data(self.form), ))", "import os import unittest os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import django if", "submitted.', post._errors[NON_FIELD_ERRORS]) def test_honeypot(self): honeypot = get_form_honeypot(self.form) data = dict((get_form_secure_data(self.form),", "v is None: return sname raise Exception('No honeypots found.') def", "KeyError(name) def get_form_honeypot(form): for sname, v in form._secure_field_map.items(): if v", "class FormTestCase(unittest.TestCase): klass = BasicForm def setUp(self): self.form = self.klass()", "v in form._secure_field_map.items(): if v and v == name: return", "import NON_FIELD_ERRORS from django_secureform.forms import SecureForm def get_form_sname(form, name): for", "over the security data. return form._meta.secure_field_name, form[form._meta.secure_field_name].value() class BasicForm(SecureForm): name", "security data. 
return form._meta.secure_field_name, form[form._meta.secure_field_name].value() class BasicForm(SecureForm): name = forms.CharField(required=True,", "post._errors) def test_replay(self): post = self.getForm(name='foobar') post.is_valid() post = self.getForm(name='foobar')", "= forms.CharField(required=True, max_length=16) class FormTestCase(unittest.TestCase): klass = BasicForm def setUp(self):", "data[get_form_sname(self.form, n)] = v return self.klass(data=data) class BasicTestCase(FormTestCase): def test_valid(self):", "for sname, v in form._secure_field_map.items(): if v and v ==", "klass = BasicForm def setUp(self): self.form = self.klass() self.form.secure_data() def", "def test_missing(self): post = self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name', post._errors) def test_replay(self):", "get_form_secure_data(form): # We must copy over the security data. return", "self.assertIn('Unexpected value in form field.', post._errors[NON_FIELD_ERRORS]) if __name__ == '__main__':", "form._secure_field_map.items(): if v is None: return sname raise Exception('No honeypots", "= 'mmm, hunny!' data[get_form_sname(self.form, 'name')] = 'foobar' post = self.klass(data=data)", "form._meta.secure_field_name, form[form._meta.secure_field_name].value() class BasicForm(SecureForm): name = forms.CharField(required=True, max_length=16) class FormTestCase(unittest.TestCase):", "BasicForm def setUp(self): self.form = self.klass() self.form.secure_data() def assertIn(self, value,", "def test_honeypot(self): honeypot = get_form_honeypot(self.form) data = dict((get_form_secure_data(self.form), )) data[honeypot]", "data = dict((get_form_secure_data(self.form), )) for n, v in kwargs.items(): data[get_form_sname(self.form,", "'mmm, hunny!' data[get_form_sname(self.form, 'name')] = 'foobar' post = self.klass(data=data) self.assertFalse(post.is_valid())", "def get_form_sname(form, name): for sname, v in form._secure_field_map.items(): if v", "v == name: return sname raise KeyError(name) def get_form_honeypot(form): for", "self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected value in form field.', post._errors[NON_FIELD_ERRORS])", "test_valid(self): post = self.getForm(name='foobar') self.assertTrue(post.is_valid()) def test_missing(self): post = self.getForm()", "= v return self.klass(data=data) class BasicTestCase(FormTestCase): def test_valid(self): post =", "post._errors[NON_FIELD_ERRORS]) def test_honeypot(self): honeypot = get_form_honeypot(self.form) data = dict((get_form_secure_data(self.form), ))", "'name')] = 'foobar' post = self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected", ")) data[honeypot] = 'mmm, hunny!' 
data[get_form_sname(self.form, 'name')] = 'foobar' post", "self.getForm(name='foobar') post.is_valid() post = self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This form", "return self.klass(data=data) class BasicTestCase(FormTestCase): def test_valid(self): post = self.getForm(name='foobar') self.assertTrue(post.is_valid())", "for sname, v in form._secure_field_map.items(): if v is None: return", "self.assertFalse(post.is_valid()) self.assertIn('name', post._errors) def test_replay(self): post = self.getForm(name='foobar') post.is_valid() post", "n)] = v return self.klass(data=data) class BasicTestCase(FormTestCase): def test_valid(self): post", "= dict((get_form_secure_data(self.form), )) for n, v in kwargs.items(): data[get_form_sname(self.form, n)]", "in kwargs.items(): data[get_form_sname(self.form, n)] = v return self.klass(data=data) class BasicTestCase(FormTestCase):", "and v == name: return sname raise KeyError(name) def get_form_honeypot(form):", "sname raise Exception('No honeypots found.') def get_form_secure_data(form): # We must", "self.assertIn('This form has already been submitted.', post._errors[NON_FIELD_ERRORS]) def test_honeypot(self): honeypot", "post = self.getForm(name='foobar') post.is_valid() post = self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors)", "is None: return sname raise Exception('No honeypots found.') def get_form_secure_data(form):", "get_form_honeypot(form): for sname, v in form._secure_field_map.items(): if v is None:", "copy over the security data. return form._meta.secure_field_name, form[form._meta.secure_field_name].value() class BasicForm(SecureForm):", "iterable)) def getForm(self, **kwargs): data = dict((get_form_secure_data(self.form), )) for n,", "get_form_sname(form, name): for sname, v in form._secure_field_map.items(): if v and", "did not occur in %s' % (value, iterable)) def getForm(self,", "max_length=16) class FormTestCase(unittest.TestCase): klass = BasicForm def setUp(self): self.form =", "iterable, '%s did not occur in %s' % (value, iterable))", "FormTestCase(unittest.TestCase): klass = BasicForm def setUp(self): self.form = self.klass() self.form.secure_data()", "We must copy over the security data. return form._meta.secure_field_name, form[form._meta.secure_field_name].value()", "honeypot = get_form_honeypot(self.form) data = dict((get_form_secure_data(self.form), )) data[honeypot] = 'mmm,", "= BasicForm def setUp(self): self.form = self.klass() self.form.secure_data() def assertIn(self,", "= self.getForm(name='foobar') post.is_valid() post = self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This", "found.') def get_form_secure_data(form): # We must copy over the security", "data. 
return form._meta.secure_field_name, form[form._meta.secure_field_name].value() class BasicForm(SecureForm): name = forms.CharField(required=True, max_length=16)", "setUp(self): self.form = self.klass() self.form.secure_data() def assertIn(self, value, iterable): self.assertTrue(value", "class BasicTestCase(FormTestCase): def test_valid(self): post = self.getForm(name='foobar') self.assertTrue(post.is_valid()) def test_missing(self):", "post = self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name', post._errors) def test_replay(self): post =", "raise KeyError(name) def get_form_honeypot(form): for sname, v in form._secure_field_map.items(): if", "def getForm(self, **kwargs): data = dict((get_form_secure_data(self.form), )) for n, v", "n, v in kwargs.items(): data[get_form_sname(self.form, n)] = v return self.klass(data=data)", "data[get_form_sname(self.form, 'name')] = 'foobar' post = self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors)", "'%s did not occur in %s' % (value, iterable)) def", "post.is_valid() post = self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This form has", ">= (1, 7): django.setup() from django import forms from django.db", "from django import forms from django.db import models from django.forms.forms", "django.db import models from django.forms.forms import NON_FIELD_ERRORS from django_secureform.forms import", "= 'foobar' post = self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected value", "**kwargs): data = dict((get_form_secure_data(self.form), )) for n, v in kwargs.items():", "form has already been submitted.', post._errors[NON_FIELD_ERRORS]) def test_honeypot(self): honeypot =", "'settings' import django if django.VERSION >= (1, 7): django.setup() from", "kwargs.items(): data[get_form_sname(self.form, n)] = v return self.klass(data=data) class BasicTestCase(FormTestCase): def", "7): django.setup() from django import forms from django.db import models", "= self.getForm(name='foobar') self.assertTrue(post.is_valid()) def test_missing(self): post = self.getForm() self.assertFalse(post.is_valid()) self.assertIn('name',", "def test_valid(self): post = self.getForm(name='foobar') self.assertTrue(post.is_valid()) def test_missing(self): post =", "sname raise KeyError(name) def get_form_honeypot(form): for sname, v in form._secure_field_map.items():", "in form._secure_field_map.items(): if v is None: return sname raise Exception('No", "os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import django if django.VERSION >= (1, 7):", "unittest os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import django if django.VERSION >= (1,", "test_replay(self): post = self.getForm(name='foobar') post.is_valid() post = self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS,", "iterable): self.assertTrue(value in iterable, '%s did not occur in %s'", "must copy over the security data. 
return form._meta.secure_field_name, form[form._meta.secure_field_name].value() class", "os import unittest os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' import django if django.VERSION", "def assertIn(self, value, iterable): self.assertTrue(value in iterable, '%s did not", "from django.forms.forms import NON_FIELD_ERRORS from django_secureform.forms import SecureForm def get_form_sname(form,", "data = dict((get_form_secure_data(self.form), )) data[honeypot] = 'mmm, hunny!' data[get_form_sname(self.form, 'name')]", "'foobar' post = self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected value in", "import models from django.forms.forms import NON_FIELD_ERRORS from django_secureform.forms import SecureForm", "import forms from django.db import models from django.forms.forms import NON_FIELD_ERRORS", "django.VERSION >= (1, 7): django.setup() from django import forms from", "self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This form has already been submitted.', post._errors[NON_FIELD_ERRORS])", "self.assertIn('name', post._errors) def test_replay(self): post = self.getForm(name='foobar') post.is_valid() post =", "hunny!' data[get_form_sname(self.form, 'name')] = 'foobar' post = self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS,", "post = self.klass(data=data) self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('Unexpected value in form", "v in form._secure_field_map.items(): if v is None: return sname raise", "django.forms.forms import NON_FIELD_ERRORS from django_secureform.forms import SecureForm def get_form_sname(form, name):", "value in form field.', post._errors[NON_FIELD_ERRORS]) if __name__ == '__main__': unittest.main()", "post._errors) self.assertIn('This form has already been submitted.', post._errors[NON_FIELD_ERRORS]) def test_honeypot(self):", "= self.getForm(name='foobar') self.assertFalse(post.is_valid()) self.assertIn(NON_FIELD_ERRORS, post._errors) self.assertIn('This form has already been", "def test_replay(self): post = self.getForm(name='foobar') post.is_valid() post = self.getForm(name='foobar') self.assertFalse(post.is_valid())", "dict((get_form_secure_data(self.form), )) data[honeypot] = 'mmm, hunny!' data[get_form_sname(self.form, 'name')] = 'foobar'", "= get_form_honeypot(self.form) data = dict((get_form_secure_data(self.form), )) data[honeypot] = 'mmm, hunny!'", "get_form_honeypot(self.form) data = dict((get_form_secure_data(self.form), )) data[honeypot] = 'mmm, hunny!' data[get_form_sname(self.form,", "v return self.klass(data=data) class BasicTestCase(FormTestCase): def test_valid(self): post = self.getForm(name='foobar')", "def setUp(self): self.form = self.klass() self.form.secure_data() def assertIn(self, value, iterable):" ]
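# The import block above expects a `settings` module on the path (set via
# DJANGO_SETTINGS_MODULE). A minimal sketch of what such a module might
# contain; the exact settings django_secureform requires are an assumption
# here, not taken from the test file:
#
#     # settings.py
#     SECRET_KEY = 'test-secret-key'
#     INSTALLED_APPS = ['django_secureform']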
[ "cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()): ret", "(ret == True): cv.imshow(\"camVid\", frame) if cv.waitKey(25) & 0xFF ==", "cap = cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while", "import cv2 as cv import numpy as np cap =", "= cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()):", "cap.set(3,3000) cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()): ret , frame =", "== True): cv.imshow(\"camVid\", frame) if cv.waitKey(25) & 0xFF == ord('q'):", "cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()): ret , frame = cap.read()", "print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()): ret , frame = cap.read() if (ret", "if cv.waitKey(25) & 0xFF == ord('q'): break else: break cap.release()", "(cap.isOpened()): ret , frame = cap.read() if (ret == True):", "= cap.read() if (ret == True): cv.imshow(\"camVid\", frame) if cv.waitKey(25)", "cv import numpy as np cap = cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT))", "True): cv.imshow(\"camVid\", frame) if cv.waitKey(25) & 0xFF == ord('q'): break", "np cap = cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT))", "import numpy as np cap = cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000)", "ret , frame = cap.read() if (ret == True): cv.imshow(\"camVid\",", "<gh_stars>1-10 import cv2 as cv import numpy as np cap", "print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()): ret ,", "print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()): ret , frame = cap.read() if", "frame = cap.read() if (ret == True): cv.imshow(\"camVid\", frame) if", "cv.imshow(\"camVid\", frame) if cv.waitKey(25) & 0xFF == ord('q'): break else:", "as cv import numpy as np cap = cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH))", "while (cap.isOpened()): ret , frame = cap.read() if (ret ==", "if (ret == True): cv.imshow(\"camVid\", frame) if cv.waitKey(25) & 0xFF", "numpy as np cap = cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) cap.set(4,3000)", "as np cap = cv.VideoCapture(1) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH))", ", frame = cap.read() if (ret == True): cv.imshow(\"camVid\", frame)", "cap.read() if (ret == True): cv.imshow(\"camVid\", frame) if cv.waitKey(25) &", "print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) cap.set(3,3000) 
cap.set(4,3000) print(cap.get(cv.CAP_PROP_FRAME_WIDTH)) print(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) while (cap.isOpened()): ret , frame", "frame) if cv.waitKey(25) & 0xFF == ord('q'): break else: break", "cv2 as cv import numpy as np cap = cv.VideoCapture(1)", "cv.waitKey(25) & 0xFF == ord('q'): break else: break cap.release() cv.destroyAllWindows()" ]
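# A related sketch for recording instead of only displaying: write each
# captured frame to disk with cv.VideoWriter (filename and codec here are
# illustrative choices, not from the original script).
#
#     fourcc = cv.VideoWriter_fourcc(*'XVID')
#     w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
#     h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
#     out = cv.VideoWriter('capture.avi', fourcc, 20.0, (w, h))
#     ...
#     out.write(frame)   # inside the read loop, when ret is True
#     ...
#     out.release()      # alongside cap.release()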
[ "== self.color: weak_neighs += 1 else: weak_opps += 1 neighbour_ref.remove(neighbour_ref,", "x in range(SIZE*SIZE)] self.parent = None def play(self, board): \"\"\"", "members1 = set([square]) changed = True while changed: changed =", "in range(3)] def set_neighbours(self): x, y = self.pos % SIZE,", "def __init__(self): self.squares = [Square(self, pos) for pos in range(SIZE*SIZE)]", "-= 1 else: neighbour_ref.ledges -= 1 if neighbour_ref.ledges == 0:", "+ dx, y + dy if 0 <= newx <", "= TIMESTAMP self.zobrist_strings = [random.randrange(sys.maxint) for i in range(3)] def", "self.color = EMPTY self.board.emptyset.add(self.pos) # if color == BLACK: #", "winrate = self.wins/float(self.wins+self.losses) parentvisits = self.parent.wins+self.parent.losses if not parentvisits: return", "+= 1 neighbour_ref.remove(neighbour_ref, update=False) dupe = self.zobrist.dupe() self.zobrist.hash = old_hash", "break board.move(pos) child = node.pos_child[pos] if not child: child =", "for square in self.squares: if square.color == EMPTY: continue members1", "= range(SIZE*SIZE) self.empty_pos = range(SIZE*SIZE) def random_choice(self): choices = len(self.empties)", "neighbour.find() if neighbour_ref.timestamp != TIMESTAMP: if neighcolor == self.color: neighs", "* SIZE + x def to_xy(pos): y, x = divmod(pos,", "pos = computer_move(board) def main(n): times = [] for i", "maxchild = (child.wins+child.losses), child return maxchild def user_move(board): while True:", "self.lastmove = -2 self.history = [] self.white_dead = 0 self.black_dead", "self.color = BLACK self.finished = False self.lastmove = -2 self.history", "= time.time() times.append(t2 - t1) return times if __name__ ==", "return True return False def useful(self, pos): global TIMESTAMP TIMESTAMP", "game in range(GAMES): node = tree nboard.reset() nboard.replay(board.history) node.play(nboard) #", "= node.parent.best_child() def score(self): winrate = self.wins/float(self.wins+self.losses) parentvisits = self.parent.wins+self.parent.losses", "def move(self, pos): square = self.squares[pos] if pos != PASS:", "random.randrange(len(self.unexplored)) pos = self.unexplored[i] self.unexplored[i] = self.unexplored[len(self.unexplored)-1] self.unexplored.pop() return pos", "count = KOMI + self.black_dead else: count = self.white_dead for", "__repr__(self): return repr(to_xy(self.pos)) class EmptySet: def __init__(self, board): self.board =", "= self.empties[i] if self.board.useful(pos): return pos choices -= 1 self.set(i,", "board, color, path): \"\"\" update win/loss count along path \"\"\"", "KOMI + self.black_dead else: count = self.white_dead for square in", "= to_pos(0, y) result.append(''.join([SHOW[square.color]+' ' for square in self.squares[start:start+SIZE]])) return", "^= square.zobrist_strings[EMPTY] self.hash_set.clear() self.hash_set.add(self.hash) def update(self, square, color): self.hash ^=", "square, root, members1 #print 'ledges1', square, ledges1 members2 = set()", "+= 1 self.board.zobrist.update(self, color) self.color = color self.reference = self", "if child and child.score() > maxscore: maxchild = child maxscore", "== 0: neighbour.remove(neighbour_ref) self.board.zobrist.add() def remove(self, reference, update=True): self.board.zobrist.update(self, EMPTY)", "TIMESTAMP: neighbour_ref = neighbour.find(update) if neighbour_ref.pos == reference.pos: neighbour.remove(reference, update)", "{EMPTY: '.', WHITE: 'o', BLACK: 'x'} PASS = -1 MAXMOVES", "neighcolor == EMPTY: empties += 1 continue neighbour_ref = neighbour.find()", "1, 2 SHOW = 
{EMPTY: '.', WHITE: 'o', BLACK: 'x'}", "update(self, square, color): self.hash ^= square.zobrist_strings[square.color] self.hash ^= square.zobrist_strings[color] def", "self.hash in self.hash_set class Board: def __init__(self): self.squares = [Square(self,", "-= 1 if neighbour_ref.ledges == 0: neighbour.remove(neighbour_ref) self.board.zobrist.add() def remove(self,", "else: if update: neighbour_ref.ledges += 1 def find(self, update=False): reference", "child and child.score() > maxscore: maxchild = child maxscore =", "self.random_playout(board) self.update_path(board, color, path) def select(self, board): \"\"\" select move;", "i class ZobristHash: def __init__(self, board): self.board = board self.hash_set", "def move(self, color): global TIMESTAMP, MOVES TIMESTAMP += 1 MOVES", "ledges1 members2 = set() for square2 in self.squares: if square2.color", "self.squares: if square2.color != EMPTY and square2.find() == root: members2.add(square2)", "def set(self, i, pos): self.empties[i] = pos self.empty_pos[pos] = i", "None def play(self, board): \"\"\" uct tree search \"\"\" color", "board): \"\"\" uct tree search \"\"\" color = board.color node", "for dx, dy in [(-1, 0), (1, 0), (0, -1),", "== EMPTY: empties2.add(square.pos) def __repr__(self): result = [] for y", "for pos in range(SIZE*SIZE)] for square in self.squares: square.set_neighbours() self.reset()", "self.hash_set class Board: def __init__(self): self.squares = [Square(self, pos) for", "= neighs-weak_neighs strong_opps = opps-weak_opps return not dupe and \\", "surround = 0 for neighbour in square.neighbours: if neighbour.color ==", "count += 1 return count def check(self): for square in", "play(self, board): \"\"\" uct tree search \"\"\" color = board.color", "self.parent.wins+self.parent.losses if not parentvisits: return winrate nodevisits = self.wins+self.losses return", "= neighs = weak_neighs = 0 for neighbour in square.neighbours:", "i in text.split()] except ValueError: continue if not (0 <=", "import random, math, sys, time SIZE = 9 GAMES =", "self.empties.pop() def set(self, i, pos): self.empties[i] = pos self.empty_pos[pos] =", "pos = self.unexplored[i] self.unexplored[i] = self.unexplored[len(self.unexplored)-1] self.unexplored.pop() return pos elif", "'o', BLACK: 'x'} PASS = -1 MAXMOVES = SIZE*SIZE*3 TIMESTAMP", "<reponame>kevinxucs/pyston # from pypy-benchmarks/own/chaos.py, with some minor modifications # (more", "self.useful(pos)] def replay(self, history): for pos in history: self.move(pos) def", "wins == (color == BLACK): node.wins += 1 else: node.losses", "x, y = [int(i) for i in text.split()] except ValueError:", "= board.random_move() if pos == PASS: return PASS tree =", "play until both players pass \"\"\" for x in range(MAXMOVES):", "def versus_cpu(): print \"versus_cpu\" random.seed(1) board = Board() pos =", "= None def play(self, board): \"\"\" uct tree search \"\"\"", "count along path \"\"\" wins = board.score(BLACK) >= board.score(WHITE) for", "squarecolor == EMPTY: surround = 0 for neighbour in square.neighbours:", "maxchild = child maxscore = child.score() return maxchild def best_visited(self):", "if color == WHITE: count = KOMI + self.black_dead else:", "True: pos = node.select(board) if pos == PASS: break board.move(pos)", "continue members1 = set([square]) changed = True while changed: changed", "pos) for pos in range(SIZE*SIZE)] for square in self.squares: square.set_neighbours()", "= 0 for neighbour in square.neighbours: neighcolor = neighbour.color if", "text.split()] except ValueError: 
continue if not (0 <= x <", "tree.unexplored = board.useful_moves() nboard = Board() for game in range(GAMES):", "self.squares: if square.color == EMPTY: continue members1 = set([square]) changed", "# if color == BLACK: # self.board.black_dead += 1 #", "if child: # print to_xy(child.pos), child.wins, child.losses, child.score() if child", "ledges2)) empties1 = set(self.emptyset.empties) empties2 = set() for square in", "pass \"\"\" for x in range(MAXMOVES): # XXX while not", "'x'} PASS = -1 MAXMOVES = SIZE*SIZE*3 TIMESTAMP = 0", "weak_neighs))) def useful_moves(self): return [pos for pos in self.emptyset.empties if", "+= 1 square = self.squares[pos] if self.useful_fast(square): return True old_hash", "y + dy if 0 <= newx < SIZE and", "remove(self, reference, update=True): self.board.zobrist.update(self, EMPTY) self.removestamp = TIMESTAMP if update:", "in self.squares: if square.color == EMPTY: empties2.add(square.pos) def __repr__(self): result", "in range(n): t1 = time.time() versus_cpu() t2 = time.time() times.append(t2", "to uct value \"\"\" if self.unexplored: i = random.randrange(len(self.unexplored)) pos", "def user_move(board): while True: text = raw_input('?').strip() if text ==", "and neighbour.removestamp != TIMESTAMP: neighbour_ref = neighbour.find(update) if neighbour_ref.pos ==", "path.append(child) node = child self.random_playout(board) self.update_path(board, color, path) def select(self,", "UCTNode() child.unexplored = board.useful_moves() child.pos = pos child.parent = node", "= [Square(self, pos) for pos in range(SIZE*SIZE)] for square in", "+ x def to_xy(pos): y, x = divmod(pos, SIZE) return", "newy = x + dx, y + dy if 0", "+= 1 neighbour_ref.timestamp = TIMESTAMP neighbour_ref.temp_ledges = neighbour_ref.ledges neighbour_ref.temp_ledges -=", "for game in range(GAMES): node = tree nboard.reset() nboard.replay(board.history) node.play(nboard)", "try: x, y = [int(i) for i in text.split()] except", "BLACK = 0, 1, 2 SHOW = {EMPTY: '.', WHITE:", "= self.unexplored[i] self.unexplored[i] = self.unexplored[len(self.unexplored)-1] self.unexplored.pop() return pos elif self.bestchild:", "if square2.color != EMPTY and square2.find() == root: members2.add(square2) ledges2", "to_xy(child.pos), child.wins, child.losses, child.score() if child and (child.wins+child.losses) > maxvisits:", "0 self.used = True for neighbour in self.neighbours: neighcolor =", "check(self): for square in self.squares: if square.color == EMPTY: continue", "square in self.squares: square.color = EMPTY square.used = False self.emptyset", "and 0 <= y < SIZE): continue pos = to_pos(x,", "= EMPTY square.used = False self.emptyset = EmptySet(self) self.zobrist =", "MOVES = 0 def to_pos(x,y): return y * SIZE +", "[node] while True: pos = node.select(board) if pos == PASS:", "for pos in history: self.move(pos) def score(self, color): if color", "self.pos = pos self.timestamp = TIMESTAMP self.removestamp = TIMESTAMP self.zobrist_strings", "t2 = time.time() times.append(t2 - t1) return times if __name__", "return PASS tree = UCTNode() tree.unexplored = board.useful_moves() nboard =", "members1 #print 'ledges1', square, ledges1 members2 = set() for square2", "self.empties[i] = pos self.empty_pos[pos] = i class ZobristHash: def __init__(self,", "random_playout(self, board): \"\"\" random play until both players pass \"\"\"", "pos choices -= 1 self.set(i, self.empties[choices]) self.set(choices, pos) return PASS", "if square.color == EMPTY: empties2.add(square.pos) def __repr__(self): result = []", "if pos == PASS: 
break board.move(pos) child = node.pos_child[pos] if", "self.bestchild = None self.pos = -1 self.wins = 0 self.losses", "self.board.zobrist.update(self, EMPTY) self.removestamp = TIMESTAMP if update: self.color = EMPTY", "pos = board.random_move() if pos == PASS: return PASS tree", "pos = to_pos(x, y) if board.useful(pos): return pos def computer_move(board):", "return PASS def add(self, pos): self.empty_pos[pos] = len(self.empties) self.empties.append(pos) def", "+= 1 else: weak_opps += 1 neighbour_ref.remove(neighbour_ref, update=False) dupe =", "WHITE, BLACK = 0, 1, 2 SHOW = {EMPTY: '.',", "move(self, color): global TIMESTAMP, MOVES TIMESTAMP += 1 MOVES +=", "x = divmod(pos, SIZE) return x, y class Square: def", "members1: for neighbour in member.neighbours: if neighbour.color == EMPTY: ledges1", "board): self.board = board self.empties = range(SIZE*SIZE) self.empty_pos = range(SIZE*SIZE)", "y) result.append(''.join([SHOW[square.color]+' ' for square in self.squares[start:start+SIZE]])) return '\\n'.join(result) class", "= 0, 1, 2 SHOW = {EMPTY: '.', WHITE: 'o',", "= 0 self.used = True for neighbour in self.neighbours: neighcolor", "EMPTY self.board.emptyset.add(self.pos) # if color == BLACK: # self.board.black_dead +=", "nboard = Board() for game in range(GAMES): node = tree", "9 GAMES = 200 KOMI = 7.5 EMPTY, WHITE, BLACK", "== self.color: neighs += 1 else: opps += 1 neighbour_ref.timestamp", "self.emptyset.random_choice() def useful_fast(self, square): if not square.used: for neighbour in", "def add(self, pos): self.empty_pos[pos] = len(self.empties) self.empties.append(pos) def remove(self, pos):", "nodevisits = self.wins+self.losses return winrate + math.sqrt((math.log(parentvisits))/(5*nodevisits)) def best_child(self): maxscore", "SIZE*SIZE*3 TIMESTAMP = 0 MOVES = 0 def to_pos(x,y): return", "+= 1 elif squarecolor == EMPTY: surround = 0 for", "neighs-weak_neighs strong_opps = opps-weak_opps return not dupe and \\ (empties", "if node.parent: node.parent.bestchild = node.parent.best_child() def score(self): winrate = self.wins/float(self.wins+self.losses)", "choices: i = int(random.random()*choices) pos = self.empties[i] if self.board.useful(pos): return", "and (strong_opps or weak_neighs))) def useful_moves(self): return [pos for pos", "neighbour_ref.pos == reference.pos: neighbour.remove(reference, update) else: if update: neighbour_ref.ledges +=", "return y * SIZE + x def to_xy(pos): y, x", "+= 1 return count def check(self): for square in self.squares:", "update=False) dupe = self.zobrist.dupe() self.zobrist.hash = old_hash strong_neighs = neighs-weak_neighs", "in range(5): versus_cpu() # warmup for i in range(n): t1", "in range(SIZE*SIZE)] self.parent = None def play(self, board): \"\"\" uct", "nboard.replay(board.history) node.play(nboard) # print 'moves', MOVES return tree.best_visited().pos def versus_cpu():", "[Square(self, pos) for pos in range(SIZE*SIZE)] for square in self.squares:", "i, pos): self.empties[i] = pos self.empty_pos[pos] = i class ZobristHash:", "squarecolor == color: count += 1 elif squarecolor == EMPTY:", "self.neighbours: if neighbour.color != EMPTY and neighbour.removestamp != TIMESTAMP: neighbour_ref", "1 self.board.zobrist.update(self, color) self.color = color self.reference = self self.ledges", "nboard.reset() nboard.replay(board.history) node.play(nboard) # print 'moves', MOVES return tree.best_visited().pos def", "members1.copy(): for neighbour in member.neighbours: if neighbour.color == square.color and", "= 0 self.black_dead = 0 def move(self, 
pos): square =", "= [None for x in range(SIZE*SIZE)] self.parent = None def", "# warmup for i in range(n): t1 = time.time() versus_cpu()", "node.parent.bestchild = node.parent.best_child() def score(self): winrate = self.wins/float(self.wins+self.losses) parentvisits =", "elif self.lastmove == PASS: self.finished = True if self.color ==", "neighcolor == EMPTY: self.ledges += 1 else: neighbour_ref = neighbour.find(update=True)", "KOMI = 7.5 EMPTY, WHITE, BLACK = 0, 1, 2", "return PASS if text == 'q': raise EOFError try: x,", "if board.finished: break board.move(board.random_move()) def update_path(self, board, color, path): \"\"\"", "def replay(self, history): for pos in history: self.move(pos) def score(self,", "divmod(pos, SIZE) return x, y class Square: def __init__(self, board,", "neighbour in square.neighbours: neighcolor = neighbour.color if neighcolor == EMPTY:", "for neighbour in member.neighbours: if neighbour.color == EMPTY: ledges1 +=", "+ dy if 0 <= newx < SIZE and 0", "def dupe(self): return self.hash in self.hash_set class Board: def __init__(self):", "self.history = [] self.white_dead = 0 self.black_dead = 0 def", "self path = [node] while True: pos = node.select(board) if", "def __init__(self): self.bestchild = None self.pos = -1 self.wins =", "maxvisits = -1 maxchild = None for child in self.pos_child:", "self.zobrist.hash = old_hash strong_neighs = neighs-weak_neighs strong_opps = opps-weak_opps return", "None self.pos = -1 self.wins = 0 self.losses = 0", "t1 = time.time() versus_cpu() t2 = time.time() times.append(t2 - t1)", "i = random.randrange(len(self.unexplored)) pos = self.unexplored[i] self.unexplored[i] = self.unexplored[len(self.unexplored)-1] self.unexplored.pop()", "some minor modifications # (more output, took out the benchmark", "surround += 1 if surround == len(square.neighbours): count += 1", "= opps = weak_opps = neighs = weak_neighs = 0", "self.bestchild: return self.bestchild.pos else: return PASS def random_playout(self, board): \"\"\"", "# self.board.white_dead += 1 for neighbour in self.neighbours: if neighbour.color", "== EMPTY: empties += 1 continue neighbour_ref = neighbour.find() if", "neighbour in square.neighbours: if neighbour.color == EMPTY: return True return", "neighcolor == self.color: weak_neighs += 1 else: weak_opps += 1", "self.bestchild.pos else: return PASS def random_playout(self, board): \"\"\" random play", "for square in self.board.squares: self.hash ^= square.zobrist_strings[EMPTY] self.hash_set.clear() self.hash_set.add(self.hash) def", "board self.pos = pos self.timestamp = TIMESTAMP self.removestamp = TIMESTAMP", "and neighbour not in members1: changed = True members1.add(neighbour) ledges1", "self.color = WHITE else: self.color = BLACK self.lastmove = pos", "PASS: return PASS tree = UCTNode() tree.unexplored = board.useful_moves() nboard", "self.empties[i] if self.board.useful(pos): return pos choices -= 1 self.set(i, self.empties[choices])", "else: color = BLACK if wins == (color == BLACK):", "= self.squares[pos] if self.useful_fast(square): return True old_hash = self.zobrist.hash self.zobrist.update(square,", "count def check(self): for square in self.squares: if square.color ==", "time.time() versus_cpu() t2 = time.time() times.append(t2 - t1) return times", "self.neighbours: neighcolor = neighbour.color if neighcolor == EMPTY: self.ledges +=", "[] self.white_dead = 0 self.black_dead = 0 def move(self, pos):", "TIMESTAMP: if neighcolor == self.color: neighs += 1 else: opps", "child return maxchild def user_move(board): 
while True: text = raw_input('?').strip()", "winrate + math.sqrt((math.log(parentvisits))/(5*nodevisits)) def best_child(self): maxscore = -1 maxchild =", "child.score() return maxchild def best_visited(self): maxvisits = -1 maxchild =", "WHITE: count = KOMI + self.black_dead else: count = self.white_dead", "maxchild def user_move(board): while True: text = raw_input('?').strip() if text", "return winrate + math.sqrt((math.log(parentvisits))/(5*nodevisits)) def best_child(self): maxscore = -1 maxchild", "child maxscore = child.score() return maxchild def best_visited(self): maxvisits =", "1 continue neighbour_ref = neighbour.find() if neighbour_ref.timestamp != TIMESTAMP: if", "== color: if neighbour_ref.reference.pos != self.pos: self.ledges += neighbour_ref.ledges neighbour_ref.reference", "weak_opps or (strong_neighs and (strong_opps or weak_neighs))) def useful_moves(self): return", "reference.pos != self.pos: reference = reference.find(update) if update: self.reference =", "0 self.losses = 0 self.pos_child = [None for x in", "for x in range(SIZE*SIZE)] self.parent = None def play(self, board):", "from pypy-benchmarks/own/chaos.py, with some minor modifications # (more output, took", "if not (0 <= x < SIZE and 0 <=", "return [pos for pos in self.emptyset.empties if self.useful(pos)] def replay(self,", "= neighbour.color if neighcolor == EMPTY: self.ledges += 1 else:", "return maxchild def best_visited(self): maxvisits = -1 maxchild = None", "dupe(self): return self.hash in self.hash_set class Board: def __init__(self): self.squares", "1 else: node.losses += 1 if node.parent: node.parent.bestchild = node.parent.best_child()", "break path.append(child) node = child self.random_playout(board) self.update_path(board, color, path) def", "__init__(self): self.squares = [Square(self, pos) for pos in range(SIZE*SIZE)] for", "else: self.color = BLACK self.lastmove = pos self.history.append(pos) def random_move(self):", "players pass \"\"\" for x in range(MAXMOVES): # XXX while", "if neighbour_ref.temp_ledges == 0: if neighcolor == self.color: weak_neighs +=", "= True if self.color == BLACK: self.color = WHITE else:", "elif squarecolor == EMPTY: surround = 0 for neighbour in", "square in self.squares[start:start+SIZE]])) return '\\n'.join(result) class UCTNode: def __init__(self): self.bestchild", "color = WHITE else: color = BLACK if wins ==", "self.hash ^= square.zobrist_strings[EMPTY] self.hash_set.clear() self.hash_set.add(self.hash) def update(self, square, color): self.hash", "1 neighbour_ref.remove(neighbour_ref, update=False) dupe = self.zobrist.dupe() self.zobrist.hash = old_hash strong_neighs", "self.hash_set.add(self.hash) def update(self, square, color): self.hash ^= square.zobrist_strings[square.color] self.hash ^=", "changed = False for member in members1.copy(): for neighbour in", "!= self.pos: reference = reference.find(update) if update: self.reference = reference", "= set() for square in self.squares: if square.color == EMPTY:", "in self.pos_child: # if child: # print to_xy(child.pos), child.wins, child.losses,", "y = self.pos % SIZE, self.pos / SIZE; self.neighbours =", "= random.randrange(len(self.unexplored)) pos = self.unexplored[i] self.unexplored[i] = self.unexplored[len(self.unexplored)-1] self.unexplored.pop() return", "score(self): winrate = self.wins/float(self.wins+self.losses) parentvisits = self.parent.wins+self.parent.losses if not parentvisits:", "self.zobrist = ZobristHash(self) self.color = BLACK self.finished = False self.lastmove", "if neighbour.color == 
square.color and neighbour not in members1: changed", "color) self.color = color self.reference = self self.ledges = 0", "= None for child in self.pos_child: # if child: #", "= [] for dx, dy in [(-1, 0), (1, 0),", "MOVES TIMESTAMP += 1 MOVES += 1 self.board.zobrist.update(self, color) self.color", "in self.squares: if square2.color != EMPTY and square2.find() == root:", "= color self.reference = self self.ledges = 0 self.used =", "path) def select(self, board): \"\"\" select move; unexplored children first,", "maxvisits: maxvisits, maxchild = (child.wins+child.losses), child return maxchild def user_move(board):", "print to_xy(child.pos), child.wins, child.losses, child.score() if child and (child.wins+child.losses) >", "maxchild = None for child in self.pos_child: # if child:", "neighbour.remove(reference, update) else: if update: neighbour_ref.ledges += 1 def find(self,", "neighbour.color == square.color and neighbour not in members1: changed =", "EMPTY and neighbour.removestamp != TIMESTAMP: neighbour_ref = neighbour.find(update) if neighbour_ref.pos", "while choices: i = int(random.random()*choices) pos = self.empties[i] if self.board.useful(pos):", "for member in members1: for neighbour in member.neighbours: if neighbour.color", "child.losses, child.score() if child and (child.wins+child.losses) > maxvisits: maxvisits, maxchild", "= set([square]) changed = True while changed: changed = False", "if neighcolor == EMPTY: empties += 1 continue neighbour_ref =", "PASS if text == 'q': raise EOFError try: x, y", "= ZobristHash(self) self.color = BLACK self.finished = False self.lastmove =", "update win/loss count along path \"\"\" wins = board.score(BLACK) >=", "BLACK: # self.board.black_dead += 1 # else: # self.board.white_dead +=", "while not self.finished? 
if board.finished: break board.move(board.random_move()) def update_path(self, board,", "self.used = True for neighbour in self.neighbours: neighcolor = neighbour.color", "def useful_moves(self): return [pos for pos in self.emptyset.empties if self.useful(pos)]", "count = self.white_dead for square in self.squares: squarecolor = square.color", "EMPTY: empties2.add(square.pos) def __repr__(self): result = [] for y in", "assert members1 == members2 assert ledges1 == ledges2, ('ledges differ", "return x, y class Square: def __init__(self, board, pos): self.board", "True return False def useful(self, pos): global TIMESTAMP TIMESTAMP +=", "= self.pos % SIZE, self.pos / SIZE; self.neighbours = []", "== members2 assert ledges1 == ledges2, ('ledges differ at %r:", "= BLACK if wins == (color == BLACK): node.wins +=", "1)]: newx, newy = x + dx, y + dy", "else: neighbour_ref = neighbour.find(update=True) if neighcolor == color: if neighbour_ref.reference.pos", "def random_move(self): return self.emptyset.random_choice() def useful_fast(self, square): if not square.used:", "neighbour_ref.timestamp != TIMESTAMP: if neighcolor == self.color: neighs += 1", "False self.lastmove = -2 self.history = [] self.white_dead = 0", "newy)]) def move(self, color): global TIMESTAMP, MOVES TIMESTAMP += 1", "root, members1 #print 'ledges1', square, ledges1 members2 = set() for", "board.move(board.random_move()) def update_path(self, board, color, path): \"\"\" update win/loss count", "= pos self.history.append(pos) def random_move(self): return self.emptyset.random_choice() def useful_fast(self, square):", "add(self, pos): self.empty_pos[pos] = len(self.empties) self.empties.append(pos) def remove(self, pos): self.set(self.empty_pos[pos],", "1 return count def check(self): for square in self.squares: if", "neighbour_ref = neighbour.find(update) if neighbour_ref.pos == reference.pos: neighbour.remove(reference, update) else:", "in self.squares[start:start+SIZE]])) return '\\n'.join(result) class UCTNode: def __init__(self): self.bestchild =", "self.unexplored[len(self.unexplored)-1] self.unexplored.pop() return pos elif self.bestchild: return self.bestchild.pos else: return", "i in range(n): t1 = time.time() versus_cpu() t2 = time.time()", "1 # else: # self.board.white_dead += 1 for neighbour in", "if color == BLACK: # self.board.black_dead += 1 # else:", "square.move(self.color) self.emptyset.remove(square.pos) elif self.lastmove == PASS: self.finished = True if", "return self.bestchild.pos else: return PASS def random_playout(self, board): \"\"\" random", "= board.score(BLACK) >= board.score(WHITE) for node in path: if color", "self.set(self.empty_pos[pos], self.empties[len(self.empties)-1]) self.empties.pop() def set(self, i, pos): self.empties[i] = pos", "neighbour.find(update=True) if neighcolor == color: if neighbour_ref.reference.pos != self.pos: self.ledges", "!= PASS: square.move(self.color) self.emptyset.remove(square.pos) elif self.lastmove == PASS: self.finished =", "self.board = board self.hash_set = set() self.hash = 0 for", "pos in history: self.move(pos) def score(self, color): if color ==", "<= newy < SIZE: self.neighbours.append(self.board.squares[to_pos(newx, newy)]) def move(self, color): global", "class Square: def __init__(self, board, pos): self.board = board self.pos", "0 for neighbour in square.neighbours: neighcolor = neighbour.color if neighcolor", "self.removestamp = TIMESTAMP self.zobrist_strings = [random.randrange(sys.maxint) for i in range(3)]", "maxscore: maxchild = child maxscore = 
child.score() return maxchild def", "print \"versus_cpu\" random.seed(1) board = Board() pos = computer_move(board) def", "self.finished = True if self.color == BLACK: self.color = WHITE", "in members1: changed = True members1.add(neighbour) ledges1 = 0 for", "board self.hash_set = set() self.hash = 0 for square in", "i = int(random.random()*choices) pos = self.empties[i] if self.board.useful(pos): return pos", "useful_moves(self): return [pos for pos in self.emptyset.empties if self.useful(pos)] def", "= True members1.add(neighbour) ledges1 = 0 for member in members1:", "root, members1 #print 'ledges2', square, ledges2 assert members1 == members2", "self.wins/float(self.wins+self.losses) parentvisits = self.parent.wins+self.parent.losses if not parentvisits: return winrate nodevisits", "pos): self.board = board self.pos = pos self.timestamp = TIMESTAMP", "neighbour_ref.temp_ledges = neighbour_ref.ledges neighbour_ref.temp_ledges -= 1 if neighbour_ref.temp_ledges == 0:", "if pos == PASS: return PASS tree = UCTNode() tree.unexplored", "in members1.copy(): for neighbour in member.neighbours: if neighbour.color == square.color", "True members1.add(neighbour) ledges1 = 0 for member in members1: for", "2 SHOW = {EMPTY: '.', WHITE: 'o', BLACK: 'x'} PASS", "then according to uct value \"\"\" if self.unexplored: i =", "1 else: opps += 1 neighbour_ref.timestamp = TIMESTAMP neighbour_ref.temp_ledges =", "reference return reference def __repr__(self): return repr(to_xy(self.pos)) class EmptySet: def", "if 0 <= newx < SIZE and 0 <= newy", "board): self.board = board self.hash_set = set() self.hash = 0", "= to_pos(x, y) if board.useful(pos): return pos def computer_move(board): global", "== len(square.neighbours): count += 1 return count def check(self): for", "= child maxscore = child.score() return maxchild def best_visited(self): maxvisits", "in path: if color == BLACK: color = WHITE else:", "neighbour.find(update) if neighbour_ref.pos == reference.pos: neighbour.remove(reference, update) else: if update:", "== 0: if neighcolor == self.color: weak_neighs += 1 else:", "if update: self.reference = reference return reference def __repr__(self): return", "for i in text.split()] except ValueError: continue if not (0", "reference def __repr__(self): return repr(to_xy(self.pos)) class EmptySet: def __init__(self, board):", "self.removestamp = TIMESTAMP if update: self.color = EMPTY self.board.emptyset.add(self.pos) #", "= set() for square2 in self.squares: if square2.color != EMPTY", "square = self.squares[pos] if self.useful_fast(square): return True old_hash = self.zobrist.hash", "tree.best_visited().pos def versus_cpu(): print \"versus_cpu\" random.seed(1) board = Board() pos", "= i class ZobristHash: def __init__(self, board): self.board = board", "EMPTY square.used = False self.emptyset = EmptySet(self) self.zobrist = ZobristHash(self)", "i in range(3)] def set_neighbours(self): x, y = self.pos %", "dx, y + dy if 0 <= newx < SIZE", "in self.board.squares: self.hash ^= square.zobrist_strings[EMPTY] self.hash_set.clear() self.hash_set.add(self.hash) def update(self, square,", "SHOW = {EMPTY: '.', WHITE: 'o', BLACK: 'x'} PASS =", "neighbour_ref = neighbour.find(update=True) if neighcolor == color: if neighbour_ref.reference.pos !=", "in members1: for neighbour in member.neighbours: if neighbour.color == EMPTY:", "def update_path(self, board, color, path): \"\"\" update win/loss count along", "+= 1 def find(self, update=False): reference = self.reference if reference.pos", "in range(SIZE): start = 
to_pos(0, y) result.append(''.join([SHOW[square.color]+' ' for square", "for node in path: if color == BLACK: color =", "update: self.reference = reference return reference def __repr__(self): return repr(to_xy(self.pos))", "neighbour in self.neighbours: if neighbour.color != EMPTY and neighbour.removestamp !=", "if not parentvisits: return winrate nodevisits = self.wins+self.losses return winrate", "0 for member in members1: for neighbour in member.neighbours: if", "random.seed(1) board = Board() pos = computer_move(board) def main(n): times", "= Board() for game in range(GAMES): node = tree nboard.reset()", "to_pos(x, y) if board.useful(pos): return pos def computer_move(board): global MOVES", "# self.board.black_dead += 1 # else: # self.board.white_dead += 1", "= node.select(board) if pos == PASS: break board.move(pos) child =", "\"\"\" uct tree search \"\"\" color = board.color node =", "neighbour.color if neighcolor == EMPTY: empties += 1 continue neighbour_ref", "+= 1 if node.parent: node.parent.bestchild = node.parent.best_child() def score(self): winrate", "path.append(child) break path.append(child) node = child self.random_playout(board) self.update_path(board, color, path)", "parentvisits = self.parent.wins+self.parent.losses if not parentvisits: return winrate nodevisits =", "and \\ (empties or weak_opps or (strong_neighs and (strong_opps or", "-1 self.wins = 0 self.losses = 0 self.pos_child = [None", "maxscore = child.score() return maxchild def best_visited(self): maxvisits = -1", "self.empties[len(self.empties)-1]) self.empties.pop() def set(self, i, pos): self.empties[i] = pos self.empty_pos[pos]", "board.finished: break board.move(board.random_move()) def update_path(self, board, color, path): \"\"\" update", "pos self.empty_pos[pos] = i class ZobristHash: def __init__(self, board): self.board", "= reference.find(update) if update: self.reference = reference return reference def", "neighcolor == color: if neighbour_ref.reference.pos != self.pos: self.ledges += neighbour_ref.ledges", "for child in self.pos_child: if child and child.score() > maxscore:", "neighbour_ref.ledges -= 1 if neighbour_ref.ledges == 0: neighbour.remove(neighbour_ref) self.board.zobrist.add() def", "def remove(self, reference, update=True): self.board.zobrist.update(self, EMPTY) self.removestamp = TIMESTAMP if", "squarecolor = square.color if squarecolor == color: count += 1", "node.play(nboard) # print 'moves', MOVES return tree.best_visited().pos def versus_cpu(): print", "set() for square in self.squares: if square.color == EMPTY: empties2.add(square.pos)", "root.ledges #print 'members2', square, root, members1 #print 'ledges2', square, ledges2", "in [(-1, 0), (1, 0), (0, -1), (0, 1)]: newx,", "set([square]) changed = True while changed: changed = False for", "%d' % (square, ledges1, ledges2)) empties1 = set(self.emptyset.empties) empties2 =", "and square2.find() == root: members2.add(square2) ledges2 = root.ledges #print 'members2',", "for y in range(SIZE): start = to_pos(0, y) result.append(''.join([SHOW[square.color]+' '", "old_hash strong_neighs = neighs-weak_neighs strong_opps = opps-weak_opps return not dupe", "node.pos_child[pos] = UCTNode() child.unexplored = board.useful_moves() child.pos = pos child.parent", "board): \"\"\" select move; unexplored children first, then according to", "strong_opps = opps-weak_opps return not dupe and \\ (empties or", "1 self.set(i, self.empties[choices]) self.set(choices, pos) return PASS def add(self, pos):", "= Board() pos = computer_move(board) def 
main(n): times = []", "time.time() times.append(t2 - t1) return times if __name__ == \"__main__\":", "self.pos: self.ledges += neighbour_ref.ledges neighbour_ref.reference = self self.ledges -= 1", "+= 1 root = square.find() #print 'members1', square, root, members1", "True if self.color == BLACK: self.color = WHITE else: self.color", "def __repr__(self): return repr(to_xy(self.pos)) class EmptySet: def __init__(self, board): self.board", "neighbour_ref.ledges neighbour_ref.temp_ledges -= 1 if neighbour_ref.temp_ledges == 0: if neighcolor", "if self.useful(pos)] def replay(self, history): for pos in history: self.move(pos)", "computer_move(board) def main(n): times = [] for i in range(5):", "versus_cpu() # warmup for i in range(n): t1 = time.time()", "-= 1 self.set(i, self.empties[choices]) self.set(choices, pos) return PASS def add(self,", "return pos def computer_move(board): global MOVES pos = board.random_move() if", "for neighbour in square.neighbours: if neighbour.color == color: surround +=", "TIMESTAMP neighbour_ref.temp_ledges = neighbour_ref.ledges neighbour_ref.temp_ledges -= 1 if neighbour_ref.temp_ledges ==", "= square.find() #print 'members1', square, root, members1 #print 'ledges1', square,", "class Board: def __init__(self): self.squares = [Square(self, pos) for pos", "'members2', square, root, members1 #print 'ledges2', square, ledges2 assert members1", "= neighbour.find(update) if neighbour_ref.pos == reference.pos: neighbour.remove(reference, update) else: if", "newy < SIZE: self.neighbours.append(self.board.squares[to_pos(newx, newy)]) def move(self, color): global TIMESTAMP,", "0), (0, -1), (0, 1)]: newx, newy = x +", "maxvisits, maxchild = (child.wins+child.losses), child return maxchild def user_move(board): while", "= int(random.random()*choices) pos = self.empties[i] if self.board.useful(pos): return pos choices", "reset(self): for square in self.squares: square.color = EMPTY square.used =", "self self.ledges = 0 self.used = True for neighbour in", "<= y < SIZE): continue pos = to_pos(x, y) if", "square.neighbours: neighcolor = neighbour.color if neighcolor == EMPTY: empties +=", "Board() pos = computer_move(board) def main(n): times = [] for", "SIZE) return x, y class Square: def __init__(self, board, pos):", "self.wins+self.losses return winrate + math.sqrt((math.log(parentvisits))/(5*nodevisits)) def best_child(self): maxscore = -1", "self.unexplored.pop() return pos elif self.bestchild: return self.bestchild.pos else: return PASS", "TIMESTAMP += 1 square = self.squares[pos] if self.useful_fast(square): return True", "= -1 maxchild = None for child in self.pos_child: #", "for neighbour in square.neighbours: if neighbour.color == EMPTY: return True", "if surround == len(square.neighbours): count += 1 return count def", "ledges1 == ledges2, ('ledges differ at %r: %d %d' %", "self.color) empties = opps = weak_opps = neighs = weak_neighs", "reference.find(update) if update: self.reference = reference return reference def __repr__(self):", "'p': return PASS if text == 'q': raise EOFError try:", "TIMESTAMP TIMESTAMP += 1 square = self.squares[pos] if self.useful_fast(square): return", "both players pass \"\"\" for x in range(MAXMOVES): # XXX", "return self.hash in self.hash_set class Board: def __init__(self): self.squares =", "= 0 self.losses = 0 self.pos_child = [None for x", "surround == len(square.neighbours): count += 1 return count def check(self):", "random play until both players pass \"\"\" for x in", "== root: members2.add(square2) ledges2 = root.ledges 
#print 'members2', square, root,", "reference = reference.find(update) if update: self.reference = reference return reference", "= TIMESTAMP if update: self.color = EMPTY self.board.emptyset.add(self.pos) # if", "[] for i in range(5): versus_cpu() # warmup for i", "newx, newy = x + dx, y + dy if", "= False self.emptyset = EmptySet(self) self.zobrist = ZobristHash(self) self.color =", "= x + dx, y + dy if 0 <=", "PASS: square.move(self.color) self.emptyset.remove(square.pos) elif self.lastmove == PASS: self.finished = True", "main(n): times = [] for i in range(5): versus_cpu() #", "times = [] for i in range(5): versus_cpu() # warmup", "= [] self.white_dead = 0 self.black_dead = 0 def move(self,", "= divmod(pos, SIZE) return x, y class Square: def __init__(self,", "self.squares[pos] if pos != PASS: square.move(self.color) self.emptyset.remove(square.pos) elif self.lastmove ==", "UCTNode() tree.unexplored = board.useful_moves() nboard = Board() for game in", "__init__(self, board, pos): self.board = board self.pos = pos self.timestamp", "[(-1, 0), (1, 0), (0, -1), (0, 1)]: newx, newy", "self.lastmove = pos self.history.append(pos) def random_move(self): return self.emptyset.random_choice() def useful_fast(self,", "weak_opps = neighs = weak_neighs = 0 for neighbour in", "# from pypy-benchmarks/own/chaos.py, with some minor modifications # (more output,", "False self.emptyset = EmptySet(self) self.zobrist = ZobristHash(self) self.color = BLACK", "neighbour_ref.ledges neighbour_ref.reference = self self.ledges -= 1 else: neighbour_ref.ledges -=", "color, path) def select(self, board): \"\"\" select move; unexplored children", "# print to_xy(child.pos), child.wins, child.losses, child.score() if child and (child.wins+child.losses)", "TIMESTAMP if update: self.color = EMPTY self.board.emptyset.add(self.pos) # if color", "1 if surround == len(square.neighbours): count += 1 return count", "= root.ledges #print 'members2', square, root, members1 #print 'ledges2', square,", "empties2.add(square.pos) def __repr__(self): result = [] for y in range(SIZE):", "TIMESTAMP += 1 MOVES += 1 self.board.zobrist.update(self, color) self.color =", "= None for child in self.pos_child: if child and child.score()", "self.pos_child = [None for x in range(SIZE*SIZE)] self.parent = None", "output, took out the benchmark harness) # import random, math,", "child in self.pos_child: if child and child.score() > maxscore: maxchild", "= self.zobrist.hash self.zobrist.update(square, self.color) empties = opps = weak_opps =", "except ValueError: continue if not (0 <= x < SIZE", "square.set_neighbours() self.reset() def reset(self): for square in self.squares: square.color =", "board self.empties = range(SIZE*SIZE) self.empty_pos = range(SIZE*SIZE) def random_choice(self): choices", "color: surround += 1 if surround == len(square.neighbours): count +=", "self.reference if reference.pos != self.pos: reference = reference.find(update) if update:", "= 0 for neighbour in square.neighbours: if neighbour.color == color:", "range(5): versus_cpu() # warmup for i in range(n): t1 =", "= len(self.empties) while choices: i = int(random.random()*choices) pos = self.empties[i]", "return tree.best_visited().pos def versus_cpu(): print \"versus_cpu\" random.seed(1) board = Board()", "dy if 0 <= newx < SIZE and 0 <=", "!= self.pos: self.ledges += neighbour_ref.ledges neighbour_ref.reference = self self.ledges -=", "== reference.pos: neighbour.remove(reference, update) else: if update: neighbour_ref.ledges += 1", "+= 1 else: opps += 1 
neighbour_ref.timestamp = TIMESTAMP neighbour_ref.temp_ledges", "neighbour_ref.temp_ledges == 0: if neighcolor == self.color: weak_neighs += 1", "#print 'members2', square, root, members1 #print 'ledges2', square, ledges2 assert", "score(self, color): if color == WHITE: count = KOMI +", "EMPTY: ledges1 += 1 root = square.find() #print 'members1', square,", "reference, update=True): self.board.zobrist.update(self, EMPTY) self.removestamp = TIMESTAMP if update: self.color", "SIZE, self.pos / SIZE; self.neighbours = [] for dx, dy", "else: node.losses += 1 if node.parent: node.parent.bestchild = node.parent.best_child() def", "== BLACK: color = WHITE else: color = BLACK if", "= -1 MAXMOVES = SIZE*SIZE*3 TIMESTAMP = 0 MOVES =", "EMPTY) self.removestamp = TIMESTAMP if update: self.color = EMPTY self.board.emptyset.add(self.pos)", "= child self.random_playout(board) self.update_path(board, color, path) def select(self, board): \"\"\"", "color: if neighbour_ref.reference.pos != self.pos: self.ledges += neighbour_ref.ledges neighbour_ref.reference =", "ledges1 = 0 for member in members1: for neighbour in", "neighs += 1 else: opps += 1 neighbour_ref.timestamp = TIMESTAMP", "result.append(''.join([SHOW[square.color]+' ' for square in self.squares[start:start+SIZE]])) return '\\n'.join(result) class UCTNode:", "color == WHITE: count = KOMI + self.black_dead else: count", "pos in self.emptyset.empties if self.useful(pos)] def replay(self, history): for pos", "text == 'p': return PASS if text == 'q': raise", "0), (1, 0), (0, -1), (0, 1)]: newx, newy =", "range(SIZE*SIZE)] self.parent = None def play(self, board): \"\"\" uct tree", "= pos self.empty_pos[pos] = i class ZobristHash: def __init__(self, board):", "y in range(SIZE): start = to_pos(0, y) result.append(''.join([SHOW[square.color]+' ' for", "node.parent.best_child() def score(self): winrate = self.wins/float(self.wins+self.losses) parentvisits = self.parent.wins+self.parent.losses if", "in history: self.move(pos) def score(self, color): if color == WHITE:", "and child.score() > maxscore: maxchild = child maxscore = child.score()", "else: weak_opps += 1 neighbour_ref.remove(neighbour_ref, update=False) dupe = self.zobrist.dupe() self.zobrist.hash", "child.parent = node path.append(child) break path.append(child) node = child self.random_playout(board)", "[int(i) for i in text.split()] except ValueError: continue if not", "and 0 <= newy < SIZE: self.neighbours.append(self.board.squares[to_pos(newx, newy)]) def move(self,", "self.history.append(pos) def random_move(self): return self.emptyset.random_choice() def useful_fast(self, square): if not", "board, pos): self.board = board self.pos = pos self.timestamp =", "self.board = board self.pos = pos self.timestamp = TIMESTAMP self.removestamp", "= set() self.hash = 0 for square in self.board.squares: self.hash", "if text == 'p': return PASS if text == 'q':", "== 'q': raise EOFError try: x, y = [int(i) for", "self.empty_pos[pos] = i class ZobristHash: def __init__(self, board): self.board =", "SIZE = 9 GAMES = 200 KOMI = 7.5 EMPTY,", "200 KOMI = 7.5 EMPTY, WHITE, BLACK = 0, 1,", "(0 <= x < SIZE and 0 <= y <", "to_pos(x,y): return y * SIZE + x def to_xy(pos): y,", "pos != PASS: square.move(self.color) self.emptyset.remove(square.pos) elif self.lastmove == PASS: self.finished", "count += 1 elif squarecolor == EMPTY: surround = 0", "= weak_opps = neighs = weak_neighs = 0 for neighbour", "best_child(self): maxscore = -1 maxchild = None for child in", "WHITE else: self.color = BLACK self.lastmove = pos 
self.history.append(pos) def", "= [random.randrange(sys.maxint) for i in range(3)] def set_neighbours(self): x, y", "else: return PASS def random_playout(self, board): \"\"\" random play until", "self.parent = None def play(self, board): \"\"\" uct tree search", "node = child self.random_playout(board) self.update_path(board, color, path) def select(self, board):", "== EMPTY: continue members1 = set([square]) changed = True while", "self.pos / SIZE; self.neighbours = [] for dx, dy in", "__init__(self): self.bestchild = None self.pos = -1 self.wins = 0", "members1: changed = True members1.add(neighbour) ledges1 = 0 for member", "GAMES = 200 KOMI = 7.5 EMPTY, WHITE, BLACK =", "for square in self.squares[start:start+SIZE]])) return '\\n'.join(result) class UCTNode: def __init__(self):", "with some minor modifications # (more output, took out the", "modifications # (more output, took out the benchmark harness) #", "self.squares: square.set_neighbours() self.reset() def reset(self): for square in self.squares: square.color", "x in range(MAXMOVES): # XXX while not self.finished? if board.finished:", "members2 assert ledges1 == ledges2, ('ledges differ at %r: %d", "0 def move(self, pos): square = self.squares[pos] if pos !=", "def random_playout(self, board): \"\"\" random play until both players pass", "board.score(BLACK) >= board.score(WHITE) for node in path: if color ==", "SIZE; self.neighbours = [] for dx, dy in [(-1, 0),", "select(self, board): \"\"\" select move; unexplored children first, then according", "ledges2, ('ledges differ at %r: %d %d' % (square, ledges1,", "self.squares: squarecolor = square.color if squarecolor == color: count +=", "member in members1: for neighbour in member.neighbours: if neighbour.color ==", "-1 maxchild = None for child in self.pos_child: # if", "remove(self, pos): self.set(self.empty_pos[pos], self.empties[len(self.empties)-1]) self.empties.pop() def set(self, i, pos): self.empties[i]", "path: if color == BLACK: color = WHITE else: color", "0 for square in self.board.squares: self.hash ^= square.zobrist_strings[EMPTY] self.hash_set.clear() self.hash_set.add(self.hash)", "= weak_neighs = 0 for neighbour in square.neighbours: neighcolor =", "neighbour_ref.remove(neighbour_ref, update=False) dupe = self.zobrist.dupe() self.zobrist.hash = old_hash strong_neighs =", "= pos self.timestamp = TIMESTAMP self.removestamp = TIMESTAMP self.zobrist_strings =", "pypy-benchmarks/own/chaos.py, with some minor modifications # (more output, took out", "warmup for i in range(n): t1 = time.time() versus_cpu() t2", "!= TIMESTAMP: if neighcolor == self.color: neighs += 1 else:", "child in self.pos_child: # if child: # print to_xy(child.pos), child.wins,", "(1, 0), (0, -1), (0, 1)]: newx, newy = x", "pos = node.select(board) if pos == PASS: break board.move(pos) child", "for i in range(3)] def set_neighbours(self): x, y = self.pos", "uct value \"\"\" if self.unexplored: i = random.randrange(len(self.unexplored)) pos =", "if board.useful(pos): return pos def computer_move(board): global MOVES pos =", "newx < SIZE and 0 <= newy < SIZE: self.neighbours.append(self.board.squares[to_pos(newx,", "+= 1 # else: # self.board.white_dead += 1 for neighbour", "maxchild = None for child in self.pos_child: if child and", "EMPTY: self.ledges += 1 else: neighbour_ref = neighbour.find(update=True) if neighcolor", "if reference.pos != self.pos: reference = reference.find(update) if update: self.reference", "update_path(self, board, color, path): \"\"\" update win/loss count along path", 
"weak_neighs += 1 else: weak_opps += 1 neighbour_ref.remove(neighbour_ref, update=False) dupe", "self.white_dead for square in self.squares: squarecolor = square.color if squarecolor", "global TIMESTAMP TIMESTAMP += 1 square = self.squares[pos] if self.useful_fast(square):", "self.squares: if square.color == EMPTY: empties2.add(square.pos) def __repr__(self): result =", "MOVES pos = board.random_move() if pos == PASS: return PASS", "len(self.empties) self.empties.append(pos) def remove(self, pos): self.set(self.empty_pos[pos], self.empties[len(self.empties)-1]) self.empties.pop() def set(self,", "continue neighbour_ref = neighbour.find() if neighbour_ref.timestamp != TIMESTAMP: if neighcolor", "def useful_fast(self, square): if not square.used: for neighbour in square.neighbours:", "def __init__(self, board): self.board = board self.empties = range(SIZE*SIZE) self.empty_pos", "Board() for game in range(GAMES): node = tree nboard.reset() nboard.replay(board.history)", "BLACK): node.wins += 1 else: node.losses += 1 if node.parent:", "self.ledges += neighbour_ref.ledges neighbour_ref.reference = self self.ledges -= 1 else:", "def computer_move(board): global MOVES pos = board.random_move() if pos ==", "< SIZE: self.neighbours.append(self.board.squares[to_pos(newx, newy)]) def move(self, color): global TIMESTAMP, MOVES", "child = node.pos_child[pos] if not child: child = node.pos_child[pos] =", "[] for y in range(SIZE): start = to_pos(0, y) result.append(''.join([SHOW[square.color]+'", "update) else: if update: neighbour_ref.ledges += 1 def find(self, update=False):", "1 if neighbour_ref.ledges == 0: neighbour.remove(neighbour_ref) self.board.zobrist.add() def remove(self, reference,", "self.empties.append(pos) def remove(self, pos): self.set(self.empty_pos[pos], self.empties[len(self.empties)-1]) self.empties.pop() def set(self, i,", "computer_move(board): global MOVES pos = board.random_move() if pos == PASS:", "if update: self.color = EMPTY self.board.emptyset.add(self.pos) # if color ==", "members2.add(square2) ledges2 = root.ledges #print 'members2', square, root, members1 #print", "square2.color != EMPTY and square2.find() == root: members2.add(square2) ledges2 =", "== BLACK: # self.board.black_dead += 1 # else: # self.board.white_dead", "x, y = self.pos % SIZE, self.pos / SIZE; self.neighbours", "else: opps += 1 neighbour_ref.timestamp = TIMESTAMP neighbour_ref.temp_ledges = neighbour_ref.ledges", "+= 1 else: node.losses += 1 if node.parent: node.parent.bestchild =", "dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:", "__init__(self, board): self.board = board self.empties = range(SIZE*SIZE) self.empty_pos =", "until both players pass \"\"\" for x in range(MAXMOVES): #", "#print 'members1', square, root, members1 #print 'ledges1', square, ledges1 members2", "square in self.squares: if square.color == EMPTY: empties2.add(square.pos) def __repr__(self):", "neighbour_ref.ledges == 0: neighbour.remove(neighbour_ref) self.board.zobrist.add() def remove(self, reference, update=True): self.board.zobrist.update(self,", "Board: def __init__(self): self.squares = [Square(self, pos) for pos in", "useful(self, pos): global TIMESTAMP TIMESTAMP += 1 square = self.squares[pos]", "if neighbour_ref.pos == reference.pos: neighbour.remove(reference, update) else: if update: neighbour_ref.ledges", "neighbour in square.neighbours: if neighbour.color == color: surround += 1", "(child.wins+child.losses) > maxvisits: maxvisits, maxchild = (child.wins+child.losses), child return maxchild", "according to uct value 
\"\"\" if self.unexplored: i = random.randrange(len(self.unexplored))", "1 if neighbour_ref.temp_ledges == 0: if neighcolor == self.color: weak_neighs", "= 0 MOVES = 0 def to_pos(x,y): return y *", "in square.neighbours: if neighbour.color == EMPTY: return True return False", "if not square.used: for neighbour in square.neighbours: if neighbour.color ==", "self.ledges = 0 self.used = True for neighbour in self.neighbours:", "= self path = [node] while True: pos = node.select(board)", "self.black_dead else: count = self.white_dead for square in self.squares: squarecolor", "+= 1 else: neighbour_ref = neighbour.find(update=True) if neighcolor == color:", "self.set(i, self.empties[choices]) self.set(choices, pos) return PASS def add(self, pos): self.empty_pos[pos]", "class EmptySet: def __init__(self, board): self.board = board self.empties =", "self.squares: square.color = EMPTY square.used = False self.emptyset = EmptySet(self)", "pos child.parent = node path.append(child) break path.append(child) node = child", "set(self, i, pos): self.empties[i] = pos self.empty_pos[pos] = i class", "self.set(choices, pos) return PASS def add(self, pos): self.empty_pos[pos] = len(self.empties)", "-1), (0, 1)]: newx, newy = x + dx, y", "if neighbour_ref.ledges == 0: neighbour.remove(neighbour_ref) self.board.zobrist.add() def remove(self, reference, update=True):", "pos): self.empty_pos[pos] = len(self.empties) self.empties.append(pos) def remove(self, pos): self.set(self.empty_pos[pos], self.empties[len(self.empties)-1])", "WHITE else: color = BLACK if wins == (color ==", "child self.random_playout(board) self.update_path(board, color, path) def select(self, board): \"\"\" select", "square.color if squarecolor == color: count += 1 elif squarecolor", "square.used: for neighbour in square.neighbours: if neighbour.color == EMPTY: return", "= self.unexplored[len(self.unexplored)-1] self.unexplored.pop() return pos elif self.bestchild: return self.bestchild.pos else:", "update: neighbour_ref.ledges += 1 def find(self, update=False): reference = self.reference", "if not child: child = node.pos_child[pos] = UCTNode() child.unexplored =", "== square.color and neighbour not in members1: changed = True", "ValueError: continue if not (0 <= x < SIZE and", "= board.useful_moves() nboard = Board() for game in range(GAMES): node", "= old_hash strong_neighs = neighs-weak_neighs strong_opps = opps-weak_opps return not", "PASS tree = UCTNode() tree.unexplored = board.useful_moves() nboard = Board()", "x def to_xy(pos): y, x = divmod(pos, SIZE) return x,", "def best_visited(self): maxvisits = -1 maxchild = None for child", "board = Board() pos = computer_move(board) def main(n): times =", "= 7.5 EMPTY, WHITE, BLACK = 0, 1, 2 SHOW", "square = self.squares[pos] if pos != PASS: square.move(self.color) self.emptyset.remove(square.pos) elif", "< SIZE and 0 <= newy < SIZE: self.neighbours.append(self.board.squares[to_pos(newx, newy)])", "self.unexplored: i = random.randrange(len(self.unexplored)) pos = self.unexplored[i] self.unexplored[i] = self.unexplored[len(self.unexplored)-1]", "= WHITE else: color = BLACK if wins == (color", "color == BLACK: # self.board.black_dead += 1 # else: #", "node = tree nboard.reset() nboard.replay(board.history) node.play(nboard) # print 'moves', MOVES", "-1 maxchild = None for child in self.pos_child: if child", "!= EMPTY and neighbour.removestamp != TIMESTAMP: neighbour_ref = neighbour.find(update) if", "if neighbour_ref.timestamp != TIMESTAMP: if neighcolor == self.color: neighs +=", "(more 
output, took out the benchmark harness)

import random, math, sys, time

SIZE = 9
GAMES = 200
KOMI = 7.5
EMPTY, WHITE, BLACK = 0, 1, 2
SHOW = {EMPTY: '.', WHITE: 'o', BLACK: 'x'}
PASS = -1
MAXMOVES = SIZE*SIZE*3
TIMESTAMP = 0
MOVES = 0

def to_pos(x, y):
    return y * SIZE + x

def to_xy(pos):
    y, x = divmod(pos, SIZE)
    return x, y
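# Editorial sketch (not part of the original benchmark): to_pos() flattens
# an (x, y) coordinate to a board index and to_xy() inverts it, so the two
# round-trip. With SIZE == 9:
#
#   >>> to_pos(3, 2)   # 2 * 9 + 3
#   21
#   >>> to_xy(21)
#   (3, 2)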
class Square:
    def __init__(self, board, pos):
        self.board = board
        self.pos = pos
        self.timestamp = TIMESTAMP
        self.removestamp = TIMESTAMP
        self.zobrist_strings = [random.randrange(sys.maxint) for i in range(3)]

    def set_neighbours(self):
        x, y = self.pos % SIZE, self.pos / SIZE
        self.neighbours = []
        for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
            newx, newy = x + dx, y + dy
            if 0 <= newx < SIZE and 0 <= newy < SIZE:
                self.neighbours.append(self.board.squares[to_pos(newx, newy)])

    def move(self, color):
        global TIMESTAMP, MOVES
        TIMESTAMP += 1
        MOVES += 1
        self.board.zobrist.update(self, color)
        self.color = color
        self.reference = self
        self.ledges = 0
        self.used = True
        for neighbour in self.neighbours:
            neighcolor = neighbour.color
            if neighcolor == EMPTY:
                self.ledges += 1
            else:
                neighbour_ref = neighbour.find(update=True)
                if neighcolor == color:
                    if neighbour_ref.reference.pos != self.pos:
                        self.ledges += neighbour_ref.ledges
                        neighbour_ref.reference = self
                    self.ledges -= 1
                else:
                    neighbour_ref.ledges -= 1
                    if neighbour_ref.ledges == 0:
                        neighbour.remove(neighbour_ref)
        self.board.zobrist.add()

    def remove(self, reference, update=True):
        self.board.zobrist.update(self, EMPTY)
        self.removestamp = TIMESTAMP
        if update:
            self.color = EMPTY
            self.board.emptyset.add(self.pos)
#            if color == BLACK:
#                self.board.black_dead += 1
#            else:
#                self.board.white_dead += 1
        for neighbour in self.neighbours:
            if neighbour.color != EMPTY and neighbour.removestamp != TIMESTAMP:
                neighbour_ref = neighbour.find(update)
                if neighbour_ref.pos == reference.pos:
                    neighbour.remove(reference, update)
                else:
                    if update:
                        neighbour_ref.ledges += 1

    def find(self, update=False):
        reference = self.reference
        if reference.pos != self.pos:
            reference = reference.find(update)
            if update:
                self.reference = reference
        return reference

    def __repr__(self):
        return repr(to_xy(self.pos))
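# Editorial note: Square.find() above is a union-find lookup with path
# compression -- each stone keeps a `reference` pointer, chains of pointers
# are flattened on lookup, and the representative stone of each string
# carries the shared liberty count (`ledges`).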
class EmptySet:
    def __init__(self, board):
        self.board = board
        self.empties = range(SIZE*SIZE)
        self.empty_pos = range(SIZE*SIZE)

    def random_choice(self):
        choices = len(self.empties)
        while choices:
            i = int(random.random()*choices)
            pos = self.empties[i]
            if self.board.useful(pos):
                return pos
            choices -= 1
            self.set(i, self.empties[choices])
            self.set(choices, pos)
        return PASS

    def add(self, pos):
        self.empty_pos[pos] = len(self.empties)
        self.empties.append(pos)

    def remove(self, pos):
        self.set(self.empty_pos[pos], self.empties[len(self.empties)-1])
        self.empties.pop()

    def set(self, i, pos):
        self.empties[i] = pos
        self.empty_pos[pos] = i

class ZobristHash:
    def __init__(self, board):
        self.board = board
        self.hash_set = set()
        self.hash = 0
        for square in self.board.squares:
            self.hash ^= square.zobrist_strings[EMPTY]
        self.hash_set.clear()
        self.hash_set.add(self.hash)

    def update(self, square, color):
        self.hash ^= square.zobrist_strings[square.color]
        self.hash ^= square.zobrist_strings[color]

    def add(self):
        self.hash_set.add(self.hash)

    def dupe(self):
        return self.hash in self.hash_set
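# Editorial sketch (not part of the original benchmark): the incremental
# Zobrist update works because XOR is self-inverse, so toggling the old
# color's random string out and the new color's in keeps `hash` equal to
# the XOR of one string per square. Illustrative trace:
#
#   strings = [random.randrange(sys.maxint) for _ in range(3)]
#   h = strings[EMPTY]                          # square's initial contribution
#   h ^= strings[EMPTY]; h ^= strings[BLACK]    # EMPTY -> BLACK
#   h ^= strings[BLACK]; h ^= strings[EMPTY]    # BLACK -> EMPTY
#   assert h == strings[EMPTY]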
class Board:
    def __init__(self):
        self.squares = [Square(self, pos) for pos in range(SIZE*SIZE)]
        for square in self.squares:
            square.set_neighbours()
        self.reset()

    def reset(self):
        for square in self.squares:
            square.color = EMPTY
            square.used = False
        self.emptyset = EmptySet(self)
        self.zobrist = ZobristHash(self)
        self.color = BLACK
        self.finished = False
        self.lastmove = -2
        self.history = []
        self.white_dead = 0
        self.black_dead = 0

    def move(self, pos):
        square = self.squares[pos]
        if pos != PASS:
            square.move(self.color)
            self.emptyset.remove(square.pos)
        elif self.lastmove == PASS:
            self.finished = True
        if self.color == BLACK: self.color = WHITE
        else: self.color = BLACK
        self.lastmove = pos
        self.history.append(pos)

    def random_move(self):
        return self.emptyset.random_choice()

    def useful_fast(self, square):
        if not square.used:
            for neighbour in square.neighbours:
                if neighbour.color == EMPTY:
                    return True
        return False

    def useful(self, pos):
        global TIMESTAMP
        TIMESTAMP += 1
        square = self.squares[pos]
        if self.useful_fast(square):
            return True
        old_hash = self.zobrist.hash
        self.zobrist.update(square, self.color)
        empties = opps = weak_opps = neighs = weak_neighs = 0
        for neighbour in square.neighbours:
            neighcolor = neighbour.color
            if neighcolor == EMPTY:
                empties += 1
                continue
            neighbour_ref = neighbour.find()
            if neighbour_ref.timestamp != TIMESTAMP:
                if neighcolor == self.color:
                    neighs += 1
                else:
                    opps += 1
                neighbour_ref.timestamp = TIMESTAMP
                neighbour_ref.temp_ledges = neighbour_ref.ledges
            neighbour_ref.temp_ledges -= 1
            if neighbour_ref.temp_ledges == 0:
                if neighcolor == self.color:
                    weak_neighs += 1
                else:
                    weak_opps += 1
                    neighbour_ref.remove(neighbour_ref, update=False)
        dupe = self.zobrist.dupe()
        self.zobrist.hash = old_hash
        strong_neighs = neighs - weak_neighs
        strong_opps = opps - weak_opps
        return not dupe and \
               (empties or weak_opps or (strong_neighs and (strong_opps or weak_neighs)))

    def useful_moves(self):
        return [pos for pos in self.emptyset.empties if self.useful(pos)]

    def replay(self, history):
        for pos in history:
            self.move(pos)

    def score(self, color):
        if color == WHITE:
            count = KOMI + self.black_dead
        else:
            count = self.white_dead
        for square in self.squares:
            squarecolor = square.color
            if squarecolor == color:
                count += 1
            elif squarecolor == EMPTY:
                surround = 0
                for neighbour in square.neighbours:
                    if neighbour.color == color:
                        surround += 1
                if surround == len(square.neighbours):
                    count += 1
        return count

    def check(self):
        for square in self.squares:
            if square.color == EMPTY:
                continue
            members1 = set([square])
            changed = True
            while changed:
                changed = False
                for member in members1.copy():
                    for neighbour in member.neighbours:
                        if neighbour.color == square.color and neighbour not in members1:
                            changed = True
                            members1.add(neighbour)
            ledges1 = 0
            for member in members1:
                for neighbour in member.neighbours:
                    if neighbour.color == EMPTY:
                        ledges1 += 1
            root = square.find()
            #print 'members1', square, root, members1
            #print 'ledges1', square, ledges1
            members2 = set()
            for square2 in self.squares:
                if square2.color != EMPTY and square2.find() == root:
                    members2.add(square2)
            ledges2 = root.ledges
            #print 'members2', square, root, members2
            #print 'ledges2', square, ledges2
            assert members1 == members2
            assert ledges1 == ledges2, ('ledges differ at %r: %d %d' % (square, ledges1, ledges2))
            empties1 = set(self.emptyset.empties)
            empties2 = set()
            for square in self.squares:
                if square.color == EMPTY:
                    empties2.add(square.pos)

    def __repr__(self):
        result = []
        for y in range(SIZE):
            start = to_pos(0, y)
            result.append(''.join([SHOW[square.color]+' ' for square in self.squares[start:start+SIZE]]))
        return '\n'.join(result)
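# Editorial usage sketch: a defined-but-uncalled helper showing the Board
# API (assumes Python 2, as does the rest of this file).
def _demo_random_game():
    board = Board()
    while not board.finished and len(board.history) < MAXMOVES:
        board.move(board.random_move())   # random legal-ish move or PASS
    print board                           # Board.__repr__ draws the grid
    print 'black score:', board.score(BLACK)
    print 'white score:', board.score(WHITE)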
class UCTNode:
    def __init__(self):
        self.bestchild = None
        self.pos = -1
        self.wins = 0
        self.losses = 0
        self.pos_child = [None for x in range(SIZE*SIZE)]
        self.parent = None

    def play(self, board):
        """ uct tree search """
        color = board.color
        node = self
        path = [node]
        while True:
            pos = node.select(board)
            if pos == PASS:
                break
            board.move(pos)
            child = node.pos_child[pos]
            if not child:
                child = node.pos_child[pos] = UCTNode()
                child.unexplored = board.useful_moves()
                child.pos = pos
                child.parent = node
                path.append(child)
                break
            path.append(child)
            node = child
        self.random_playout(board)
        self.update_path(board, color, path)

    def select(self, board):
        """ select move; unexplored children first, then according to uct value """
        if self.unexplored:
            i = random.randrange(len(self.unexplored))
            pos = self.unexplored[i]
            self.unexplored[i] = self.unexplored[len(self.unexplored)-1]
            self.unexplored.pop()
            return pos
        elif self.bestchild:
            return self.bestchild.pos
        else:
            return PASS

    def random_playout(self, board):
        """ random play until both players pass """
        for x in range(MAXMOVES):  # XXX while not self.finished?
            if board.finished:
                break
            board.move(board.random_move())

    def update_path(self, board, color, path):
        """ update win/loss count along path """
        wins = board.score(BLACK) >= board.score(WHITE)
        for node in path:
            if color == BLACK: color = WHITE
            else: color = BLACK
            if wins == (color == BLACK):
                node.wins += 1
            else:
                node.losses += 1
            if node.parent:
                node.parent.bestchild = node.parent.best_child()

    def score(self):
        winrate = self.wins/float(self.wins+self.losses)
        parentvisits = self.parent.wins+self.parent.losses
        if not parentvisits:
            return winrate
        nodevisits = self.wins+self.losses
        return winrate + math.sqrt((math.log(parentvisits))/(5*nodevisits))

    def best_child(self):
        maxscore = -1
        maxchild = None
        for child in self.pos_child:
            if child and child.score() > maxscore:
                maxchild = child
                maxscore = child.score()
        return maxchild

    def best_visited(self):
        maxvisits = -1
        maxchild = None
        for child in self.pos_child:
#            if child:
#                print to_xy(child.pos), child.wins, child.losses, child.score()
            if child and (child.wins+child.losses) > maxvisits:
                maxvisits, maxchild = (child.wins+child.losses), child
        return maxchild
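# Editorial note: UCTNode.score() above is a UCB1-style upper confidence
# bound, winrate + sqrt(ln(parent_visits) / (5 * node_visits)): the first
# term exploits known-good moves, the second favors rarely visited ones.
# Standalone arithmetic with made-up counts:
#
#   wins, losses, parentvisits = 7, 3, 50
#   winrate = wins / float(wins + losses)      # 0.7
#   uct = winrate + math.sqrt(math.log(parentvisits) / (5 * (wins + losses)))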
def user_move(board):
    while True:
        text = raw_input('?').strip()
        if text == 'p':
            return PASS
        if text == 'q':
            raise EOFError
        try:
            x, y = [int(i) for i in text.split()]
        except ValueError:
            continue
        if not (0 <= x < SIZE and 0 <= y < SIZE):
            continue
        pos = to_pos(x, y)
        if board.useful(pos):
            return pos

def computer_move(board):
    global MOVES
    pos = board.random_move()
    if pos == PASS:
        return PASS
    tree = UCTNode()
    tree.unexplored = board.useful_moves()
    nboard = Board()
    for game in range(GAMES):
        node = tree
        nboard.reset()
        nboard.replay(board.history)
        node.play(nboard)
#    print 'moves', MOVES
    return tree.best_visited().pos

def versus_cpu():
    print "versus_cpu"
    random.seed(1)
    board = Board()
    pos = computer_move(board)

def main(n):
    times = []
    for i in range(5):
        versus_cpu()  # warmup
    for i in range(n):
        t1 = time.time()
        versus_cpu()
        t2 = time.time()
        times.append(t2 - t1)
    return times

if __name__ == "__main__":
    main(100)
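# Editorial usage sketch: timing a short run without the __main__ entry
# point above (each element of `times` is wall-clock seconds per game):
#
#   times = main(5)
#   print 'avg %.2fs' % (sum(times) / len(times))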
[ "bFunctionProtocol=cdc.CDC_PROTOCOL_NONE) descriptor_list = [] if 'CDC' in args.devices: # Put", "idVendor=args.vid, idProduct=args.pid, iManufacturer=StringIndex.index(args.manufacturer), iProduct=StringIndex.index(args.product), iSerialNumber=SERIAL_NUMBER_INDEX) # Interface numbers are interface-set", "n += 1 c_file.write(\"\\n\") i += length c_file.write(\"\"\"\\ }; \"\"\")", "concatenated_descriptors.extend( bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id))) report_ids[name] = report_id report_id += 1 combined_hid_report_descriptor =", "descriptor_list.extend(msc_interfaces) if 'HID' in args.devices: descriptor_list.extend(hid_interfaces) if 'AUDIO' in args.devices:", "bDataInterface=0x01) # Adjust this after interfaces are renumbered. cdc_comm_interface =", "device objects. c_file.write(\"\"\" mp_obj_tuple_t common_hal_usb_hid_devices = {{ .base = {{", "\"\"\".format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__)) b = bytes(descriptor) notes = descriptor.notes() i =", "in descriptor (AUDIO includes MIDI support)') parser.add_argument('--hid_devices', type=lambda l: tuple(l.split(',')),", "cls.string_to_index: idx = cls.string_to_index[string] if not cls.index_to_variable[idx]: cls.index_to_variable[idx] = variable_name", "number of CDC DATA OUT') parser.add_argument('--cdc_ep_num_data_in', type=int, default=0, help='endpoint number", "1 c_file.write(\"\\n\") i += length c_file.write(\"\"\"\\ }; \"\"\") c_file.write(\"\"\"\\ //", "up certain # interface cross-references. interfaces = util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints) #", "build the composite descriptor. configuration = standard.ConfigurationDescriptor( description=\"Composite configuration\", wTotalLength=(standard.ConfigurationDescriptor.bLength", "interfaces_to_join = [] if 'CDC' in args.devices: interfaces_to_join.append(cdc_interfaces) if 'MSC'", "= {report_length}, .usage_page = {usage_page:#04x}, .usage = {usage:#04x}, .out_report_buffer =", "args.output_c_file h_file = args.output_h_file c_file.write(\"\"\"\\ #include <stdint.h> #include \"py/objtuple.h\" #include", "usb_desc_dev[] = { \"\"\") for b in bytes(device): c_file.write(\"0x{:02x}, \".format(b))", "bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, wMaxPacketSize=0x0040, bInterval=0x10) ]) cdc_data_interface = standard.InterfaceDescriptor(", "= \"const \" if variable_name == \"usb_serial_number\": const = \"\"", "string in StringIndex.strings_in_order()] serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX] c_file = args.output_c_file h_file", "Communications Device Class bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE, iInterface=StringIndex.index(\"{}", "backwards compatibility. 
descriptor_list.append(cdc_iad) descriptor_list.extend(cdc_interfaces) if 'MSC' in args.devices: descriptor_list.extend(msc_interfaces) if", "add the control interface because other audio interfaces are managed", "endpoint OUT number must not be 0\") elif args.midi_ep_num_in ==", "({rhport0_mode}) #define USB_HID_NUM_DEVICES {hid_num_devices} // Vendor name included in Inquiry", "= midi.InJackDescriptor( description=\"MIDI data in from user code.\", bJackType=midi.JACK_TYPE_EXTERNAL, iJack=0)", "args.cdc_ep_num_data_in == 0: raise ValueError(\"CDC data IN endpoint number must", "== 0: raise ValueError(\"MSC endpoint IN number must not be", "string in cls.string_to_index: idx = cls.string_to_index[string] if not cls.index_to_variable[idx]: cls.index_to_variable[idx]", "argparse.ArgumentParser(description='Generate USB descriptors.') parser.add_argument('--highspeed', default=False, action='store_true', help='descriptor for highspeed device')", "be 0\") elif args.msc_ep_num_in == 0: raise ValueError(\"MSC endpoint IN", "const uint8_t hid_report_descriptor[{hid_report_descriptor_length}]; #define CFG_TUSB_RHPORT0_MODE ({rhport0_mode}) #define USB_HID_NUM_DEVICES {hid_num_devices} //", "for highspeed device') parser.add_argument('--manufacturer', type=str, help='manufacturer of the device') parser.add_argument('--product',", "| standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval = 0x0, wMaxPacketSize=512 if args.highspeed else", "{name}_report_buffer[{report_length}]; \"\"\".format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].report_length)) if hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length > 0: c_file.write(\"\"\"\\ static uint8_t", "the Windows composite USB driver that requests the # HID", "PC <- {}\".format(args.interface_name), bJackType=midi.JACK_TYPE_EMBEDDED, input_pins=[(midi_in_jack_ext, 1)], iJack=StringIndex.index(\"{} usb_midi.ports[1]\".format(args.interface_name))) audio_midi_interface =", "'AUDIO' in args.devices: interfaces_to_join.append(audio_interfaces) # util.join_interfaces() will renumber the endpoints", "# first. However, it still fetches the descriptor anyway. We", "cls.index_to_variable[idx]: cls.index_to_variable[idx] = variable_name return idx else: idx = len(cls.strings)", "bcdCDC=0x0110), cdc_call_management, cdc.AbstractControlManagement( description=\"CDC comm\", bmCapabilities=0x02), cdc_union, standard.EndpointDescriptor( description=\"CDC comm", "them unique across descriptors, # and renumber the interfaces in", "l: tuple(l.split(',')), default=DEFAULT_HID_DEVICES, help='HID devices to include in HID report", "type=int, default=0, help='endpoint number of CDC NOTIFICATION') parser.add_argument('--cdc_ep_num_data_out', type=int, default=0,", "util.join_interfaces() will renumber the endpoints to make them unique across", "NOTIFICATION') parser.add_argument('--cdc_ep_num_data_out', type=int, default=0, help='endpoint number of CDC DATA OUT')", "in the Windows composite USB driver that requests the #", "ALL_DEVICES='CDC,MSC,AUDIO,HID' ALL_DEVICES_SET=frozenset(ALL_DEVICES.split(',')) DEFAULT_DEVICES='CDC,MSC,AUDIO,HID' ALL_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,SYS_CONTROL,GAMEPAD,DIGITIZER,XAC_COMPATIBLE_GAMEPAD,RAW' ALL_HID_DEVICES_SET=frozenset(ALL_HID_DEVICES.split(',')) # Digitizer works on Linux", "*, variable_name = None): if string in cls.string_to_index: idx =", "before MIDI ones. 
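# Editorial sketch: StringIndex deduplicates strings and hands back stable
# indices, so descriptor fields like iInterface can refer to string slots
# by number. Illustrative only (indices depend on what was added before):
#
#   i = StringIndex.index("Acme")          # first use allocates an index
#   assert StringIndex.index("Acme") == i  # repeated use is deduplicated
#   assert StringIndex.strings_in_order()[i] == "Acme"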
device = standard.DeviceDescriptor(
    description="top",
    idVendor=args.vid,
    idProduct=args.pid,
    iManufacturer=StringIndex.index(args.manufacturer),
    iProduct=StringIndex.index(args.product),
    iSerialNumber=SERIAL_NUMBER_INDEX)

# Interface numbers are interface-set local and endpoints are interface local.

cdc_union = cdc.Union(
    description="CDC comm",
    bMasterInterface=0x00,        # Adjust this after interfaces are renumbered.
    bSlaveInterface_list=[0x01])  # Adjust this after interfaces are renumbered.

cdc_call_management = cdc.CallManagement(
    description="CDC comm",
    bmCapabilities=0x01,
    bDataInterface=0x01)  # Adjust this after interfaces are renumbered.

cdc_comm_interface = standard.InterfaceDescriptor(
    description="CDC comm",
    bInterfaceClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE,
    iInterface=StringIndex.index("{} CDC control".format(args.interface_name)),
    subdescriptors=[
        cdc.Header(
            description="CDC comm",
            bcdCDC=0x0110),
        cdc_call_management,
        cdc.AbstractControlManagement(
            description="CDC comm",
            bmCapabilities=0x02),
        cdc_union,
        standard.EndpointDescriptor(
            description="CDC comm in",
            bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
            wMaxPacketSize=0x0040,
            bInterval=0x10)
    ])

cdc_data_interface = standard.InterfaceDescriptor(
    description="CDC data",
    bInterfaceClass=cdc.CDC_CLASS_DATA,
    iInterface=StringIndex.index("{} CDC data".format(args.interface_name)),
    subdescriptors=[
        standard.EndpointDescriptor(
            description="CDC data out",
            bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        standard.EndpointDescriptor(
            description="CDC data in",
            bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
    ])

cdc_interfaces = [cdc_comm_interface, cdc_data_interface]

msc_interfaces = [
    standard.InterfaceDescriptor(
        description="MSC",
        bInterfaceClass=msc.MSC_CLASS,
        bInterfaceSubClass=msc.MSC_SUBCLASS_TRANSPARENT,
        bInterfaceProtocol=msc.MSC_PROTOCOL_BULK,
        iInterface=StringIndex.index("{} Mass Storage".format(args.interface_name)),
        subdescriptors=[
            standard.EndpointDescriptor(
                description="MSC in",
                bEndpointAddress=args.msc_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
            standard.EndpointDescriptor(
                description="MSC out",
                bEndpointAddress=(args.msc_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT),
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
        ]
    )
]

# When there's only one hid_device, it shouldn't have a report id.
# Otherwise, report ids are assigned sequentially:
# args.hid_devices[0] has report_id 1
# args.hid_devices[1] has report_id 2
# etc.

report_ids = {}

if len(args.hid_devices) == 1:
    name = args.hid_devices[0]
    combined_hid_report_descriptor = hid.ReportDescriptor(
        description=name,
        report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0)))
    report_ids[name] = 0
else:
    report_id = 1
    concatenated_descriptors = bytearray()
    for name in args.hid_devices:
        concatenated_descriptors.extend(
            bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id)))
        report_ids[name] = report_id
        report_id += 1

    combined_hid_report_descriptor = hid.ReportDescriptor(
        description="MULTIDEVICE",
        report_descriptor=bytes(concatenated_descriptors))

# ASF4 expects keyboard and generic devices to have both in and out endpoints,
# and will fail (possibly silently) if both are not supplied.
hid_endpoint_in_descriptor = standard.EndpointDescriptor(
    description="HID in",
    bEndpointAddress=args.hid_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
    bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
    bInterval=8)

hid_interfaces = [
    standard.InterfaceDescriptor(
        description="HID Multiple Devices",
        bInterfaceClass=hid.HID_CLASS,
        bInterfaceSubClass=hid.HID_SUBCLASS_NOBOOT,
        bInterfaceProtocol=hid.HID_PROTOCOL_NONE,
        iInterface=StringIndex.index("{} HID".format(args.interface_name)),
        subdescriptors=[
            hid.HIDDescriptor(
                description="HID",
                wDescriptorLength=len(bytes(combined_hid_report_descriptor))),
            hid_endpoint_in_descriptor,
        ]
    )
]

# Audio!
# In and out here are relative to CircuitPython.

# USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython
midi_in_jack_emb = midi.InJackDescriptor(
    description="MIDI PC -> {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name)))
midi_out_jack_ext = midi.OutJackDescriptor(
    description="MIDI data out to user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    input_pins=[(midi_in_jack_emb, 1)],
    iJack=0)

# USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython
midi_in_jack_ext = midi.InJackDescriptor(
    description="MIDI data in from user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    iJack=0)
midi_out_jack_emb = midi.OutJackDescriptor(
    description="MIDI PC <- {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    input_pins=[(midi_in_jack_ext, 1)],
    iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name)))

audio_midi_interface = standard.InterfaceDescriptor(
    description="Midi goodness",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} MIDI".format(args.interface_name)),
    subdescriptors=[
        midi.Header(
            jacks_and_elements=[
                midi_in_jack_emb,
                midi_in_jack_ext,
                midi_out_jack_emb,
                midi_out_jack_ext,
            ],
        ),
        standard.EndpointDescriptor(
            description="MIDI data out to {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval = 0x0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]),
        standard.EndpointDescriptor(
            description="MIDI data in from {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval = 0x0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]),
    ])

cs_ac_interface = audio10.AudioControlInterface(
    description="Empty audio control",
    audio_streaming_interfaces = [],
    midi_streaming_interfaces = [
        audio_midi_interface
    ]
)

audio_control_interface = standard.InterfaceDescriptor(
    description="Audio Control",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} Audio".format(args.interface_name)),
    subdescriptors=[
        cs_ac_interface,
    ])

# Audio streaming interfaces must occur before MIDI ones.
audio_interfaces = [audio_control_interface] + cs_ac_interface.audio_streaming_interfaces + cs_ac_interface.midi_streaming_interfaces
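# Editorial sketch of the report-id rule above: with
# --hid_devices KEYBOARD,MOUSE the else-branch assigns
#
#   report_ids == {'KEYBOARD': 1, 'MOUSE': 2}
#
# while a single device (--hid_devices KEYBOARD) takes the first branch,
# gets report id 0, and omits the report-id prefix entirely.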

if not args.renumber_endpoints:
    if 'CDC' in args.devices:
        if args.cdc_ep_num_notification == 0:
            raise ValueError("CDC notification endpoint number must not be 0")
        elif args.cdc_ep_num_data_out == 0:
            raise ValueError("CDC data OUT endpoint number must not be 0")
        elif args.cdc_ep_num_data_in == 0:
            raise ValueError("CDC data IN endpoint number must not be 0")

    if 'MSC' in args.devices:
        if args.msc_ep_num_out == 0:
            raise ValueError("MSC endpoint OUT number must not be 0")
        elif args.msc_ep_num_in == 0:
            raise ValueError("MSC endpoint IN number must not be 0")

    if 'HID' in args.devices:
        if args.hid_ep_num_out == 0:
            raise ValueError("HID endpoint OUT number must not be 0")
        elif args.hid_ep_num_in == 0:
            raise ValueError("HID endpoint IN number must not be 0")

    if 'AUDIO' in args.devices:
        if args.midi_ep_num_out == 0:
            raise ValueError("MIDI endpoint OUT number must not be 0")
        elif args.midi_ep_num_in == 0:
            raise ValueError("MIDI endpoint IN number must not be 0")

class StringIndex:
    """Assign a monotonically increasing index to each unique string. Start with 0."""
    string_to_index = {}
    index_to_variable = {}
    strings = []

    @classmethod
    def index(cls, string, *, variable_name=None):
        if string in cls.string_to_index:
            idx = cls.string_to_index[string]
            # Record a variable name for this string if one wasn't given before.
            if not cls.index_to_variable[idx]:
                cls.index_to_variable[idx] = variable_name
            return idx
        else:
            idx = len(cls.strings)
            cls.string_to_index[string] = idx
            cls.strings.append(string)
            cls.index_to_variable[idx] = variable_name
            return idx

    @classmethod
    def strings_in_order(cls):
        return cls.strings


# langid must be the 0th string descriptor
LANGID_INDEX = StringIndex.index("\u0409", variable_name="language_id")
assert LANGID_INDEX == 0
SERIAL_NUMBER_INDEX = StringIndex.index("S" * args.serial_number_length,
                                        variable_name="usb_serial_number")

device = standard.DeviceDescriptor(
    description="top",
    idVendor=args.vid,
    idProduct=args.pid,
    iManufacturer=StringIndex.index(args.manufacturer),
    iProduct=StringIndex.index(args.product),
    iSerialNumber=SERIAL_NUMBER_INDEX)
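
# Illustration of the indexing above (values shown only to make the
# bookkeeping concrete): after the two explicit calls, StringIndex.strings
# holds ["\u0409", "SSS...S"], so LANGID_INDEX == 0 and
# SERIAL_NUMBER_INDEX == 1. The all-'S' placeholder reserves space; it is
# emitted below as a writable usb_serial_number array, presumably so the
# firmware can patch in the real serial number at runtime.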

# Interface numbers are interface-set local and endpoints are interface local
# until util.join_interfaces renumbers them.

cdc_union = cdc.Union(
    description="CDC comm",
    bMasterInterface=0x00,        # Adjust this after interfaces are renumbered.
    bSlaveInterface_list=[0x01])  # Adjust this after interfaces are renumbered.

cdc_call_management = cdc.CallManagement(
    description="CDC comm",
    bmCapabilities=0x01,
    bDataInterface=0x01)          # Adjust this after interfaces are renumbered.

cdc_comm_interface = standard.InterfaceDescriptor(
    description="CDC comm",
    bInterfaceClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE,
    iInterface=StringIndex.index("{} CDC control".format(args.interface_name)),
    subdescriptors=[
        cdc.Header(
            description="CDC comm",
            bcdCDC=0x0110),
        cdc_call_management,
        cdc.AbstractControlManagement(
            description="CDC comm",
            bmCapabilities=0x02),
        cdc_union,
        standard.EndpointDescriptor(
            description="CDC comm in",
            bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
            wMaxPacketSize=0x0040,
            bInterval=0x10)
    ])

cdc_data_interface = standard.InterfaceDescriptor(
    description="CDC data",
    bInterfaceClass=cdc.CDC_CLASS_DATA,
    iInterface=StringIndex.index("{} CDC data".format(args.interface_name)),
    subdescriptors=[
        standard.EndpointDescriptor(
            description="CDC data out",
            bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        standard.EndpointDescriptor(
            description="CDC data in",
            bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
    ])

cdc_interfaces = [cdc_comm_interface, cdc_data_interface]

msc_interfaces = [
    standard.InterfaceDescriptor(
        description="MSC",
        bInterfaceClass=msc.MSC_CLASS,
        bInterfaceSubClass=msc.MSC_SUBCLASS_TRANSPARENT,
        bInterfaceProtocol=msc.MSC_PROTOCOL_BULK,
        iInterface=StringIndex.index("{} Mass Storage".format(args.interface_name)),
        subdescriptors=[
            standard.EndpointDescriptor(
                description="MSC in",
                bEndpointAddress=args.msc_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
            standard.EndpointDescriptor(
                description="MSC out",
                bEndpointAddress=(args.msc_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT),
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
        ]
    )
]

# When there's only one hid_device, it shouldn't have a report id.
# Otherwise, report ids are assigned sequentially:
#   args.hid_devices[0] has report_id 1
#   args.hid_devices[1] has report_id 2
#   etc.
report_ids = {}

if len(args.hid_devices) == 1:
    name = args.hid_devices[0]
    combined_hid_report_descriptor = hid.ReportDescriptor(
        description=name,
        report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0)))
    report_ids[name] = 0
else:
    report_id = 1
    concatenated_descriptors = bytearray()
    for name in args.hid_devices:
        concatenated_descriptors.extend(
            bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id)))
        report_ids[name] = report_id
        report_id += 1
    combined_hid_report_descriptor = hid.ReportDescriptor(
        description="MULTIDEVICE",
        report_descriptor=bytes(concatenated_descriptors))
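
# Example of the resulting mapping with the default hid_devices of
# KEYBOARD,MOUSE,CONSUMER,GAMEPAD:
#   report_ids == {'KEYBOARD': 1, 'MOUSE': 2, 'CONSUMER': 3, 'GAMEPAD': 4}
# With a single device, e.g. --hid_devices GAMEPAD, report_ids == {'GAMEPAD': 0};
# the 0 passed to the descriptor function means "no report id".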

# ASF4 expects keyboard and generic devices to have both in and out endpoints,
# and will fail (possibly silently) if both are not supplied.
hid_endpoint_in_descriptor = standard.EndpointDescriptor(
    description="HID in",
    bEndpointAddress=args.hid_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
    bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
    bInterval=8)

hid_endpoint_out_descriptor = standard.EndpointDescriptor(
    description="HID out",
    bEndpointAddress=args.hid_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
    bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
    bInterval=8)

hid_interfaces = [
    standard.InterfaceDescriptor(
        description="HID Multiple Devices",
        bInterfaceClass=hid.HID_CLASS,
        bInterfaceSubClass=hid.HID_SUBCLASS_NOBOOT,
        bInterfaceProtocol=hid.HID_PROTOCOL_NONE,
        iInterface=StringIndex.index("{} HID".format(args.interface_name)),
        subdescriptors=[
            hid.HIDDescriptor(
                description="HID",
                wDescriptorLength=len(bytes(combined_hid_report_descriptor))),
            hid_endpoint_in_descriptor,
            hid_endpoint_out_descriptor,
        ]
    ),
]

# Audio!
# In and out here are relative to CircuitPython.

# USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython
midi_in_jack_emb = midi.InJackDescriptor(
    description="MIDI PC -> {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name)))
midi_out_jack_ext = midi.OutJackDescriptor(
    description="MIDI data out to user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    input_pins=[(midi_in_jack_emb, 1)],
    iJack=0)

# USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython
midi_in_jack_ext = midi.InJackDescriptor(
    description="MIDI data in from user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    iJack=0)
midi_out_jack_emb = midi.OutJackDescriptor(
    description="MIDI PC <- {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    input_pins=[(midi_in_jack_ext, 1)],
    iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name)))

audio_midi_interface = standard.InterfaceDescriptor(
    description="Midi goodness",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} MIDI".format(args.interface_name)),
    subdescriptors=[
        midi.Header(
            jacks_and_elements=[
                midi_in_jack_emb,
                midi_in_jack_ext,
                midi_out_jack_emb,
                midi_out_jack_ext,
            ],
        ),
        standard.EndpointDescriptor(
            description="MIDI data out to {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]),
        standard.EndpointDescriptor(
            description="MIDI data in from {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0x0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]),
    ])

cs_ac_interface = audio10.AudioControlInterface(
    description="Empty audio control",
    audio_streaming_interfaces=[],
    midi_streaming_interfaces=[
        audio_midi_interface
    ]
)

audio_control_interface = standard.InterfaceDescriptor(
    description="All the audio",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} Audio".format(args.interface_name)),
    subdescriptors=[
        cs_ac_interface,
    ])

# Audio streaming interfaces must occur before MIDI ones.
audio_interfaces = ([audio_control_interface] +
                    cs_ac_interface.audio_streaming_interfaces +
                    cs_ac_interface.midi_streaming_interfaces)

interfaces_to_join = []

if 'CDC' in args.devices:
    interfaces_to_join.append(cdc_interfaces)

if 'MSC' in args.devices:
    interfaces_to_join.append(msc_interfaces)

if 'HID' in args.devices:
    interfaces_to_join.append(hid_interfaces)

if 'AUDIO' in args.devices:
    interfaces_to_join.append(audio_interfaces)

# util.join_interfaces() will renumber the endpoints to make them unique across
# descriptors, and renumber the interfaces in order. But we still need to fix
# up certain interface cross-references.
interfaces = util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints)
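
# A sketch of the renumbering this code relies on (see
# adafruit_usb_descriptor.util for the actual implementation): given interface
# sets such as [[cdc_comm, cdc_data], [msc], ...], bInterfaceNumber is
# assigned 0, 1, 2, ... across the flattened list, and when renumber_endpoints
# is set each IN and OUT endpoint also receives a unique address, so the
# independently written interface sets cannot collide.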

# Now adjust the CDC interface cross-references.

cdc_union.bMasterInterface = cdc_comm_interface.bInterfaceNumber
cdc_union.bSlaveInterface_list = [cdc_data_interface.bInterfaceNumber]

cdc_call_management.bDataInterface = cdc_data_interface.bInterfaceNumber

cdc_iad = standard.InterfaceAssociationDescriptor(
    description="CDC IAD",
    bFirstInterface=cdc_comm_interface.bInterfaceNumber,
    bInterfaceCount=len(cdc_interfaces),
    bFunctionClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bFunctionSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bFunctionProtocol=cdc.CDC_PROTOCOL_NONE)

descriptor_list = []

if 'CDC' in args.devices:
    # Put the CDC IAD just before the CDC interfaces.
    # There appears to be a bug in the Windows composite USB driver that
    # requests the HID report descriptor with the wrong interface number if the
    # HID interface is not given first. However, it still fetches the
    # descriptor anyway. We could reorder the interfaces, but the Windows 7
    # Adafruit_usbser.inf file thinks CDC is at Interface 0, so we'll leave it
    # there for backwards compatibility.
    descriptor_list.append(cdc_iad)
    descriptor_list.extend(cdc_interfaces)

if 'MSC' in args.devices:
    descriptor_list.extend(msc_interfaces)

if 'HID' in args.devices:
    descriptor_list.extend(hid_interfaces)

if 'AUDIO' in args.devices:
    # Only add the control interface because the other audio interfaces are
    # managed by it to ensure the correct ordering.
    descriptor_list.append(audio_control_interface)

# Finally, build the composite descriptor.
configuration = standard.ConfigurationDescriptor(
    description="Composite configuration",
    wTotalLength=(standard.ConfigurationDescriptor.bLength +
                  sum([len(bytes(x)) for x in descriptor_list])),
    bNumInterfaces=len(interfaces))
descriptor_list.insert(0, configuration)

string_descriptors = [standard.StringDescriptor(string)
                      for string in StringIndex.strings_in_order()]
serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX]

c_file = args.output_c_file
h_file = args.output_h_file

c_file.write("""\
#include <stdint.h>

#include "py/objtuple.h"
#include "shared-bindings/usb_hid/Device.h"

#include "{H_FILE_NAME}"

""".format(H_FILE_NAME=h_file.name))

c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=device.description,
           CLASS=device.__class__))

c_file.write("""\
const uint8_t usb_desc_dev[] = {
""")
for b in bytes(device):
    c_file.write("0x{:02x}, ".format(b))

c_file.write("""\
};
""")

c_file.write("""\
const uint8_t usb_desc_cfg[] = {
""")

# Write out all the regular descriptors as one long array (that's how ASF4 does it).
descriptor_length = 0
for descriptor in descriptor_list:
    c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description,
           CLASS=descriptor.__class__))

    b = bytes(descriptor)
    notes = descriptor.notes()
    i = 0

    # This prints each subdescriptor on a separate line.
    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length):
            c_file.write("0x{:02x}, ".format(b[i + j]))
        c_file.write("// " + notes[n])
        n += 1
        c_file.write("\n")
        i += length
    descriptor_length += len(b)

c_file.write("""\
};
""")
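
# The loop above yields one annotated C line per subdescriptor, for example
# (shape only; the actual bytes and note text depend on the descriptors):
#   0x09, 0x04, 0x00, 0x00, 0x01, 0x02, 0x02, 0x00, 0x02, // CDC comm interface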
configuration =", "variable_name return idx else: idx = len(cls.strings) cls.string_to_index[string] = idx", "standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), ]) cdc_interfaces", "[audio_control_interface] + cs_ac_interface.audio_streaming_interfaces + cs_ac_interface.midi_streaming_interfaces interfaces_to_join = [] if 'CDC'", "default=0, help='endpoint number of MSC IN') parser.add_argument('--hid_ep_num_out', type=int, default=0, help='endpoint", "for idx in range(len(args.hid_devices)): c_file.write(\"\"\"\\ (mp_obj_t) &usb_hid_devices[{idx}], \"\"\".format(idx=idx)) c_file.write(\"\"\"\\ },", "= util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints) # Now adjust the CDC interface cross-references.", "| standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, wMaxPacketSize=0x0040, bInterval=0x10) ]) cdc_data_interface = standard.InterfaceDescriptor( description=\"CDC", "bInterval=8) hid_endpoint_out_descriptor = standard.EndpointDescriptor( description=\"HID out\", bEndpointAddress=args.hid_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,", "report_ids[name] = 0 else: report_id = 1 concatenated_descriptors = bytearray()", "iSerialNumber=SERIAL_NUMBER_INDEX) # Interface numbers are interface-set local and endpoints are", "bytes #define CFG_TUD_MSC_VENDOR \"{msc_vendor}\" // Product name included in Inquiry", "range(length // 2): c_file.write(\"0x{:04x}, \".format(b[i + 2*j + 1] <<", "number must not be 0\") class StringIndex: \"\"\"Assign a monotonically", "renumbers them. cdc_union = cdc.Union( description=\"CDC comm\", bMasterInterface=0x00, # Adjust", "{usage_page:#04x}, .usage = {usage:#04x}, .out_report_buffer = {out_report_buffer}, .out_report_length = {out_report_length},", "subdescriptors=[ standard.EndpointDescriptor( description=\"CDC data out\", bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0,", "hid_report_descriptors.HID_DEVICE_DATA[name] out_report_buffer = '{}_out_report_buffer'.format(name.lower()) if device_data.out_report_length > 0 else 'NULL'", "variable_name == \"usb_serial_number\": const = \"\" c_file.write(\"\"\"\\ {const}uint16_t {NAME}[] =", "with 0.\"\"\" string_to_index = {} index_to_variable = {} strings =", "c_file.write(\"\"\"\\ const uint8_t usb_desc_dev[] = { \"\"\") for b in", "CFG_TUD_MSC_PRODUCT \"{msc_product}\" \"\"\" .format(serial_number_length=len(bytes(serial_number_descriptor)) // 2, device_length=len(bytes(device)), configuration_length=descriptor_length, max_configuration_length=max(hid_descriptor_length, descriptor_length),", "help='endpoint number of CDC DATA IN') parser.add_argument('--msc_ep_num_out', type=int, default=0, help='endpoint", "\"\"\" .format(serial_number_length=len(bytes(serial_number_descriptor)) // 2, device_length=len(bytes(device)), configuration_length=descriptor_length, max_configuration_length=max(hid_descriptor_length, descriptor_length), string_descriptor_length=len(pointers_to_strings), hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)),", "ids are assigned sequentially: # args.hid_devices[0] has report_id 1 #", "unknown_devices: raise ValueError(\"Unknown device(s)\", unknown_devices) unknown_hid_devices = list(frozenset(args.hid_devices) - 

unknown_devices = list(frozenset(args.devices) - ALL_DEVICES_SET)
if unknown_devices:
    raise ValueError("Unknown device(s)", unknown_devices)

unknown_hid_devices = list(frozenset(args.hid_devices) - ALL_HID_DEVICES_SET)
if unknown_hid_devices:
    raise ValueError("Unknown HID device(s)", unknown_hid_devices)

if not args.renumber_endpoints:
    if 'CDC' in args.devices:
        if args.cdc_ep_num_notification == 0:
            raise ValueError("CDC notification endpoint number must not be 0")
        elif args.cdc_ep_num_data_out == 0:
            raise ValueError("CDC data OUT endpoint number must not be 0")
        elif args.cdc_ep_num_data_in == 0:
            raise ValueError("CDC data IN endpoint number must not be 0")

    if 'MSC' in args.devices:
        if args.msc_ep_num_out == 0:
            raise ValueError("MSC endpoint OUT number must not be 0")
        elif args.msc_ep_num_in == 0:
            raise ValueError("MSC endpoint IN number must not be 0")

    if 'HID' in args.devices:
        if args.hid_ep_num_out == 0:
            raise ValueError("HID endpoint OUT number must not be 0")
        elif args.hid_ep_num_in == 0:
            raise ValueError("HID endpoint IN number must not be 0")

    if 'AUDIO' in args.devices:
        if args.midi_ep_num_out == 0:
            raise ValueError("MIDI endpoint OUT number must not be 0")
        elif args.midi_ep_num_in == 0:
            raise ValueError("MIDI endpoint IN number must not be 0")

class StringIndex:
    """Assign a monotonically increasing index to each unique string. Start with 0."""
    string_to_index = {}
    index_to_variable = {}
    strings = []

    @classmethod
    def index(cls, string, *, variable_name=None):
        if string in cls.string_to_index:
            idx = cls.string_to_index[string]
            if not cls.index_to_variable[idx]:
                cls.index_to_variable[idx] = variable_name
            return idx
        else:
            idx = len(cls.strings)
            cls.string_to_index[string] = idx
            cls.strings.append(string)
            cls.index_to_variable[idx] = variable_name
            return idx

    @classmethod
    def strings_in_order(cls):
        return cls.strings
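
# A minimal sketch of how StringIndex deduplicates (hypothetical strings and
# return values; nothing here is executed as part of the build):
#
#   StringIndex.index("\u0409")   # -> 0  (first unique string gets index 0)
#   StringIndex.index("Acme")     # -> 1
#   StringIndex.index("Acme")     # -> 1  (a repeated string reuses its index)
#   StringIndex.index("Widget")   # -> 2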

# langid must be the 0th string descriptor
LANGID_INDEX = StringIndex.index("\u0409", variable_name="language_id")
assert LANGID_INDEX == 0
SERIAL_NUMBER_INDEX = StringIndex.index("S" * args.serial_number_length,
                                        variable_name="usb_serial_number")

device = standard.DeviceDescriptor(
    description="top",
    idVendor=args.vid,
    idProduct=args.pid,
    iManufacturer=StringIndex.index(args.manufacturer),
    iProduct=StringIndex.index(args.product),
    iSerialNumber=SERIAL_NUMBER_INDEX)

# Interface numbers are interface-set local and endpoints are interface local
# until util.join_interfaces renumbers them.
#include \"{H_FILE_NAME}\" \"\"\".format(H_FILE_NAME=h_file.name))", "given # first. However, it still fetches the descriptor anyway.", "cs_ac_interface = audio10.AudioControlInterface( description=\"Empty audio control\", audio_streaming_interfaces = [], midi_streaming_interfaces", "wDescriptorLength=len(bytes(combined_hid_report_descriptor))), hid_endpoint_in_descriptor, hid_endpoint_out_descriptor, ] ), ] # Audio! # In", "HID OUT') parser.add_argument('--hid_ep_num_in', type=int, default=0, help='endpoint number of HID IN')", "\"{H_FILE_NAME}\" \"\"\".format(H_FILE_NAME=h_file.name)) c_file.write(\"\"\"\\ // {DESCRIPTION} : {CLASS} \"\"\".format(DESCRIPTION=device.description, CLASS=device.__class__)) c_file.write(\"\"\"\\", "not be 0\") if 'MSC' in args.devices: if args.msc_ep_num_out ==", "out to {}\".format(args.interface_name), bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if", "configuration) string_descriptors = [standard.StringDescriptor(string) for string in StringIndex.strings_in_order()] serial_number_descriptor =", "b in bytes(combined_hid_report_descriptor): c_file.write(\"0x{:02x}, \".format(b)) c_file.write(\"\"\"\\ }; \"\"\") # Write", "must not be 0\") if 'MSC' in args.devices: if args.msc_ep_num_out", "len(bytes(combined_hid_report_descriptor)) # Now we values we need for the .h", "the report descriptor and info c_file.write(\"\"\"\\ const uint8_t hid_report_descriptor[{HID_DESCRIPTOR_LENGTH}] =", "\"\"\") pointers_to_strings = [] for idx, descriptor in enumerate(string_descriptors): c_file.write(\"\"\"\\", "extern const uint8_t usb_desc_dev[{device_length}]; extern const uint8_t usb_desc_cfg[{configuration_length}]; extern uint16_t", "it # there for backwards compatibility. descriptor_list.append(cdc_iad) descriptor_list.extend(cdc_interfaces) if 'MSC'", "MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H #include <stdint.h> extern const uint8_t usb_desc_dev[{device_length}]; extern const uint8_t", "\"\"\"Assign a monotonically increasing index to each unique string. Start", "\"{msc_product}\" \"\"\" .format(serial_number_length=len(bytes(serial_number_descriptor)) // 2, device_length=len(bytes(device)), configuration_length=descriptor_length, max_configuration_length=max(hid_descriptor_length, descriptor_length), string_descriptor_length=len(pointers_to_strings),", "contributors (https://github.com/adafruit/circuitpython/graphs/contributors) # # SPDX-License-Identifier: MIT import argparse import os", "number must not be 0\") elif args.cdc_ep_num_data_in == 0: raise", "uint16_t const * const string_desc_arr [] = { \"\"\") c_file.write(\"\"\",\\", "&usb_hid_devices[{idx}], \"\"\".format(idx=idx)) c_file.write(\"\"\"\\ }, }; \"\"\") h_file.write(\"\"\"\\ #endif // MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H", "endpoint IN number must not be 0\") if 'HID' in", "idx = len(cls.strings) cls.string_to_index[string] = idx cls.strings.append(string) cls.index_to_variable[idx] = variable_name", "string_to_index = {} index_to_variable = {} strings = [] @classmethod", "== 0: raise ValueError(\"HID endpoint IN number must not be", "sequentially: # args.hid_devices[0] has report_id 1 # args.hid_devices[1] has report_id", "), ] # Audio! 
# In and out here are", "0: raise ValueError(\"MSC endpoint OUT number must not be 0\")", "h_file = args.output_h_file c_file.write(\"\"\"\\ #include <stdint.h> #include \"py/objtuple.h\" #include \"shared-bindings/usb_hid/Device.h\"", "USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython midi_in_jack_emb =", "c_file.write(\"\"\"\\ // {DESCRIPTION} : {CLASS} \"\"\".format(DESCRIPTION=device.description, CLASS=device.__class__)) c_file.write(\"\"\"\\ const uint8_t", "descriptor.notes() i = 0 # This prints each subdescriptor on", "}}, \"\"\".format(name=name.lower(), report_id=report_ids[name], report_length=device_data.report_length, usage_page=device_data.usage_page, usage=device_data.usage, out_report_buffer=out_report_buffer, out_report_length=device_data.out_report_length)) c_file.write(\"\"\"\\ };", "CDC DATA IN') parser.add_argument('--msc_ep_num_out', type=int, default=0, help='endpoint number of MSC", "hid_num_devices=len(args.hid_devices), msc_vendor=args.manufacturer[:8], msc_product=args.product[:16])) # Write out the report descriptor and", "IN endpoint number must not be 0\") if 'MSC' in", "in args.devices: # Only add the control interface because other", "= midi.OutJackDescriptor( description=\"MIDI data out to user code.\", bJackType=midi.JACK_TYPE_EXTERNAL, input_pins=[(midi_in_jack_emb,", "default=0, help='endpoint number of HID OUT') parser.add_argument('--hid_ep_num_in', type=int, default=0, help='endpoint", "renumbered. cdc_call_management = cdc.CallManagement( description=\"CDC comm\", bmCapabilities=0x01, bDataInterface=0x01) # Adjust", "bFunctionClass=cdc.CDC_CLASS_COMM, # Communications Device Class bFunctionSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model", "= {{ \"\"\".format(const=const, NAME=variable_name)) pointers_to_strings.append(\"{name}\".format(name=variable_name)) n = 0 while i", "type=argparse.FileType('w', encoding='UTF-8'), required=True) args = parser.parse_args() unknown_devices = list(frozenset(args.devices) -", "parser.add_argument('--midi_ep_num_out', type=int, default=0, help='endpoint number of MIDI OUT') parser.add_argument('--midi_ep_num_in', type=int,", "= [] @classmethod def index(cls, string, *, variable_name = None):", "bInterval=0, wMaxPacketSize=512 if args.highspeed else 64), standard.EndpointDescriptor( description=\"MSC out\", bEndpointAddress=(args.msc_ep_num_out", "# In and out here are relative to CircuitPython #", "# etc. report_ids = {} if len(args.hid_devices) == 1: name", "control\", audio_streaming_interfaces = [], midi_streaming_interfaces = [ audio_midi_interface ] )", "subdescriptors=[ cdc.Header( description=\"CDC comm\", bcdCDC=0x0110), cdc_call_management, cdc.AbstractControlManagement( description=\"CDC comm\", bmCapabilities=0x02),", "interface cross-references. 
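
# Note on bEndpointAddress: in the standard USB encoding (which these library
# constants presumably mirror) the endpoint number occupies the low bits and
# the direction is bit 7, so DIRECTION_IN is 0x80 and DIRECTION_OUT is 0x00.
# For example (illustrative values):
#   3 | DIRECTION_IN  -> 0x83  (endpoint 3, device-to-host)
#   3 | DIRECTION_OUT -> 0x03  (endpoint 3, host-to-device)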

# When there's only one hid_device, it shouldn't have a report id.
# Otherwise, report ids are assigned sequentially:
# args.hid_devices[0] has report_id 1
# args.hid_devices[1] has report_id 2
# etc.

report_ids = {}

if len(args.hid_devices) == 1:
    name = args.hid_devices[0]
    combined_hid_report_descriptor = hid.ReportDescriptor(
        description=name,
        report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0)))
    report_ids[name] = 0
else:
    report_id = 1
    concatenated_descriptors = bytearray()
    for name in args.hid_devices:
        concatenated_descriptors.extend(
            bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id)))
        report_ids[name] = report_id
        report_id += 1
    combined_hid_report_descriptor = hid.ReportDescriptor(
        description="MULTIDEVICE",
        report_descriptor=bytes(concatenated_descriptors))
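
# With the default --hid_devices (KEYBOARD,MOUSE,CONSUMER,GAMEPAD), the loop
# above yields (derived from the assignment rule in the comment above):
#   report_ids == {'KEYBOARD': 1, 'MOUSE': 2, 'CONSUMER': 3, 'GAMEPAD': 4}
# whereas a single device, e.g. --hid_devices KEYBOARD, gets report_id 0,
# which per the comment above means no report id is used at all.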

# ASF4 expects keyboard and generic devices to have both in and out endpoints,
# and will fail (possibly silently) if both are not supplied.

hid_endpoint_in_descriptor = standard.EndpointDescriptor(
    description="HID in",
    bEndpointAddress=args.hid_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
    bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
    bInterval=8)

hid_endpoint_out_descriptor = standard.EndpointDescriptor(
    description="HID out",
    bEndpointAddress=args.hid_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
    bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
    bInterval=8)

hid_interfaces = [
    standard.InterfaceDescriptor(
        description="HID Multiple Devices",
        bInterfaceClass=hid.HID_CLASS,
        bInterfaceSubClass=hid.HID_SUBCLASS_NOBOOT,
        bInterfaceProtocol=hid.HID_PROTOCOL_NONE,
        iInterface=StringIndex.index("{} HID".format(args.interface_name)),
        subdescriptors=[
            hid.HIDDescriptor(
                description="HID",
                wDescriptorLength=len(bytes(combined_hid_report_descriptor))),
            hid_endpoint_in_descriptor,
            hid_endpoint_out_descriptor,
        ]
    ),
]

# Audio!
# In and out here are relative to CircuitPython.

# USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython
midi_in_jack_emb = midi.InJackDescriptor(
    description="MIDI PC -> {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name)))
midi_out_jack_ext = midi.OutJackDescriptor(
    description="MIDI data out to user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    input_pins=[(midi_in_jack_emb, 1)],
    iJack=0)

# USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython
midi_in_jack_ext = midi.InJackDescriptor(
    description="MIDI data in from user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    iJack=0)
midi_out_jack_emb = midi.OutJackDescriptor(
    description="MIDI PC <- {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    input_pins=[(midi_in_jack_ext, 1)],
    iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name)))

audio_midi_interface = standard.InterfaceDescriptor(
    description="Midi goodness",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} MIDI".format(args.interface_name)),
    subdescriptors=[
        midi.Header(
            jacks_and_elements=[
                midi_in_jack_emb,
                midi_in_jack_ext,
                midi_out_jack_emb,
                midi_out_jack_ext,
            ],
        ),
        standard.EndpointDescriptor(
            description="MIDI data out to {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]),
        standard.EndpointDescriptor(
            description="MIDI data in from {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0x0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]),
    ])

cs_ac_interface = audio10.AudioControlInterface(
    description="Empty audio control",
    audio_streaming_interfaces=[],
    midi_streaming_interfaces=[
        audio_midi_interface,
    ]
)

audio_control_interface = standard.InterfaceDescriptor(
    description="All the audio",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} Audio".format(args.interface_name)),
    subdescriptors=[
        cs_ac_interface,
    ])

# Audio streaming interfaces must occur before MIDI ones.
audio_interfaces = ([audio_control_interface] +
                    cs_ac_interface.audio_streaming_interfaces +
                    cs_ac_interface.midi_streaming_interfaces)

interfaces_to_join = []

if 'CDC' in args.devices:
    interfaces_to_join.append(cdc_interfaces)

if 'MSC' in args.devices:
    interfaces_to_join.append(msc_interfaces)

if 'HID' in args.devices:
    interfaces_to_join.append(hid_interfaces)

if 'AUDIO' in args.devices:
    interfaces_to_join.append(audio_interfaces)

# util.join_interfaces() will renumber the endpoints to make them unique across
# descriptors, and renumber the interfaces in order. But we still need to fix
# up certain interface cross-references.
interfaces = util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints)

# Now adjust the CDC interface cross-references.

cdc_union.bMasterInterface = cdc_comm_interface.bInterfaceNumber
cdc_union.bSlaveInterface_list = [cdc_data_interface.bInterfaceNumber]

cdc_call_management.bDataInterface = cdc_data_interface.bInterfaceNumber

cdc_iad = standard.InterfaceAssociationDescriptor(
    description="CDC IAD",
    bFirstInterface=cdc_comm_interface.bInterfaceNumber,
    bInterfaceCount=len(cdc_interfaces),
    bFunctionClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bFunctionSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bFunctionProtocol=cdc.CDC_PROTOCOL_NONE)
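
# A hypothetical before/after to make the renumbering concrete (the real logic
# lives inside adafruit_usb_descriptor's util.join_interfaces; these numbers
# are illustrative only): joining CDC,MSC,HID in that order yields interfaces
# 0 and 1 for CDC, 2 for MSC, and 3 for HID, with each endpoint assigned a
# device-unique number. That is why the 0x00/0x01 placeholders written into
# cdc_union and cdc_call_management earlier are overwritten just above with
# the post-join bInterfaceNumber values.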

descriptor_list = []

if 'CDC' in args.devices:
    # Put the CDC IAD just before the CDC interfaces.
    # There appears to be a bug in the Windows composite USB driver that
    # requests the HID report descriptor with the wrong interface number if
    # the HID interface is not given first. However, it still fetches the
    # descriptor anyway. We could reorder the interfaces, but the Windows 7
    # Adafruit_usbser.inf file thinks CDC is at Interface 0, so we'll leave it
    # there for backwards compatibility.
    descriptor_list.append(cdc_iad)
    descriptor_list.extend(cdc_interfaces)

if 'MSC' in args.devices:
    descriptor_list.extend(msc_interfaces)

if 'HID' in args.devices:
    descriptor_list.extend(hid_interfaces)

if 'AUDIO' in args.devices:
    # Only add the control interface because other audio interfaces are
    # managed by it to ensure the correct ordering.
    descriptor_list.append(audio_control_interface)

# Finally, build the composite descriptor.
configuration = standard.ConfigurationDescriptor(
    description="Composite configuration",
    wTotalLength=(standard.ConfigurationDescriptor.bLength +
                  sum([len(bytes(x)) for x in descriptor_list])),
    bNumInterfaces=len(interfaces))
descriptor_list.insert(0, configuration)

string_descriptors = [standard.StringDescriptor(string)
                      for string in StringIndex.strings_in_order()]
serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX]

c_file = args.output_c_file
h_file = args.output_h_file
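
# Note on wTotalLength above: it is the configuration descriptor's own bLength
# plus the byte length of every descriptor that follows it. At the point it is
# computed, descriptor_list holds only the interface-level descriptors, so the
# sum is taken before the configuration descriptor itself is inserted at
# index 0.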

c_file.write("""\
#include <stdint.h>

#include "py/objtuple.h"
#include "shared-bindings/usb_hid/Device.h"

#include "{H_FILE_NAME}"

""".format(H_FILE_NAME=h_file.name))

c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=device.description,
           CLASS=device.__class__))

c_file.write("""\
const uint8_t usb_desc_dev[] = {
""")
for b in bytes(device):
    c_file.write("0x{:02x}, ".format(b))

c_file.write("""\
};
""")

c_file.write("""\
const uint8_t usb_desc_cfg[] = {
""")

# Write out all the regular descriptors as one long array (that's how ASF4 does it).
descriptor_length = 0
for descriptor in descriptor_list:
    c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description,
           CLASS=descriptor.__class__))

    b = bytes(descriptor)
    notes = descriptor.notes()
    i = 0

    # This prints each subdescriptor on a separate line.
    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length):
            c_file.write("0x{:02x}, ".format(b[i + j]))
        c_file.write("// " + notes[n])
        n += 1
        c_file.write("\n")
        i += length
    descriptor_length += len(b)

c_file.write("""\
};
""")
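
# The loop above emits one commented line per subdescriptor, along these lines
# (the bytes and note text here are hypothetical, for illustration only):
#
#   const uint8_t usb_desc_cfg[] = {
#   // Composite configuration : <class '...ConfigurationDescriptor'>
#   0x09, 0x02, 0x5f, 0x00, 0x04, 0x01, 0x00, 0x80, 0x32, // Configuration
#   ...
#   };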

pointers_to_strings = []

for idx, descriptor in enumerate(string_descriptors):
    c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description,
           CLASS=descriptor.__class__))

    b = bytes(descriptor)
    notes = descriptor.notes()
    i = 0

    # This prints each subdescriptor on a separate line.
    variable_name = StringIndex.index_to_variable[idx]
    if not variable_name:
        variable_name = "string_descriptor{}".format(idx)

    const = "const "
    if variable_name == "usb_serial_number":
        const = ""
    c_file.write("""\
{const}uint16_t {NAME}[] = {{
""".format(const=const, NAME=variable_name))
    pointers_to_strings.append("{name}".format(name=variable_name))

    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length // 2):
            c_file.write("0x{:04x}, ".format(b[i + 2*j + 1] << 8 | b[i + 2*j]))
        n += 1
        c_file.write("\n")
        i += length
    c_file.write("""\
};
""")

c_file.write("""\
// array of pointer to string descriptors
uint16_t const * const string_desc_arr [] =
{
""")
c_file.write(""",\

""".join(pointers_to_strings))
c_file.write("""
};
""")

c_file.write("\n")
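
# The 0x{:04x} formatting above packs adjacent descriptor bytes into 16-bit
# words, low byte first, because USB string descriptors are UTF-16LE. A
# standalone equivalent of that shift-and-or, for illustration only (this
# helper is not used by the generator):

def _utf16le_words(descriptor_bytes):
    """Return the 16-bit little-endian words of a byte string (illustrative)."""
    return [descriptor_bytes[k] | (descriptor_bytes[k + 1] << 8)
            for k in range(0, len(descriptor_bytes) - 1, 2)]

# For example, a descriptor for "ABCD" (bLength=0x0a, bDescriptorType=0x03):
#   _utf16le_words(b"\x0a\x03A\x00B\x00C\x00D\x00")
#   == [0x030a, 0x0041, 0x0042, 0x0043, 0x0044]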

hid_descriptor_length = len(bytes(combined_hid_report_descriptor))

# Now write the values we need for the .h file.
h_file.write("""\
#ifndef MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
#define MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H

#include <stdint.h>

extern const uint8_t usb_desc_dev[{device_length}];
extern const uint8_t usb_desc_cfg[{configuration_length}];
extern uint16_t usb_serial_number[{serial_number_length}];
extern uint16_t const * const string_desc_arr [{string_descriptor_length}];

extern const uint8_t hid_report_descriptor[{hid_report_descriptor_length}];

#define CFG_TUSB_RHPORT0_MODE       ({rhport0_mode})

#define USB_HID_NUM_DEVICES         {hid_num_devices}

// Vendor name included in Inquiry response, max 8 bytes
#define CFG_TUD_MSC_VENDOR          "{msc_vendor}"

// Product name included in Inquiry response, max 16 bytes
#define CFG_TUD_MSC_PRODUCT         "{msc_product}"

"""
            .format(serial_number_length=len(bytes(serial_number_descriptor)) // 2,
                    device_length=len(bytes(device)),
                    configuration_length=descriptor_length,
                    max_configuration_length=max(hid_descriptor_length, descriptor_length),
                    string_descriptor_length=len(pointers_to_strings),
                    hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)),
                    rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE',
                    hid_num_devices=len(args.hid_devices),
                    msc_vendor=args.manufacturer[:8],
                    msc_product=args.product[:16]))
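
# For a concrete sense of the output, the generated .h ends up looking roughly
# like this (every length and name below is made up):
#
#   extern const uint8_t usb_desc_dev[18];
#   extern const uint8_t usb_desc_cfg[95];
#   extern uint16_t usb_serial_number[33];
#   ...
#   #define CFG_TUSB_RHPORT0_MODE       (OPT_MODE_DEVICE)
#   #define USB_HID_NUM_DEVICES         4
#   #define CFG_TUD_MSC_VENDOR          "Example"
#   #define CFG_TUD_MSC_PRODUCT         "Example Board"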
description=\"MULTIDEVICE\", report_descriptor=bytes(concatenated_descriptors)) # ASF4 expects keyboard and generic", "type=str, help='product name of the device') parser.add_argument('--vid', type=lambda x: int(x,", "bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed else 64),", "descriptor in descriptor_list: c_file.write(\"\"\"\\ // {DESCRIPTION} : {CLASS} \"\"\".format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__))", "if 'MSC' in args.devices: descriptor_list.extend(msc_interfaces) if 'HID' in args.devices: descriptor_list.extend(hid_interfaces)", "{report_length}, .usage_page = {usage_page:#04x}, .usage = {usage:#04x}, .out_report_buffer = {out_report_buffer},", "class StringIndex: \"\"\"Assign a monotonically increasing index to each unique", "default=0, help='endpoint number of CDC DATA IN') parser.add_argument('--msc_ep_num_out', type=int, default=0,", "(https://github.com/adafruit/circuitpython/graphs/contributors) # # SPDX-License-Identifier: MIT import argparse import os import", "info c_file.write(\"\"\"\\ const uint8_t hid_report_descriptor[{HID_DESCRIPTOR_LENGTH}] = {{ \"\"\".format(HID_DESCRIPTOR_LENGTH=hid_descriptor_length)) for b", "HID report descriptor with the wrong interface number if the", "audio10.AudioControlInterface( description=\"Empty audio control\", audio_streaming_interfaces = [], midi_streaming_interfaces = [", "j in range(length): c_file.write(\"0x{:02x}, \".format(b[i + j])) c_file.write(\"// \" +", "16 bytes #define CFG_TUD_MSC_PRODUCT \"{msc_product}\" \"\"\" .format(serial_number_length=len(bytes(serial_number_descriptor)) // 2, device_length=len(bytes(device)),", "MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors) # # SPDX-License-Identifier: MIT import", "CircuitPython # USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython", "parser.add_argument('--msc_ep_num_out', type=int, default=0, help='endpoint number of MSC OUT') parser.add_argument('--msc_ep_num_in', type=int,", "DEFAULT_DEVICES='CDC,MSC,AUDIO,HID' ALL_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,SYS_CONTROL,GAMEPAD,DIGITIZER,XAC_COMPATIBLE_GAMEPAD,RAW' ALL_HID_DEVICES_SET=frozenset(ALL_HID_DEVICES.split(',')) # Digitizer works on Linux but conflicts", "1 concatenated_descriptors = bytearray() for name in args.hid_devices: concatenated_descriptors.extend( bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id)))", "type=str, help='manufacturer of the device') parser.add_argument('--product', type=str, help='product name of", "parser.add_argument('--interface_name', type=str, help='The name/prefix to use in the interface descriptions',", "descriptor_list: c_file.write(\"\"\"\\ // {DESCRIPTION} : {CLASS} \"\"\".format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__)) b =", "os import sys sys.path.append(\"../../tools/usb_descriptor\") from adafruit_usb_descriptor import audio, audio10, cdc,", "args.output_h_file c_file.write(\"\"\"\\ #include <stdint.h> #include \"py/objtuple.h\" #include \"shared-bindings/usb_hid/Device.h\" #include \"{H_FILE_NAME}\"", "# This prints each subdescriptor on a separate line. 

unknown_devices = list(frozenset(args.devices) - ALL_DEVICES_SET)
if unknown_devices:
    raise ValueError("Unknown device(s)", unknown_devices)

unknown_hid_devices = list(frozenset(args.hid_devices) - ALL_HID_DEVICES_SET)
if unknown_hid_devices:
    raise ValueError("Unknown HID device(s)", unknown_hid_devices)

# When endpoints are not renumbered, every requested interface must be given an
# explicit, non-zero endpoint number.
if not args.renumber_endpoints:
    if 'CDC' in args.devices:
        if args.cdc_ep_num_notification == 0:
            raise ValueError("CDC notification endpoint number must not be 0")
        elif args.cdc_ep_num_data_out == 0:
            raise ValueError("CDC data OUT endpoint number must not be 0")
        elif args.cdc_ep_num_data_in == 0:
            raise ValueError("CDC data IN endpoint number must not be 0")

    if 'MSC' in args.devices:
        if args.msc_ep_num_out == 0:
            raise ValueError("MSC endpoint OUT number must not be 0")
        elif args.msc_ep_num_in == 0:
            raise ValueError("MSC endpoint IN number must not be 0")

    if 'HID' in args.devices:
        if args.hid_ep_num_out == 0:
            raise ValueError("HID endpoint OUT number must not be 0")
        elif args.hid_ep_num_in == 0:
            raise ValueError("HID endpoint IN number must not be 0")

    if 'AUDIO' in args.devices:
        if args.midi_ep_num_out == 0:
            raise ValueError("MIDI endpoint OUT number must not be 0")
        elif args.midi_ep_num_in == 0:
            raise ValueError("MIDI endpoint IN number must not be 0")


class StringIndex:
    """Assign a monotonically increasing index to each unique string. Start with 0."""
    string_to_index = {}
    index_to_variable = {}
    strings = []

    @classmethod
    def index(cls, string, *, variable_name=None):
        if string in cls.string_to_index:
            idx = cls.string_to_index[string]
            if not cls.index_to_variable[idx]:
                cls.index_to_variable[idx] = variable_name
            return idx
        else:
            idx = len(cls.strings)
            cls.string_to_index[string] = idx
            cls.strings.append(string)
            cls.index_to_variable[idx] = variable_name
            return idx

    @classmethod
    def strings_in_order(cls):
        return cls.strings
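
# Example of the indexing behavior (illustrative comment, not executed as part of
# descriptor generation): repeated strings get the same index, new strings get the
# next free one.
#   StringIndex.index("\u0409", variable_name="language_id")  # -> 0
#   StringIndex.index("Acme")                                 # -> 1
#   StringIndex.index("Acme")                                 # -> 1 (already known)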

# langid must be the 0th string descriptor
LANGID_INDEX = StringIndex.index("\u0409", variable_name="language_id")
assert LANGID_INDEX == 0
SERIAL_NUMBER_INDEX = StringIndex.index("S" * args.serial_number_length,
                                        variable_name="usb_serial_number")

device = standard.DeviceDescriptor(
    description="top",
    idVendor=args.vid,
    idProduct=args.pid,
    iManufacturer=StringIndex.index(args.manufacturer),
    iProduct=StringIndex.index(args.product),
    iSerialNumber=SERIAL_NUMBER_INDEX)

# Interface numbers are interface-set local and endpoints are interface local
# until util.join_interfaces renumbers them.

cdc_union = cdc.Union(
    description="CDC comm",
    bMasterInterface=0x00,        # Adjust this after interfaces are renumbered.
    bSlaveInterface_list=[0x01])  # Adjust this after interfaces are renumbered.

cdc_call_management = cdc.CallManagement(
    description="CDC comm",
    bmCapabilities=0x01,
    bDataInterface=0x01)  # Adjust this after interfaces are renumbered.

cdc_comm_interface = standard.InterfaceDescriptor(
    description="CDC comm",
    bInterfaceClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE,
    iInterface=StringIndex.index("{} CDC control".format(args.interface_name)),
    subdescriptors=[
        cdc.Header(
            description="CDC comm",
            bcdCDC=0x0110),
        cdc_call_management,
        cdc.AbstractControlManagement(
            description="CDC comm",
            bmCapabilities=0x02),
        cdc_union,
        standard.EndpointDescriptor(
            description="CDC comm in",
            bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
            wMaxPacketSize=0x0040,
            bInterval=0x10)
    ])

cdc_data_interface = standard.InterfaceDescriptor(
    description="CDC data",
    bInterfaceClass=cdc.CDC_CLASS_DATA,
    iInterface=StringIndex.index("{} CDC data".format(args.interface_name)),
    subdescriptors=[
        standard.EndpointDescriptor(
            description="CDC data out",
            bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        standard.EndpointDescriptor(
            description="CDC data in",
            bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
    ])

cdc_interfaces = [cdc_comm_interface, cdc_data_interface]

msc_interfaces = [
    standard.InterfaceDescriptor(
        description="MSC",
        bInterfaceClass=msc.MSC_CLASS,
        bInterfaceSubClass=msc.MSC_SUBCLASS_TRANSPARENT,
        bInterfaceProtocol=msc.MSC_PROTOCOL_BULK,
        iInterface=StringIndex.index("{} Mass Storage".format(args.interface_name)),
        subdescriptors=[
            standard.EndpointDescriptor(
                description="MSC in",
                bEndpointAddress=args.msc_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
            standard.EndpointDescriptor(
                description="MSC out",
                bEndpointAddress=(args.msc_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT),
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
        ]
    )
]
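
# Note on the endpoint addresses above (added comment): bEndpointAddress packs the
# endpoint number together with a direction flag (DIRECTION_IN is the USB "IN"
# direction bit, 0x80), which is why the numeric endpoint arguments are OR'd with
# the direction constants.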
c_file.write(\"\"\" mp_obj_tuple_t", "local and endpoints are interface local # until util.join_interfaces renumbers", "usb_midi.ports[0]\".format(args.interface_name))) midi_out_jack_ext = midi.OutJackDescriptor( description=\"MIDI data out to user code.\",", "= len(cls.strings) cls.string_to_index[string] = idx cls.strings.append(string) cls.index_to_variable[idx] = variable_name return", "args.devices: # Only add the control interface because other audio", "= cdc_comm_interface.bInterfaceNumber cdc_union.bSlaveInterface_list = [cdc_data_interface.bInterfaceNumber] cdc_call_management.bDataInterface = cdc_data_interface.bInterfaceNumber cdc_iad =", "type=int, default=32, help='length needed for the serial number in digits')", "OUT') parser.add_argument('--cdc_ep_num_data_in', type=int, default=0, help='endpoint number of CDC DATA IN')", "idx, descriptor in enumerate(string_descriptors): c_file.write(\"\"\"\\ // {DESCRIPTION} : {CLASS} \"\"\".format(DESCRIPTION=descriptor.description,", "0 while i < len(b): length = b[i] for j", "standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, wMaxPacketSize=0x0040, bInterval=0x10) ]) cdc_data_interface = standard.InterfaceDescriptor( description=\"CDC data\",", "description=name, report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0))) report_ids[name] = 0 else: report_id = 1 concatenated_descriptors", "args.hid_ep_num_in == 0: raise ValueError(\"HID endpoint IN number must not", "interfaces in order. But we still need to fix up", "Class bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE, iInterface=StringIndex.index(\"{} CDC control\".format(args.interface_name)),", "here are relative to CircuitPython # USB OUT -> midi_in_jack_emb", "id') parser.add_argument('--pid', type=lambda x: int(x, 16), help='product id') parser.add_argument('--serial_number_length', type=int,", "includes MIDI support)') parser.add_argument('--hid_devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_HID_DEVICES, help='HID devices", "b[i] for j in range(length): c_file.write(\"0x{:02x}, \".format(b[i + j])) c_file.write(\"//", "of MIDI IN') parser.add_argument('--output_c_file', type=argparse.FileType('w', encoding='UTF-8'), required=True) parser.add_argument('--output_h_file', type=argparse.FileType('w', encoding='UTF-8'),", "bytes(descriptor) notes = descriptor.notes() i = 0 # This prints", "= midi.OutJackDescriptor( description=\"MIDI PC <- {}\".format(args.interface_name), bJackType=midi.JACK_TYPE_EMBEDDED, input_pins=[(midi_in_jack_ext, 1)], iJack=StringIndex.index(\"{}", "#include \"shared-bindings/usb_hid/Device.h\" #include \"{H_FILE_NAME}\" \"\"\".format(H_FILE_NAME=h_file.name)) c_file.write(\"\"\"\\ // {DESCRIPTION} : {CLASS}", "'MSC' in args.devices: descriptor_list.extend(msc_interfaces) if 'HID' in args.devices: descriptor_list.extend(hid_interfaces) if", "# Digitizer works on Linux but conflicts with mouse, so", "description=\"MIDI data in from user code.\", bJackType=midi.JACK_TYPE_EXTERNAL, iJack=0) midi_out_jack_emb =", "variable_name = None): if string in cls.string_to_index: idx = cls.string_to_index[string]", "<- midi_in_jack_ext <- CircuitPython midi_in_jack_ext = midi.InJackDescriptor( description=\"MIDI data in", "default=DEFAULT_INTERFACE_NAME) parser.add_argument('--no-renumber_endpoints', dest='renumber_endpoints', action='store_false', help='use to not renumber endpoint') 
parser.add_argument('--cdc_ep_num_notification',", "c_file.write(\"\"\"\\ const uint8_t usb_desc_cfg[] = { \"\"\") # Write out", "if args.highspeed else 64), midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]), ]) cs_ac_interface = audio10.AudioControlInterface( description=\"Empty", "out to user code.\", bJackType=midi.JACK_TYPE_EXTERNAL, input_pins=[(midi_in_jack_emb, 1)], iJack=0) # USB", "# Otherwise, report ids are assigned sequentially: # args.hid_devices[0] has", "default=0, help='endpoint number of MSC OUT') parser.add_argument('--msc_ep_num_in', type=int, default=0, help='endpoint", "description=\"CDC IAD\", bFirstInterface=cdc_comm_interface.bInterfaceNumber, bInterfaceCount=len(cdc_interfaces), bFunctionClass=cdc.CDC_CLASS_COMM, # Communications Device Class bFunctionSubClass=cdc.CDC_SUBCLASS_ACM,", "ValueError(\"CDC data IN endpoint number must not be 0\") if", "include in descriptor (AUDIO includes MIDI support)') parser.add_argument('--hid_devices', type=lambda l:", "1: name = args.hid_devices[0] combined_hid_report_descriptor = hid.ReportDescriptor( description=name, report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0))) report_ids[name]", "to make them unique across descriptors, # and renumber the", "SPDX-License-Identifier: MIT import argparse import os import sys sys.path.append(\"../../tools/usb_descriptor\") from", "report_id += 1 combined_hid_report_descriptor = hid.ReportDescriptor( description=\"MULTIDEVICE\", report_descriptor=bytes(concatenated_descriptors)) # ASF4", "if 'HID' in args.devices: if args.args.hid_ep_num_out == 0: raise ValueError(\"HID", "if both are not supplied. hid_endpoint_in_descriptor = standard.EndpointDescriptor( description=\"HID in\",", "const uint8_t usb_desc_dev[] = { \"\"\") for b in bytes(device):", "1)], iJack=StringIndex.index(\"{} usb_midi.ports[1]\".format(args.interface_name))) audio_midi_interface = standard.InterfaceDescriptor( description=\"Midi goodness\", bInterfaceClass=audio.AUDIO_CLASS_DEVICE, bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING,", "'CDC' in args.devices: # Put the CDC IAD just before", "and generic devices to have both in and out endpoints,", "standard.InterfaceAssociationDescriptor( description=\"CDC IAD\", bFirstInterface=cdc_comm_interface.bInterfaceNumber, bInterfaceCount=len(cdc_interfaces), bFunctionClass=cdc.CDC_CLASS_COMM, # Communications Device Class", "\" if variable_name == \"usb_serial_number\": const = \"\" c_file.write(\"\"\"\\ {const}uint16_t", "string_desc_arr [{string_descriptor_length}]; extern const uint8_t hid_report_descriptor[{hid_report_descriptor_length}]; #define CFG_TUSB_RHPORT0_MODE ({rhport0_mode}) #define", "description=\"HID in\", bEndpointAddress=args.hid_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, bInterval=8) hid_endpoint_out_descriptor = standard.EndpointDescriptor(", "descriptor_list.append(cdc_iad) descriptor_list.extend(cdc_interfaces) if 'MSC' in args.devices: descriptor_list.extend(msc_interfaces) if 'HID' in", "\"string_descriptor{}\".format(idx) const = \"const \" if variable_name == \"usb_serial_number\": const", "by it to ensure the # correct ordering. 
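
# For example (illustrative): with the default --hid_devices KEYBOARD,MOUSE,CONSUMER,GAMEPAD
# the report-id assignment above yields
#   report_ids == {'KEYBOARD': 1, 'MOUSE': 2, 'CONSUMER': 3, 'GAMEPAD': 4}
# while a single device such as --hid_devices KEYBOARD gets report id 0 (i.e. none).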

# Audio!
# In and out here are relative to CircuitPython.

# USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython
midi_in_jack_emb = midi.InJackDescriptor(
    description="MIDI PC -> {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name)))
midi_out_jack_ext = midi.OutJackDescriptor(
    description="MIDI data out to user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    input_pins=[(midi_in_jack_emb, 1)],
    iJack=0)

# USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython
midi_in_jack_ext = midi.InJackDescriptor(
    description="MIDI data in from user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    iJack=0)
midi_out_jack_emb = midi.OutJackDescriptor(
    description="MIDI PC <- {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    input_pins=[(midi_in_jack_ext, 1)],
    iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name)))

audio_midi_interface = standard.InterfaceDescriptor(
    description="Midi goodness",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} MIDI".format(args.interface_name)),
    subdescriptors=[
        midi.Header(
            jacks_and_elements=[
                midi_in_jack_emb,
                midi_in_jack_ext,
                midi_out_jack_emb,
                midi_out_jack_ext
            ],
        ),
        standard.EndpointDescriptor(
            description="MIDI data out to {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]),
        standard.EndpointDescriptor(
            description="MIDI data in from {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0x0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]),
    ])

cs_ac_interface = audio10.AudioControlInterface(
    description="Empty audio control",
    audio_streaming_interfaces=[],
    midi_streaming_interfaces=[
        audio_midi_interface
    ]
)

audio_control_interface = standard.InterfaceDescriptor(
    description="All the audio",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} Audio".format(args.interface_name)),
    subdescriptors=[
        cs_ac_interface,
    ])

# Audio streaming interfaces must occur before MIDI ones.
audio_interfaces = ([audio_control_interface] +
                    cs_ac_interface.audio_streaming_interfaces +
                    cs_ac_interface.midi_streaming_interfaces)
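
# Routing summary for the jacks above (restated from the comments, host's view):
#   host OUT endpoint -> midi_in_jack_emb -> midi_out_jack_ext -> user code
#   user code -> midi_in_jack_ext -> midi_out_jack_emb -> host IN endpoint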
h_file.write(\"\"\"\\ #ifndef MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H #define MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H #include <stdint.h> extern const", "midi_out_jack_ext -> CircuitPython midi_in_jack_emb = midi.InJackDescriptor( description=\"MIDI PC -> {}\".format(args.interface_name),", "const = \"const \" if variable_name == \"usb_serial_number\": const =", "i < len(b): length = b[i] for j in range(length", "data in\", bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN, bmAttributes=standard.EndpointDescriptor.TYPE_BULK, bInterval=0, wMaxPacketSize=512 if args.highspeed", "raise ValueError(\"Unknown device(s)\", unknown_devices) unknown_hid_devices = list(frozenset(args.hid_devices) - ALL_HID_DEVICES_SET) if", "Linux but conflicts with mouse, so omit it. DEFAULT_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,GAMEPAD' parser", "numbers are interface-set local and endpoints are interface local #", "configuration = standard.ConfigurationDescriptor( description=\"Composite configuration\", wTotalLength=(standard.ConfigurationDescriptor.bLength + sum([len(bytes(x)) for x", "help='The name/prefix to use in the interface descriptions', default=DEFAULT_INTERFACE_NAME) parser.add_argument('--no-renumber_endpoints',", "in cls.string_to_index: idx = cls.string_to_index[string] if not cls.index_to_variable[idx]: cls.index_to_variable[idx] =", "{{ .base = {{ .type = &mp_type_tuple, }}, .len =", "still fetches the descriptor anyway. We could reorder the interfaces", "{report_id}, .report_length = {report_length}, .usage_page = {usage_page:#04x}, .usage = {usage:#04x},", "hid, midi, msc, standard, util import hid_report_descriptors DEFAULT_INTERFACE_NAME = 'CircuitPython'", "user code.\", bJackType=midi.JACK_TYPE_EXTERNAL, iJack=0) midi_out_jack_emb = midi.OutJackDescriptor( description=\"MIDI PC <-", "type=int, default=0, help='endpoint number of CDC DATA OUT') parser.add_argument('--cdc_ep_num_data_in', type=int,", "= variable_name return idx @classmethod def strings_in_order(cls): return cls.strings #", "}}, .len = {num_devices}, .items = {{ \"\"\".format(num_devices=len(args.hid_devices))) for idx", "args.hid_devices: concatenated_descriptors.extend( bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id))) report_ids[name] = report_id report_id += 1 combined_hid_report_descriptor", ".base = {{ .type = &usb_hid_device_type }}, .report_buffer = {name}_report_buffer,", "# Adjust this after interfaces are renumbered. bSlaveInterface_list=[0x01]) # Adjust", "{const}uint16_t {NAME}[] = {{ \"\"\".format(const=const, NAME=variable_name)) pointers_to_strings.append(\"{name}\".format(name=variable_name)) n = 0", "after interfaces are renumbered. bSlaveInterface_list=[0x01]) # Adjust this after interfaces", "local # until util.join_interfaces renumbers them. 
cdc_union = cdc.Union( description=\"CDC", "args.highspeed else 64), midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]), ]) cs_ac_interface = audio10.AudioControlInterface( description=\"Empty audio", "mp_obj_tuple_t common_hal_usb_hid_devices = {{ .base = {{ .type = &mp_type_tuple,", "bJackType=midi.JACK_TYPE_EXTERNAL, input_pins=[(midi_in_jack_emb, 1)], iJack=0) # USB IN <- midi_out_jack_emb <-", "data\".format(args.interface_name)), subdescriptors=[ standard.EndpointDescriptor( description=\"CDC data out\", bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_BULK,", "cs_ac_interface.midi_streaming_interfaces interfaces_to_join = [] if 'CDC' in args.devices: interfaces_to_join.append(cdc_interfaces) if", "0: raise ValueError(\"HID endpoint OUT number must not be 0\")", "args.devices: if args.args.midi_ep_num_out == 0: raise ValueError(\"MIDI endpoint OUT number", "uint8_t usb_desc_dev[] = { \"\"\") for b in bytes(device): c_file.write(\"0x{:02x},", "we still need to fix up certain # interface cross-references.", "args.devices: if args.msc_ep_num_out == 0: raise ValueError(\"MSC endpoint OUT number", ".base = {{ .type = &mp_type_tuple, }}, .len = {num_devices},", "descriptor_length), string_descriptor_length=len(pointers_to_strings), hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)), rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE',", "fix up certain # interface cross-references. interfaces = util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints)", "tuple(l.split(',')), default=DEFAULT_HID_DEVICES, help='HID devices to include in HID report descriptor')", "args.highspeed else 'OPT_MODE_DEVICE', hid_num_devices=len(args.hid_devices), msc_vendor=args.manufacturer[:8], msc_product=args.product[:16])) # Write out the", "as one long array (that's how ASF4 does it). descriptor_length", "endpoint IN number must not be 0\") class StringIndex: \"\"\"Assign", "number must not be 0\") elif args.hid_ep_num_in == 0: raise", "model bFunctionProtocol=cdc.CDC_PROTOCOL_NONE) descriptor_list = [] if 'CDC' in args.devices: #", "Devices\", bInterfaceClass=hid.HID_CLASS, bInterfaceSubClass=hid.HID_SUBCLASS_NOBOOT, bInterfaceProtocol=hid.HID_PROTOCOL_NONE, iInterface=StringIndex.index(\"{} HID\".format(args.interface_name)), subdescriptors=[ hid.HIDDescriptor( description=\"HID\", wDescriptorLength=len(bytes(combined_hid_report_descriptor))),", "MIDI IN') parser.add_argument('--output_c_file', type=argparse.FileType('w', encoding='UTF-8'), required=True) parser.add_argument('--output_h_file', type=argparse.FileType('w', encoding='UTF-8'), required=True)", "number must not be 0\") if 'MSC' in args.devices: if", "CDC is at Interface 0, so we'll leave it #", "{CLASS} \"\"\".format(DESCRIPTION=descriptor.description, CLASS=descriptor.__class__)) b = bytes(descriptor) notes = descriptor.notes() i", "to include in descriptor (AUDIO includes MIDI support)') parser.add_argument('--hid_devices', type=lambda", "help='endpoint number of MIDI OUT') parser.add_argument('--midi_ep_num_in', type=int, default=0, help='endpoint number", "Put the CDC IAD just before the CDC interfaces. 
#", "usb_serial_number[{serial_number_length}]; extern uint16_t const * const string_desc_arr [{string_descriptor_length}]; extern const", "= bytearray() for name in args.hid_devices: concatenated_descriptors.extend( bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id))) report_ids[name] =", "@classmethod def index(cls, string, *, variable_name = None): if string", "== 0: raise ValueError(\"HID endpoint OUT number must not be", "cdc.CallManagement( description=\"CDC comm\", bmCapabilities=0x01, bDataInterface=0x01) # Adjust this after interfaces", ".items = {{ \"\"\".format(num_devices=len(args.hid_devices))) for idx in range(len(args.hid_devices)): c_file.write(\"\"\"\\ (mp_obj_t)", "unknown_hid_devices) if not args.renumber_endpoints: if 'CDC' in args.devices: if args.cdc_ep_num_notification", "input_pins=[(midi_in_jack_ext, 1)], iJack=StringIndex.index(\"{} usb_midi.ports[1]\".format(args.interface_name))) audio_midi_interface = standard.InterfaceDescriptor( description=\"Midi goodness\", bInterfaceClass=audio.AUDIO_CLASS_DEVICE,", "order. But we still need to fix up certain #", "cdc_interfaces = [cdc_comm_interface, cdc_data_interface] msc_interfaces = [ standard.InterfaceDescriptor( description=\"MSC\", bInterfaceClass=msc.MSC_CLASS,", "name included in Inquiry response, max 8 bytes #define CFG_TUD_MSC_VENDOR", "1 combined_hid_report_descriptor = hid.ReportDescriptor( description=\"MULTIDEVICE\", report_descriptor=bytes(concatenated_descriptors)) # ASF4 expects keyboard", "comm\", bcdCDC=0x0110), cdc_call_management, cdc.AbstractControlManagement( description=\"CDC comm\", bmCapabilities=0x02), cdc_union, standard.EndpointDescriptor( description=\"CDC", "midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]), standard.EndpointDescriptor( description=\"MIDI data in from {}\".format(args.interface_name), bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,", "one long array (that's how ASF4 does it). 
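
# With the default device set this yields the order (illustrative):
#   CDC IAD, CDC comm, CDC data, MSC, HID, audio control
# (the MIDI streaming interface follows the audio control interface, which owns it).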

# Finally, build the composite descriptor.

configuration = standard.ConfigurationDescriptor(
    description="Composite configuration",
    wTotalLength=(standard.ConfigurationDescriptor.bLength +
                  sum([len(bytes(x)) for x in descriptor_list])),
    bNumInterfaces=len(interfaces))
descriptor_list.insert(0, configuration)

string_descriptors = [standard.StringDescriptor(string)
                      for string in StringIndex.strings_in_order()]
serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX]

c_file = args.output_c_file
h_file = args.output_h_file


c_file.write("""\
#include <stdint.h>

#include "py/objtuple.h"
#include "shared-bindings/usb_hid/Device.h"

#include "{H_FILE_NAME}"

""".format(H_FILE_NAME=h_file.name))

c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=device.description,
           CLASS=device.__class__))

c_file.write("""\
const uint8_t usb_desc_dev[] = {
""")
for b in bytes(device):
    c_file.write("0x{:02x}, ".format(b))

c_file.write("""\
};
""")

c_file.write("""\
const uint8_t usb_desc_cfg[] = {
""")

# Write out all the regular descriptors as one long array (that's how ASF4 does it).
descriptor_length = 0
for descriptor in descriptor_list:
    c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description,
           CLASS=descriptor.__class__))

    b = bytes(descriptor)
    notes = descriptor.notes()
    i = 0

    # This prints each subdescriptor on a separate line.
    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length):
            c_file.write("0x{:02x}, ".format(b[i + j]))
        c_file.write("// " + notes[n])
        n += 1
        c_file.write("\n")
        i += length
    descriptor_length += len(b)

c_file.write("""\
};
""")

pointers_to_strings = []

for idx, descriptor in enumerate(string_descriptors):
    c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description,
           CLASS=descriptor.__class__))

    b = bytes(descriptor)
    notes = descriptor.notes()
    i = 0

    # This prints each subdescriptor on a separate line.
    variable_name = StringIndex.index_to_variable[idx]
    if not variable_name:
        variable_name = "string_descriptor{}".format(idx)
    const = "const "
    if variable_name == "usb_serial_number":
        const = ""
    c_file.write("""\
{const}uint16_t {NAME}[] = {{
""".format(const=const, NAME=variable_name))
    pointers_to_strings.append("{name}".format(name=variable_name))

    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length // 2):
            c_file.write("0x{:04x}, ".format(b[i + 2*j + 1] << 8 | b[i + 2*j]))
        n += 1
        c_file.write("\n")
        i += length
    c_file.write("""\
};
""")
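
# The loops above emit one line of hex per subdescriptor, with the human-readable
# note from descriptor.notes() appended as a C comment, e.g. (illustrative output):
#   0x09, 0x04, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x04, // InterfaceDescriptor
# String descriptors are emitted as uint16_t arrays because USB strings are
# UTF-16-LE, hence the little-endian byte pairing above.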

c_file.write("""\
// array of pointer to string descriptors
uint16_t const * const string_desc_arr [] =
{
""")
c_file.write(",\n".join(pointers_to_strings))

c_file.write("""
};
""")

c_file.write("\n")

hid_descriptor_length = len(bytes(combined_hid_report_descriptor))

# Now the values we need for the .h file.
h_file.write("""\
#ifndef MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
#define MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H

#include <stdint.h>

extern const uint8_t usb_desc_dev[{device_length}];
extern const uint8_t usb_desc_cfg[{configuration_length}];
extern uint16_t usb_serial_number[{serial_number_length}];
extern uint16_t const * const string_desc_arr [{string_descriptor_length}];

extern const uint8_t hid_report_descriptor[{hid_report_descriptor_length}];

#define CFG_TUSB_RHPORT0_MODE       ({rhport0_mode})

#define USB_HID_NUM_DEVICES         {hid_num_devices}

// Vendor name included in Inquiry response, max 8 bytes
#define CFG_TUD_MSC_VENDOR          "{msc_vendor}"

// Product name included in Inquiry response, max 16 bytes
#define CFG_TUD_MSC_PRODUCT         "{msc_product}"
"""
            .format(serial_number_length=len(bytes(serial_number_descriptor)) // 2,
                    device_length=len(bytes(device)),
                    configuration_length=descriptor_length,
                    max_configuration_length=max(hid_descriptor_length, descriptor_length),
                    string_descriptor_length=len(pointers_to_strings),
                    hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)),
                    rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE',
                    hid_num_devices=len(args.hid_devices),
                    msc_vendor=args.manufacturer[:8],
                    msc_product=args.product[:16]))

# Write out the report descriptor and info.
c_file.write("""\
const uint8_t hid_report_descriptor[{HID_DESCRIPTOR_LENGTH}] = {{
""".format(HID_DESCRIPTOR_LENGTH=hid_descriptor_length))

for b in bytes(combined_hid_report_descriptor):
    c_file.write("0x{:02x}, ".format(b))

c_file.write("""\
};

""")

# Write out USB HID report buffer definitions.
for name in args.hid_devices:
    c_file.write("""\
static uint8_t {name}_report_buffer[{report_length}];
""".format(name=name.lower(),
           report_length=hid_report_descriptors.HID_DEVICE_DATA[name].report_length))

    if hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length > 0:
        c_file.write("""\
static uint8_t {name}_out_report_buffer[{report_length}];
""".format(name=name.lower(),
           report_length=hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length))
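
# For example (illustrative; actual sizes come from hid_report_descriptors.HID_DEVICE_DATA),
# a KEYBOARD,MOUSE device set might emit:
#   static uint8_t keyboard_report_buffer[8];
#   static uint8_t keyboard_out_report_buffer[1];
#   static uint8_t mouse_report_buffer[4];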
unknown_devices = list(frozenset(args.devices) - ALL_DEVICES_SET)
if unknown_devices:
    raise ValueError("Unknown device(s)", unknown_devices)

unknown_hid_devices = list(frozenset(args.hid_devices) - ALL_HID_DEVICES_SET)
if unknown_hid_devices:
    raise ValueError("Unknown HID device(s)", unknown_hid_devices)

if not args.renumber_endpoints:
    if 'CDC' in args.devices:
        if args.cdc_ep_num_notification == 0:
            raise ValueError("CDC notification endpoint number must not be 0")
        elif args.cdc_ep_num_data_out == 0:
            raise ValueError("CDC data OUT endpoint number must not be 0")
        elif args.cdc_ep_num_data_in == 0:
            raise ValueError("CDC data IN endpoint number must not be 0")

    if 'MSC' in args.devices:
        if args.msc_ep_num_out == 0:
            raise ValueError("MSC endpoint OUT number must not be 0")
        elif args.msc_ep_num_in == 0:
            raise ValueError("MSC endpoint IN number must not be 0")

    if 'HID' in args.devices:
        if args.hid_ep_num_out == 0:
            raise ValueError("HID endpoint OUT number must not be 0")
        elif args.hid_ep_num_in == 0:
            raise ValueError("HID endpoint IN number must not be 0")

    if 'AUDIO' in args.devices:
        if args.midi_ep_num_out == 0:
            raise ValueError("MIDI endpoint OUT number must not be 0")
        elif args.midi_ep_num_in == 0:
            raise ValueError("MIDI endpoint IN number must not be 0")
for", "{DESCRIPTION} : {CLASS} \"\"\".format(DESCRIPTION=device.description, CLASS=device.__class__)) c_file.write(\"\"\"\\ const uint8_t usb_desc_dev[] =", "device') parser.add_argument('--manufacturer', type=str, help='manufacturer of the device') parser.add_argument('--product', type=str, help='product", "hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)), rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE', hid_num_devices=len(args.hid_devices), msc_vendor=args.manufacturer[:8],", ".usage_page = {usage_page:#04x}, .usage = {usage:#04x}, .out_report_buffer = {out_report_buffer}, .out_report_length", "bytes(device): c_file.write(\"0x{:02x}, \".format(b)) c_file.write(\"\"\"\\ }; \"\"\") c_file.write(\"\"\"\\ const uint8_t usb_desc_cfg[]", "{hid_num_devices} // Vendor name included in Inquiry response, max 8", "'HID' in args.devices: if args.args.hid_ep_num_out == 0: raise ValueError(\"HID endpoint", "= 'CircuitPython' ALL_DEVICES='CDC,MSC,AUDIO,HID' ALL_DEVICES_SET=frozenset(ALL_DEVICES.split(',')) DEFAULT_DEVICES='CDC,MSC,AUDIO,HID' ALL_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,SYS_CONTROL,GAMEPAD,DIGITIZER,XAC_COMPATIBLE_GAMEPAD,RAW' ALL_HID_DEVICES_SET=frozenset(ALL_HID_DEVICES.split(',')) # Digitizer works", "string_descriptors[SERIAL_NUMBER_INDEX] c_file = args.output_c_file h_file = args.output_h_file c_file.write(\"\"\"\\ #include <stdint.h>", "= parser.parse_args() unknown_devices = list(frozenset(args.devices) - ALL_DEVICES_SET) if unknown_devices: raise", "else 64), midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]), ]) cs_ac_interface = audio10.AudioControlInterface( description=\"Empty audio control\",", "has report_id 2 # etc. report_ids = {} if len(args.hid_devices)", "are interface-set local and endpoints are interface local # until", "standard.InterfaceDescriptor( description=\"CDC data\", bInterfaceClass=cdc.CDC_CLASS_DATA, iInterface=StringIndex.index(\"{} CDC data\".format(args.interface_name)), subdescriptors=[ standard.EndpointDescriptor( description=\"CDC", "USB HID report buffer definitions. 
cdc_union = cdc.Union(
    description="CDC comm",
    bMasterInterface=0x00,  # Adjust this after interfaces are renumbered.
    bSlaveInterface_list=[0x01])

cdc_call_management = cdc.CallManagement(
    description="CDC comm",
    bmCapabilities=0x01,
    bDataInterface=0x01)  # Adjust this after interfaces are renumbered.

cdc_comm_interface = standard.InterfaceDescriptor(
    description="CDC comm",
    bInterfaceClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE,
    iInterface=StringIndex.index("{} CDC control".format(args.interface_name)),
    subdescriptors=[
        cdc.Header(
            description="CDC comm",
            bcdCDC=0x0110),
        cdc_call_management,
        cdc.AbstractControlManagement(
            description="CDC comm",
            bmCapabilities=0x02),
        cdc_union,
        standard.EndpointDescriptor(
            description="CDC comm in",
            bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
            wMaxPacketSize=0x0040,
            bInterval=0x10)
    ])

cdc_data_interface = standard.InterfaceDescriptor(
    description="CDC data",
    bInterfaceClass=cdc.CDC_CLASS_DATA,
    iInterface=StringIndex.index("{} CDC data".format(args.interface_name)),
    subdescriptors=[
        standard.EndpointDescriptor(
            description="CDC data out",
            bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        standard.EndpointDescriptor(
            description="CDC data in",
            bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
    ])

cdc_interfaces = [cdc_comm_interface, cdc_data_interface]
msc_interfaces = [
    standard.InterfaceDescriptor(
        description="MSC",
        bInterfaceClass=msc.MSC_CLASS,
        bInterfaceSubClass=msc.MSC_SUBCLASS_TRANSPARENT,
        bInterfaceProtocol=msc.MSC_PROTOCOL_BULK,
        iInterface=StringIndex.index("{} Mass Storage".format(args.interface_name)),
        subdescriptors=[
            standard.EndpointDescriptor(
                description="MSC in",
                bEndpointAddress=args.msc_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
            standard.EndpointDescriptor(
                description="MSC out",
                bEndpointAddress=(args.msc_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT),
                bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
                bInterval=0,
                wMaxPacketSize=512 if args.highspeed else 64),
        ]
    )
]
"HID interface is not given # first. However, it still", "c_file.write(\"\"\" usb_hid_device_obj_t usb_hid_devices[] = { \"\"\") for name in args.hid_devices:", "of CDC DATA IN') parser.add_argument('--msc_ep_num_out', type=int, default=0, help='endpoint number of", "import argparse import os import sys sys.path.append(\"../../tools/usb_descriptor\") from adafruit_usb_descriptor import", "description=\"HID out\", bEndpointAddress=args.hid_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT, bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT, bInterval=8) hid_interfaces = [", "only one hid_device, it shouldn't have a report id. #", "line. n = 0 while i < len(b): length =", "= midi.InJackDescriptor( description=\"MIDI PC -> {}\".format(args.interface_name), bJackType=midi.JACK_TYPE_EMBEDDED, iJack=StringIndex.index(\"{} usb_midi.ports[0]\".format(args.interface_name))) midi_out_jack_ext", "// array of pointer to string descriptors uint16_t const *", "> 0 else 'NULL' c_file.write(\"\"\"\\ {{ .base = {{ .type", "parser.add_argument('--product', type=str, help='product name of the device') parser.add_argument('--vid', type=lambda x:", "0\") if 'MSC' in args.devices: if args.msc_ep_num_out == 0: raise", "2, device_length=len(bytes(device)), configuration_length=descriptor_length, max_configuration_length=max(hid_descriptor_length, descriptor_length), string_descriptor_length=len(pointers_to_strings), hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)), rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED'", "variable_name=\"usb_serial_number\") device = standard.DeviceDescriptor( description=\"top\", idVendor=args.vid, idProduct=args.pid, iManufacturer=StringIndex.index(args.manufacturer), iProduct=StringIndex.index(args.product), iSerialNumber=SERIAL_NUMBER_INDEX)", "report_id report_id += 1 combined_hid_report_descriptor = hid.ReportDescriptor( description=\"MULTIDEVICE\", report_descriptor=bytes(concatenated_descriptors)) #", "parser.add_argument('--serial_number_length', type=int, default=32, help='length needed for the serial number in", "dest='renumber_endpoints', action='store_false', help='use to not renumber endpoint') parser.add_argument('--cdc_ep_num_notification', type=int, default=0,", "OUT number must not be 0\") elif args.msc_ep_num_in == 0:", "cdc_iad = standard.InterfaceAssociationDescriptor( description=\"CDC IAD\", bFirstInterface=cdc_comm_interface.bInterfaceNumber, bInterfaceCount=len(cdc_interfaces), bFunctionClass=cdc.CDC_CLASS_COMM, # Communications", "cdc_comm_interface = standard.InterfaceDescriptor( description=\"CDC comm\", bInterfaceClass=cdc.CDC_CLASS_COMM, # Communications Device Class", "name included in Inquiry response, max 16 bytes #define CFG_TUD_MSC_PRODUCT", "report_ids = {} if len(args.hid_devices) == 1: name = args.hid_devices[0]", "Now we values we need for the .h file. h_file.write(\"\"\"\\", "cdc.Header( description=\"CDC comm\", bcdCDC=0x0110), cdc_call_management, cdc.AbstractControlManagement( description=\"CDC comm\", bmCapabilities=0x02), cdc_union,", "number if the HID interface is not given # first.", "buffer definitions. 
# Audio!
# In and out here are relative to CircuitPython

# USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython
midi_in_jack_emb = midi.InJackDescriptor(
    description="MIDI PC -> {}".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name)))
midi_out_jack_ext = midi.OutJackDescriptor(
    description="MIDI data out to user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    input_pins=[(midi_in_jack_emb, 1)],
    iJack=0)

# USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython
midi_in_jack_ext = midi.InJackDescriptor(
    description="MIDI data in from user code.",
    bJackType=midi.JACK_TYPE_EXTERNAL,
    iJack=0)
midi_out_jack_emb = midi.OutJackDescriptor(
    description="MIDI {} -> PC".format(args.interface_name),
    bJackType=midi.JACK_TYPE_EMBEDDED,
    input_pins=[(midi_in_jack_ext, 1)],
    iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name)))

audio_midi_interface = standard.InterfaceDescriptor(
    description="Midi",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} MIDI".format(args.interface_name)),
    subdescriptors=[
        midi.Header(
            jacks_and_elements=[
                midi_in_jack_emb,
                midi_in_jack_ext,
                midi_out_jack_emb,
                midi_out_jack_ext
            ],
        ),
        standard.EndpointDescriptor(
            description="MIDI data out to {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]),
        standard.EndpointDescriptor(
            description="MIDI data in from {}".format(args.interface_name),
            bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
            bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
            bInterval=0x0,
            wMaxPacketSize=512 if args.highspeed else 64),
        midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]),
    ])
cs_ac_interface = audio10.AudioControlInterface(
    description="Empty audio control",
    audio_streaming_interfaces=[],
    midi_streaming_interfaces=[
        audio_midi_interface
    ]
)

audio_control_interface = standard.InterfaceDescriptor(
    description="All the audio",
    bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
    bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL,
    bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
    iInterface=StringIndex.index("{} Audio".format(args.interface_name)),
    subdescriptors=[
        cs_ac_interface,
    ])

# Audio streaming interfaces must occur before MIDI ones.
audio_interfaces = ([audio_control_interface] +
                    cs_ac_interface.audio_streaming_interfaces +
                    cs_ac_interface.midi_streaming_interfaces)

interfaces_to_join = []

if 'CDC' in args.devices:
    interfaces_to_join.append(cdc_interfaces)

if 'MSC' in args.devices:
    interfaces_to_join.append(msc_interfaces)

if 'HID' in args.devices:
    interfaces_to_join.append(hid_interfaces)

if 'AUDIO' in args.devices:
    interfaces_to_join.append(audio_interfaces)

# util.join_interfaces() renumbers the endpoints to make them unique across descriptors,
# and renumbers the interfaces in order. But we still need to fix up certain
# interface cross-references.
interfaces = util.join_interfaces(*interfaces_to_join, renumber_endpoints=args.renumber_endpoints)

# Now adjust the CDC interface cross-references.
cdc_union.bMasterInterface = cdc_comm_interface.bInterfaceNumber
cdc_union.bSlaveInterface_list = [cdc_data_interface.bInterfaceNumber]

cdc_call_management.bDataInterface = cdc_data_interface.bInterfaceNumber

cdc_iad = standard.InterfaceAssociationDescriptor(
    description="CDC IAD",
    bFirstInterface=cdc_comm_interface.bInterfaceNumber,
    bInterfaceCount=len(cdc_interfaces),
    bFunctionClass=cdc.CDC_CLASS_COMM,       # Communications Device Class
    bFunctionSubClass=cdc.CDC_SUBCLASS_ACM,  # Abstract control model
    bFunctionProtocol=cdc.CDC_PROTOCOL_NONE)
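util.join_interfaces() belongs to adafruit_usb_descriptor and its implementation is not shown here; the comment above only says that it makes endpoint numbers unique and renumbers interfaces in order, which is why the CDC union and call-management cross-references are patched afterwards. A toy model of that renumbering, under that assumption only:

def sketch_join(interface_sets):
    # Assign bInterfaceNumber 0..N-1 across all sets, in the order given
    # (illustration of the renumbering idea, not the library's code).
    joined = []
    for interface_set in interface_sets:
        for interface in interface_set:
            interface.bInterfaceNumber = len(joined)
            joined.append(interface)
    return joined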
descriptor_list = []

if 'CDC' in args.devices:
    # Put the CDC IAD just before the CDC interfaces.
    # There appears to be a bug in the Windows composite USB driver that requests the
    # HID report descriptor with the wrong interface number if the HID interface is not given
    # first. However, it still fetches the descriptor anyway. We could reorder the interfaces,
    # but the Adafruit_usbser.inf file thinks CDC is at Interface 0, so we'll leave it
    # there for backwards compatibility.
    descriptor_list.append(cdc_iad)
    descriptor_list.extend(cdc_interfaces)

if 'MSC' in args.devices:
    descriptor_list.extend(msc_interfaces)

if 'HID' in args.devices:
    descriptor_list.extend(hid_interfaces)

if 'AUDIO' in args.devices:
    # Only add the control interface because other audio interfaces are managed by it
    # to ensure correct ordering.
    descriptor_list.append(audio_control_interface)

# Finally, build the composite descriptor.
configuration = standard.ConfigurationDescriptor(
    description="Composite configuration",
    wTotalLength=(standard.ConfigurationDescriptor.bLength +
                  sum([len(bytes(x)) for x in descriptor_list])),
    bNumInterfaces=len(interfaces))
descriptor_list.insert(0, configuration)

string_descriptors = [standard.StringDescriptor(string) for string in StringIndex.strings_in_order()]
serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX]
c_file = args.output_c_file
h_file = args.output_h_file

c_file.write("""\
#include <stdint.h>

#include "py/objtuple.h"
#include "shared-bindings/usb_hid/Device.h"

#include "{H_FILE_NAME}"

""".format(H_FILE_NAME=h_file.name))

c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=device.description,
           CLASS=device.__class__))

c_file.write("""\
const uint8_t usb_desc_dev[] = {
""")
for b in bytes(device):
    c_file.write("0x{:02x}, ".format(b))

c_file.write("""\
};
""")

c_file.write("""\
const uint8_t usb_desc_cfg[] = {
""")

# Write out all the regular descriptors as one long array (that's how ASF4 does it).
descriptor_length = 0
for descriptor in descriptor_list:
    c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description,
           CLASS=descriptor.__class__))

    b = bytes(descriptor)
    notes = descriptor.notes()
    i = 0

    # This prints each subdescriptor on a separate line.
    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length):
            c_file.write("0x{:02x}, ".format(b[i + j]))
        c_file.write("// " + notes[n])
        n += 1
        c_file.write("\n")
        i += length
    descriptor_length += len(b)

c_file.write("""\
};
""")
pointers_to_strings = []

for idx, descriptor in enumerate(string_descriptors):
    # This prints each subdescriptor on a separate line.
    variable_name = StringIndex.index_to_variable[idx]
    if not variable_name:
        variable_name = "string_descriptor{}".format(idx)

    const = "const "
    if variable_name == "usb_serial_number":
        const = ""
    c_file.write("""\
{const}uint16_t {NAME}[] = {{
""".format(const=const, NAME=variable_name))
    pointers_to_strings.append("{name}".format(name=variable_name))

    b = bytes(descriptor)
    i = 0
    n = 0
    while i < len(b):
        length = b[i]
        for j in range(length // 2):
            c_file.write("0x{:04x}, ".format(b[i + 2*j + 1] << 8 | b[i + 2*j]))
        n += 1
        c_file.write("\n")
        i += length
    c_file.write("""\
};
""")

c_file.write("""\
// array of pointer to string descriptors
uint16_t const * const string_desc_arr [] =
{
""")
c_file.write(""",\
""".join(pointers_to_strings))
c_file.write("""
};
""")

# Write out the HID report descriptor and info. The length must be computed
# before it is used in the array declaration below.
hid_descriptor_length = len(bytes(combined_hid_report_descriptor))

c_file.write("""\
const uint8_t hid_report_descriptor[{HID_DESCRIPTOR_LENGTH}] = {{
""".format(HID_DESCRIPTOR_LENGTH=hid_descriptor_length))
for b in bytes(combined_hid_report_descriptor):
    c_file.write("0x{:02x}, ".format(b))
c_file.write("\n")

c_file.write("""\
};
""")

# Now write the values we need for the .h file.
h_file.write("""\
#ifndef MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
#define MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H

#include <stdint.h>

extern const uint8_t usb_desc_dev[{device_length}];
extern const uint8_t usb_desc_cfg[{configuration_length}];
extern uint16_t usb_serial_number[{serial_number_length}];
extern uint16_t const * const string_desc_arr [{string_descriptor_length}];

extern const uint8_t hid_report_descriptor[{hid_report_descriptor_length}];

#define CFG_TUSB_RHPORT0_MODE ({rhport0_mode})

#define USB_HID_NUM_DEVICES {hid_num_devices}

// Vendor name included in Inquiry response, max 8 bytes
#define CFG_TUD_MSC_VENDOR "{msc_vendor}"

// Product name included in Inquiry response, max 16 bytes
#define CFG_TUD_MSC_PRODUCT "{msc_product}"
""".format(serial_number_length=len(bytes(serial_number_descriptor)) // 2,
           device_length=len(bytes(device)),
           configuration_length=descriptor_length,
           max_configuration_length=max(hid_descriptor_length, descriptor_length),
           string_descriptor_length=len(pointers_to_strings),
           hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)),
           rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE',
           hid_num_devices=len(args.hid_devices),
           msc_vendor=args.manufacturer[:8],
           msc_product=args.product[:16]))
# Write out USB HID report buffer definitions.
for name in args.hid_devices:
    c_file.write("""\
static uint8_t {name}_report_buffer[{report_length}];
""".format(name=name.lower(),
           report_length=hid_report_descriptors.HID_DEVICE_DATA[name].report_length))

    if hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length > 0:
        c_file.write("""\
static uint8_t {name}_out_report_buffer[{report_length}];
""".format(name=name.lower(),
           report_length=hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length))

# Write out table of device objects.
c_file.write("""
usb_hid_device_obj_t usb_hid_devices[] = {
""")
for name in args.hid_devices:
    device_data = hid_report_descriptors.HID_DEVICE_DATA[name]
    out_report_buffer = '{}_out_report_buffer'.format(name.lower()) if device_data.out_report_length > 0 else 'NULL'
    c_file.write("""\
    {{
        .base = {{ .type = &usb_hid_device_type }},
        .report_buffer = {name}_report_buffer,
        .report_id = {report_id},
        .report_length = {report_length},
        .usage_page = {usage_page:#04x},
        .usage = {usage:#04x},
        .out_report_buffer = {out_report_buffer},
        .out_report_length = {out_report_length},
    }},
""".format(name=name.lower(),
           report_id=report_ids[name],
           report_length=device_data.report_length,
           usage_page=device_data.usage_page,
           usage=device_data.usage,
           out_report_buffer=out_report_buffer,
           out_report_length=device_data.out_report_length))
c_file.write("""\
};
""")

c_file.write("""
mp_obj_tuple_t common_hal_usb_hid_devices = {{
    .base = {{
        .type = &mp_type_tuple,
    }},
    .len = {num_devices},
    .items = {{
""".format(num_devices=len(args.hid_devices)))
for idx in range(len(args.hid_devices)):
    c_file.write("""\
        (mp_obj_t) &usb_hid_devices[{idx}],
""".format(idx=idx))
c_file.write("""\
    },
};
""")

h_file.write("""\
#endif // MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
""")
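A typical invocation of the generator above, assuming the script is saved as gen_usb_descriptor.py (the file name is not given here) and using placeholder IDs rather than any real board's VID/PID:

import subprocess

subprocess.run([
    "python", "gen_usb_descriptor.py",
    "--manufacturer", "Example Corp",      # placeholder strings
    "--product", "Example Board",
    "--vid", "0x1234",                     # made-up vendor/product ids
    "--pid", "0xABCD",
    "--devices", "CDC,MSC,HID",
    "--hid_devices", "KEYBOARD,MOUSE",
    "--output_c_file", "autogen_usb_descriptor.c",
    "--output_h_file", "autogen_usb_descriptor.h",
], check=True)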
[ "default=32, metavar='BS', help='batch size') parser.add_argument('--data_path', type=str, default='../TextCnn/dataset/MELD_features_raw.pkl') # model settings.", "default=600, help='embedding dims to use') parser.add_argument('--emotion_state_dim', type=int, default=100) parser.add_argument('--hidden_layer_dim', type=int,", "help='number of epochs') return parser.parse_args() if __name__ == '__main__': args", "1.0, 1.0, 1.0]) loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights)", "test_loss: {}, acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss,", "if args.cuda else loss_weights) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2) lf", "parser.add_argument('--utterance_dim', type=int, default=600, help='embedding dims to use') parser.add_argument('--emotion_state_dim', type=int, default=100)", "import MaskedNLLLoss, BC_LSTM from dataloader import MELDDataLoader from sklearn.metrics import", "args.cuda else data[:-1] log_prob, _ = model(textf, None, acouf, None,", "classification_report def setup_seed(seed): \"\"\" Manually Fix the random seed to", "\"\"\" Manually Fix the random seed to get deterministic results.", "class_report = classification_report(labels, preds, sample_weight=masks, target_names=['neutral', 'surprise', 'fear', 'sadness', 'joy',", "11, 111, 1111, 11111]: setup_seed(seed) args.seed = seed print(args) model", "dataloader['valid'], e, mode='valid') test_loss, test_acc, test_label, test_pred, test_mask, test_fscore =", "tqdm(dataloader) as td: for data in td: if mode ==", "parser.add_argument('--pre_fusion_dropout', type=float, default=0.4) parser.add_argument('--post_fusion_dropout', type=float, default=0.3) # train settings. parser.add_argument('--lr',", "[d.cuda() for d in data[:-1]] if args.cuda else data[:-1] log_prob,", "[], [], [], [], [] max_sequence_len = [] assert mode", "== 'test': class_report = classification_report(labels, preds, sample_weight=masks, target_names=['neutral', 'surprise', 'fear',", "print('Running on GPU') else: print('Running on CPU') for seed in", "time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss,", "os, datetime import torch import torch.optim as optim from model", "None if mode == 'train': model.train() else: model.eval() with tqdm(dataloader)", "parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--num_workers', type=int, default=0, help='num workers of", "labels, masks, losses_sense = [], [], [], [], [] max_sequence_len", "optimizer.step() if preds!=[]: preds = np.concatenate(preds) labels = np.concatenate(labels) masks", "test_mask, test_fscore = train_or_eval_model(model, loss_function, dataloader['test'], e, mode='test') valid_losses.append(valid_loss) valid_fscores.append(valid_fscore)", "data') # dataloader settings parser.add_argument('--batch-size', type=int, default=32, metavar='BS', help='batch size')", "2) if mode == 'test': class_report = classification_report(labels, preds, sample_weight=masks,", "= test_accuracys[np.argmax(valid_fscores[0])] scores = [acc_score1, f1_score1, acc_score2, f1_score2] scores =", "acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore,", "else data[:-1] log_prob, _ = model(textf, None, acouf, None, mask)", "import pandas as pd import numpy as np, argparse, time,", "in [1, 11, 111, 1111, 11111]: setup_seed(seed) args.seed = seed", "{}'.format(acc_score2, f1_score2)) rf = open('results/cnn_meld_results.txt', 'a') 
def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, mode='train'):
    losses, preds, labels, masks, losses_sense = [], [], [], [], []
    max_sequence_len = []
    assert mode != 'train' or optimizer is not None
    if mode == 'train':
        model.train()
    else:
        model.eval()
    with tqdm(dataloader) as td:
        for data in td:
            if mode == 'train':
                optimizer.zero_grad()
            textf, acouf, mask, label = [d.cuda() for d in data[:-1]] if args.cuda else data[:-1]
            log_prob, _ = model(textf, None, acouf, None, mask)
            lp_ = log_prob.transpose(0, 1).contiguous().view(-1, log_prob.size()[2])  # batch*seq_len, n_classes
            labels_ = label.view(-1)  # batch*seq_len
            loss = loss_function(lp_, labels_, mask)
            pred_ = torch.argmax(lp_, 1)  # batch*seq_len
            preds.append(pred_.data.cpu().numpy())
            labels.append(labels_.data.cpu().numpy())
            masks.append(mask.view(-1).cpu().numpy())
            losses.append(loss.item() * masks[-1].sum())
            if mode == 'train':
                total_loss = loss
                total_loss.backward()
                optimizer.step()
    if preds != []:
        preds = np.concatenate(preds)
        labels = np.concatenate(labels)
        masks = np.concatenate(masks)
    else:
        return float('nan'), float('nan'), float('nan'), [], [], [], float('nan'), []
    avg_loss = round(np.sum(losses) / np.sum(masks), 4)
    avg_sense_loss = round(np.sum(losses_sense) / np.sum(masks), 4)
    avg_accuracy = round(accuracy_score(labels, preds, sample_weight=masks) * 100, 2)
    avg_fscore = round(f1_score(labels, preds, sample_weight=masks, average='weighted') * 100, 2)
    if mode == 'test':
        class_report = classification_report(labels, preds, sample_weight=masks,
                                             target_names=['neutral', 'surprise', 'fear', 'sadness',
                                                           'joy', 'disgust', 'anger'],
                                             digits=6)
        print(class_report)
    return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore]
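The sample_weight=masks arguments above are what keep padded utterances out of the metrics; the same mechanism applies to f1_score and classification_report. A toy check with invented values, where the fourth position is padding:

import numpy as np
from sklearn.metrics import accuracy_score

toy_labels = np.array([0, 2, 1, 0])
toy_preds = np.array([0, 2, 2, 0])
toy_masks = np.array([1, 1, 1, 0])  # weight 0 drops the padded position entirely

# Only the three masked-in positions count: 2 of 3 match.
print(accuracy_score(toy_labels, toy_preds, sample_weight=toy_masks))  # 0.666...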
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_workers', type=int, default=0,
                        help='num workers of loading data')
    # dataloader settings
    parser.add_argument('--batch-size', type=int, default=32, metavar='BS',
                        help='batch size')
    parser.add_argument('--data_path', type=str, default='../TextCnn/dataset/MELD_features_raw.pkl')
    # model settings.
    parser.add_argument('--attention_type', type=str, default='general2')
    parser.add_argument('--utterance_dim', type=int, default=600,
                        help='embedding dims to use')
    parser.add_argument('--emotion_state_dim', type=int, default=100)
    parser.add_argument('--hidden_layer_dim', type=int, default=100)
    parser.add_argument('--dropout', type=float, default=0.25)
    parser.add_argument('--n_classes', type=int, default=7)
    # late fusion module.
    parser.add_argument('--lateFusionModule', type=str, default='concat')
    parser.add_argument('--input_features', type=tuple, default=(100, 300))
    parser.add_argument('--pre_fusion_hidden_dims', type=tuple, default=(24, 7))
    parser.add_argument('--pre_fusion_dropout', type=float, default=0.4)
    parser.add_argument('--post_fusion_dropout', type=float, default=0.3)
    # train settings.
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate')
    parser.add_argument('--l2', type=float, default=1e-5, metavar='L2',
                        help='L2 regularization weight')
    parser.add_argument('--epochs', type=int, default=100, metavar='E',
                        help='number of epochs')
    return parser.parse_args()
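MaskedNLLLoss comes from the project's model module, which is not part of this file. As a sketch only (an assumption about its behavior, not the project's actual code), a masked NLL loss scales each utterance's negative log-likelihood by the padding mask and normalizes by the number of real utterances:

import torch
import torch.nn as nn

class MaskedNLLLossSketch(nn.Module):
    # Hypothetical stand-in for model.MaskedNLLLoss.
    def __init__(self, weight=None):
        super().__init__()
        self.loss = nn.NLLLoss(weight=weight, reduction='none')

    def forward(self, log_prob, target, mask):
        # log_prob: (batch*seq_len, n_classes) log-probabilities
        # target:   (batch*seq_len) class indices
        # mask:     (batch, seq_len), 1 for real utterances, 0 for padding
        mask_ = mask.view(-1)
        per_item = self.loss(log_prob, target) * mask_
        return per_item.sum() / mask_.sum()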
if __name__ == '__main__':
    args = parse_args()
    args.cuda = torch.cuda.is_available()
    if args.cuda:
        print('Running on GPU')
    else:
        print('Running on CPU')

    for seed in [1, 11, 111, 1111, 11111]:
        setup_seed(seed)
        args.seed = seed
        print(args)

        model = BC_LSTM(args)
        print('MELD BC_LSTM MODULE ...')
        if args.cuda:
            model.cuda()

        loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights)
        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
        lf = open('logs/cnn_meld_logs.txt', 'a')

        dataloader = MELDDataLoader(args)

        valid_losses, valid_fscores = [], []
        test_fscores, test_accuracys, test_losses = [], [], []
        best_loss, best_label, best_pred, best_mask = None, None, None, None

        for e in range(args.epochs):
            start_time = time.time()
            train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(
                model, loss_function, dataloader['train'], e, optimizer, mode='train')
            valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(
                model, loss_function, dataloader['valid'], e, mode='valid')
            test_loss, test_acc, test_label, test_pred, test_mask, test_fscore = train_or_eval_model(
                model, loss_function, dataloader['test'], e, mode='test')

            valid_losses.append(valid_loss)
            valid_fscores.append(valid_fscore)
            test_losses.append(test_loss)
            test_accuracys.append(test_acc)
            test_fscores.append(test_fscore)

            x = ('epoch: {}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {}, acc: {}, '
                 'fscore: {}, test_loss: {}, acc: {}, fscore: {}, time: {} sec').format(
                e + 1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore,
                test_loss, test_acc, test_fscore, round(time.time() - start_time, 2))
            print(x)
            lf.write(x + '\n')

        valid_fscores = np.array(valid_fscores).transpose()
        test_fscores = np.array(test_fscores).transpose()      # [1, epoches]
        test_accuracys = np.array(test_accuracys).transpose()  # [epoches]

        f1_score1 = test_fscores[0][np.argmin(valid_losses)]
        acc_score1 = test_accuracys[np.argmin(valid_losses)]
        f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])]
        acc_score2 = test_accuracys[np.argmax(valid_fscores[0])]
        scores = [acc_score1, f1_score1, acc_score2, f1_score2]
        scores = [str(item) for item in scores]

        print('Test Scores: Weighted F1')
        print('@Best Valid Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1))
        print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2))

        rf = open('results/cnn_meld_results.txt', 'a')
        rf.write('\t'.join(scores) + '\t' + str(args) + '\n')
        rf.close()
{}, acc: {}, fscore: {}, time: {} sec'.format(e+1,", "valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2)) print (x) lf.write(x +", "random seed to get deterministic results. \"\"\" torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed)", "[avg_fscore] def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--num_workers', type=int, default=0, help='num", "preds = np.concatenate(preds) labels = np.concatenate(labels) masks = np.concatenate(masks) else:", "torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.benchmark = False torch.backends.cudnn.deterministic =", "pickle, random, os, datetime import torch import torch.optim as optim", "losses, preds, labels, masks, losses_sense = [], [], [], [],", "{}, acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc,", "('Test Scores: Weighted F1') print('@Best Valid Loss: Test Acc: {},", "args.cuda: print('Running on GPU') else: print('Running on CPU') for seed", "[] assert mode != 'train' or optimizer != None if", "return float('nan'), float('nan'), float('nan'), [], [], [], float('nan'),[] avg_loss =", "MELDDataLoader from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report def setup_seed(seed):", "# batch*seq_len loss = loss_function(lp_, labels_, mask) pred_ = torch.argmax(lp_,1)", "= round(accuracy_score(labels,preds, sample_weight=masks)*100, 2) avg_fscore = round(f1_score(labels,preds, sample_weight=masks, average='weighted')*100, 2)", "time, pickle, random, os, datetime import torch import torch.optim as", "optimizer.zero_grad() textf, acouf, mask, label = [d.cuda() for d in", "module. parser.add_argument('--lateFusionModule', type=str, default='concat') parser.add_argument('--input_features', type=tuple, default=(100, 300)) parser.add_argument('--pre_fusion_hidden_dims', type=tuple,", "lf.write(x + '\\n') valid_fscores = np.array(valid_fscores).transpose() test_fscores = np.array(test_fscores).transpose() #", "workers of loading data') # dataloader settings parser.add_argument('--batch-size', type=int, default=32,", "default=0.3) # train settings. 
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning rate')", "type=str, default='general2') parser.add_argument('--utterance_dim', type=int, default=600, help='embedding dims to use') parser.add_argument('--emotion_state_dim',", "Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2)) rf = open('results/cnn_meld_results.txt',", "= parse_args() args.cuda = torch.cuda.is_available() if args.cuda: print('Running on GPU')", "lf = open('logs/cnn_meld_logs.txt', 'a') dataloader = MELDDataLoader(args) valid_losses, valid_fscores =", "n_classes labels_ = label.view(-1) # batch*seq_len loss = loss_function(lp_, labels_,", "= open('logs/cnn_meld_logs.txt', 'a') dataloader = MELDDataLoader(args) valid_losses, valid_fscores = [],", "MaskedNLLLoss, BC_LSTM from dataloader import MELDDataLoader from sklearn.metrics import f1_score,", "as td: for data in td: if mode == 'train':", "= classification_report(labels, preds, sample_weight=masks, target_names=['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust',", "'train': optimizer.zero_grad() textf, acouf, mask, label = [d.cuda() for d", "weight') parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number of epochs') return parser.parse_args()", "args.seed = seed print(args) model = BC_LSTM(args) print('MELD BC_LSTM MODULE", "Weighted F1') print('@Best Valid Loss: Test Acc: {}, Test F1:", "pred_ = torch.argmax(lp_,1) # batch*seq_len preds.append(pred_.data.cpu().numpy()) labels.append(labels_.data.cpu().numpy()) masks.append(mask.view(-1).cpu().numpy()) losses.append(loss.item()*masks[-1].sum()) if", "label = [d.cuda() for d in data[:-1]] if args.cuda else", "sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report def setup_seed(seed): \"\"\" Manually", "dims to use') parser.add_argument('--emotion_state_dim', type=int, default=100) parser.add_argument('--hidden_layer_dim', type=int, default=100) parser.add_argument('--dropout',", "= log_prob.transpose(0,1).contiguous().view(-1, log_prob.size()[2]) # batch*seq_len, n_classes labels_ = label.view(-1) #", "data[:-1] log_prob, _ = model(textf, None, acouf, None, mask) lp_", "results. \"\"\" torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.benchmark = False", "= time.time() train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model,", "def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, mode='train'): losses, preds, labels,", "default='general2') parser.add_argument('--utterance_dim', type=int, default=600, help='embedding dims to use') parser.add_argument('--emotion_state_dim', type=int,", "{}, acc: {}, fscore: {}, test_loss: {}, acc: {}, fscore:", "default=0.25) parser.add_argument('--n_classes', type=int, default=7) # late fusion module. 
parser.add_argument('--lateFusionModule', type=str,", "= BC_LSTM(args) print('MELD BC_LSTM MODULE ...') if args.cuda: model.cuda() loss_weights", "= MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights) optimizer = optim.Adam(model.parameters(), lr=args.lr,", "default=(100, 300)) parser.add_argument('--pre_fusion_hidden_dims', type=tuple, default=(24, 7)) parser.add_argument('--pre_fusion_dropout', type=float, default=0.4) parser.add_argument('--post_fusion_dropout',", "loading data') # dataloader settings parser.add_argument('--batch-size', type=int, default=32, metavar='BS', help='batch", "np.array(valid_fscores).transpose() test_fscores = np.array(test_fscores).transpose() # [1, epoches] test_accuracys = np.array(test_accuracys).transpose()", "help='embedding dims to use') parser.add_argument('--emotion_state_dim', type=int, default=100) parser.add_argument('--hidden_layer_dim', type=int, default=100)", "seed to get deterministic results. \"\"\" torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed)", "mode='test') valid_losses.append(valid_loss) valid_fscores.append(valid_fscore) test_losses.append(test_loss) test_accuracys.append(test_acc) test_fscores.append(test_fscore) x = 'epoch: {},", "acc_score2, f1_score2] scores = [str(item) for item in scores] print", "None for e in range(args.epochs): start_time = time.time() train_loss, train_acc,", "labels.append(labels_.data.cpu().numpy()) masks.append(mask.view(-1).cpu().numpy()) losses.append(loss.item()*masks[-1].sum()) if mode == 'train': total_loss = loss", "from tqdm import tqdm import pandas as pd import numpy", "d in data[:-1]] if args.cuda else data[:-1] log_prob, _ =", "np.array(test_accuracys).transpose() # [epoches] f1_score1 = test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)] f1_score2", "fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc,", "weight_decay=args.l2) lf = open('logs/cnn_meld_logs.txt', 'a') dataloader = MELDDataLoader(args) valid_losses, valid_fscores", "avg_fscore = round(f1_score(labels,preds, sample_weight=masks, average='weighted')*100, 2) if mode == 'test':", "confusion_matrix, accuracy_score, classification_report def setup_seed(seed): \"\"\" Manually Fix the random", "if mode == 'train': optimizer.zero_grad() textf, acouf, mask, label =", "= train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid') test_loss, test_acc, test_label, test_pred,", "help='L2 regularization weight') parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number of epochs')", "valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid') test_loss, test_acc, test_label,", "= np.concatenate(masks) else: return float('nan'), float('nan'), float('nan'), [], [], [],", "float('nan'),[] avg_loss = round(np.sum(losses)/np.sum(masks), 4) avg_sense_loss = round(np.sum(losses_sense)/np.sum(masks), 4) avg_accuracy", "import torch.optim as optim from model import MaskedNLLLoss, BC_LSTM from", "optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2) lf = open('logs/cnn_meld_logs.txt', 'a') dataloader", "loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights) optimizer = optim.Adam(model.parameters(),", "type=int, default=100) parser.add_argument('--hidden_layer_dim', type=int, default=100) parser.add_argument('--dropout', type=float, default=0.25) 
parser.add_argument('--n_classes', type=int,", "test_fscores[0][np.argmax(valid_fscores[0])] acc_score2 = test_accuracys[np.argmax(valid_fscores[0])] scores = [acc_score1, f1_score1, acc_score2, f1_score2]", "sample_weight=masks, target_names=['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger'], digits=6) print(class_report)", "best_pred, best_mask = None, None, None, None for e in", "epoches] test_accuracys = np.array(test_accuracys).transpose() # [epoches] f1_score1 = test_fscores[0][np.argmin(valid_losses)] acc_score1", "loss total_loss.backward() optimizer.step() if preds!=[]: preds = np.concatenate(preds) labels =", "= [] assert mode != 'train' or optimizer != None", "import numpy as np, argparse, time, pickle, random, os, datetime", "torch import torch.optim as optim from model import MaskedNLLLoss, BC_LSTM", "optimizer != None if mode == 'train': model.train() else: model.eval()", "[], float('nan'),[] avg_loss = round(np.sum(losses)/np.sum(masks), 4) avg_sense_loss = round(np.sum(losses_sense)/np.sum(masks), 4)", "print(args) model = BC_LSTM(args) print('MELD BC_LSTM MODULE ...') if args.cuda:", "{}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {}, acc:", "= MELDDataLoader(args) valid_losses, valid_fscores = [], [] test_fscores, test_accuracys, test_losses", "Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1)) print('@Best Valid", "loss_function, dataloader['valid'], e, mode='valid') test_loss, test_acc, test_label, test_pred, test_mask, test_fscore", "dataloader, epoch, optimizer=None, mode='train'): losses, preds, labels, masks, losses_sense =", "train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time,", "valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'],", "parser.add_argument('--post_fusion_dropout', type=float, default=0.3) # train settings. parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',", "labels_, mask) pred_ = torch.argmax(lp_,1) # batch*seq_len preds.append(pred_.data.cpu().numpy()) labels.append(labels_.data.cpu().numpy()) masks.append(mask.view(-1).cpu().numpy())", "pd import numpy as np, argparse, time, pickle, random, os,", "= model(textf, None, acouf, None, mask) lp_ = log_prob.transpose(0,1).contiguous().view(-1, log_prob.size()[2])", "acc: {}, fscore: {}, test_loss: {}, acc: {}, fscore: {},", "[], [], float('nan'),[] avg_loss = round(np.sum(losses)/np.sum(masks), 4) avg_sense_loss = round(np.sum(losses_sense)/np.sum(masks),", "item in scores] print ('Test Scores: Weighted F1') print('@Best Valid", "test_accuracys[np.argmin(valid_losses)] f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])] acc_score2 = test_accuracys[np.argmax(valid_fscores[0])] scores = [acc_score1,", "= np.array(test_fscores).transpose() # [1, epoches] test_accuracys = np.array(test_accuracys).transpose() # [epoches]", "type=float, default=1e-4, metavar='LR', help='learning rate') parser.add_argument('--l2', type=float, default=1e-5, metavar='L2', help='L2", "= label.view(-1) # batch*seq_len loss = loss_function(lp_, labels_, mask) pred_", "if __name__ == '__main__': args = parse_args() args.cuda = torch.cuda.is_available()", "Fix the random seed to get deterministic results. \"\"\" torch.manual_seed(seed)", "test_losses = [], [], [] best_loss, best_label, best_pred, best_mask =", "{}'.format(acc_score1, f1_score1)) print('@Best Valid F1: Test Acc: {}, Test F1:", "# late fusion module. 
parser.add_argument('--lateFusionModule', type=str, default='concat') parser.add_argument('--input_features', type=tuple, default=(100,", "classification_report(labels, preds, sample_weight=masks, target_names=['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger'],", "def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--num_workers', type=int, default=0, help='num workers", "Valid Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1)) print('@Best", "type=float, default=0.3) # train settings. parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning", "default=7) # late fusion module. parser.add_argument('--lateFusionModule', type=str, default='concat') parser.add_argument('--input_features', type=tuple,", "type=float, default=1e-5, metavar='L2', help='L2 regularization weight') parser.add_argument('--epochs', type=int, default=100, metavar='E',", "f1_score1)) print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2,", "best_loss, best_label, best_pred, best_mask = None, None, None, None for", "for seed in [1, 11, 111, 1111, 11111]: setup_seed(seed) args.seed", "time.time() train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model, loss_function,", "e, mode='test') valid_losses.append(valid_loss) valid_fscores.append(valid_fscore) test_losses.append(test_loss) test_accuracys.append(test_acc) test_fscores.append(test_fscore) x = 'epoch:", "acc_score2 = test_accuracys[np.argmax(valid_fscores[0])] scores = [acc_score1, f1_score1, acc_score2, f1_score2] scores", "label.view(-1) # batch*seq_len loss = loss_function(lp_, labels_, mask) pred_ =", "11111]: setup_seed(seed) args.seed = seed print(args) model = BC_LSTM(args) print('MELD", "best_mask = None, None, None, None for e in range(args.epochs):", "print(class_report) return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore] def parse_args():", "batch*seq_len loss = loss_function(lp_, labels_, mask) pred_ = torch.argmax(lp_,1) #", "accuracy_score, classification_report def setup_seed(seed): \"\"\" Manually Fix the random seed", "as optim from model import MaskedNLLLoss, BC_LSTM from dataloader import", "if mode == 'train': model.train() else: model.eval() with tqdm(dataloader) as", "False torch.backends.cudnn.deterministic = True def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None,", "mode == 'train': total_loss = loss total_loss.backward() optimizer.step() if preds!=[]:", "from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report def setup_seed(seed): \"\"\"", "= np.array(test_accuracys).transpose() # [epoches] f1_score1 = test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)]", "test_fscore = train_or_eval_model(model, loss_function, dataloader['test'], e, mode='test') valid_losses.append(valid_loss) valid_fscores.append(valid_fscore) test_losses.append(test_loss)", "_, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid') test_loss,", "default=1e-5, metavar='L2', help='L2 regularization weight') parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number", "valid_fscores = [], [] test_fscores, test_accuracys, test_losses = [], [],", "[str(item) for item in scores] print ('Test Scores: Weighted F1')", "type=float, default=0.4) parser.add_argument('--post_fusion_dropout', type=float, default=0.3) # train settings. 
parser.add_argument('--lr', type=float,", "optimizer, mode='train') valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(model,", "type=float, default=0.25) parser.add_argument('--n_classes', type=int, default=7) # late fusion module. parser.add_argument('--lateFusionModule',", "test_accuracys, test_losses = [], [], [] best_loss, best_label, best_pred, best_mask", "= round(np.sum(losses_sense)/np.sum(masks), 4) avg_accuracy = round(accuracy_score(labels,preds, sample_weight=masks)*100, 2) avg_fscore =", "on CPU') for seed in [1, 11, 111, 1111, 11111]:", "[1, epoches] test_accuracys = np.array(test_accuracys).transpose() # [epoches] f1_score1 = test_fscores[0][np.argmin(valid_losses)]", "= [str(item) for item in scores] print ('Test Scores: Weighted", "[], [], [], [] max_sequence_len = [] assert mode !=", "optimizer=None, mode='train'): losses, preds, labels, masks, losses_sense = [], [],", "torch.benchmark = False torch.backends.cudnn.deterministic = True def train_or_eval_model(model, loss_function, dataloader,", "None, acouf, None, mask) lp_ = log_prob.transpose(0,1).contiguous().view(-1, log_prob.size()[2]) # batch*seq_len,", "parser.add_argument('--emotion_state_dim', type=int, default=100) parser.add_argument('--hidden_layer_dim', type=int, default=100) parser.add_argument('--dropout', type=float, default=0.25) parser.add_argument('--n_classes',", "datetime import torch import torch.optim as optim from model import", "settings parser.add_argument('--batch-size', type=int, default=32, metavar='BS', help='batch size') parser.add_argument('--data_path', type=str, default='../TextCnn/dataset/MELD_features_raw.pkl')", "__name__ == '__main__': args = parse_args() args.cuda = torch.cuda.is_available() if", "= [d.cuda() for d in data[:-1]] if args.cuda else data[:-1]", "else: print('Running on CPU') for seed in [1, 11, 111,", "train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2)) print", "else loss_weights) optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2) lf = open('logs/cnn_meld_logs.txt',", "sample_weight=masks)*100, 2) avg_fscore = round(f1_score(labels,preds, sample_weight=masks, average='weighted')*100, 2) if mode", "_, _, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid')", "to use') parser.add_argument('--emotion_state_dim', type=int, default=100) parser.add_argument('--hidden_layer_dim', type=int, default=100) parser.add_argument('--dropout', type=float,", "if args.cuda: model.cuda() loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0, 1.0,", "torch.backends.cudnn.deterministic = True def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, mode='train'):", "type=tuple, default=(24, 7)) parser.add_argument('--pre_fusion_dropout', type=float, default=0.4) parser.add_argument('--post_fusion_dropout', type=float, default=0.3) #", "default=(24, 7)) parser.add_argument('--pre_fusion_dropout', type=float, default=0.4) parser.add_argument('--post_fusion_dropout', type=float, default=0.3) # train", "mode == 'test': class_report = classification_report(labels, preds, sample_weight=masks, target_names=['neutral', 'surprise',", "scores = [str(item) for item in scores] print ('Test Scores:", "# train settings. 
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning rate') parser.add_argument('--l2',", "parser.add_argument('--hidden_layer_dim', type=int, default=100) parser.add_argument('--dropout', type=float, default=0.25) parser.add_argument('--n_classes', type=int, default=7) #", "float('nan'), float('nan'), [], [], [], float('nan'),[] avg_loss = round(np.sum(losses)/np.sum(masks), 4)", "sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore,", "BC_LSTM(args) print('MELD BC_LSTM MODULE ...') if args.cuda: model.cuda() loss_weights =", "_, _, train_fscore = train_or_eval_model(model, loss_function, dataloader['train'], e, optimizer, mode='train')", "log_prob.transpose(0,1).contiguous().view(-1, log_prob.size()[2]) # batch*seq_len, n_classes labels_ = label.view(-1) # batch*seq_len", "valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2)) print (x) lf.write(x", "# batch*seq_len, n_classes labels_ = label.view(-1) # batch*seq_len loss =", "= test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)] f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])] acc_score2 =", "GPU') else: print('Running on CPU') for seed in [1, 11,", "argparse, time, pickle, random, os, datetime import torch import torch.optim", "'train': total_loss = loss total_loss.backward() optimizer.step() if preds!=[]: preds =", "round(f1_score(labels,preds, sample_weight=masks, average='weighted')*100, 2) if mode == 'test': class_report =", "f1_score2] scores = [str(item) for item in scores] print ('Test", "x = 'epoch: {}, train_loss: {}, acc: {}, fscore: {},", "random, os, datetime import torch import torch.optim as optim from", "def setup_seed(seed): \"\"\" Manually Fix the random seed to get", "mode != 'train' or optimizer != None if mode ==", "digits=6) print(class_report) return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore] def", "model.train() else: model.eval() with tqdm(dataloader) as td: for data in", "metavar='L2', help='L2 regularization weight') parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number of", "!= None if mode == 'train': model.train() else: model.eval() with", "metavar='LR', help='learning rate') parser.add_argument('--l2', type=float, default=1e-5, metavar='L2', help='L2 regularization weight')", "BC_LSTM from dataloader import MELDDataLoader from sklearn.metrics import f1_score, confusion_matrix,", "in td: if mode == 'train': optimizer.zero_grad() textf, acouf, mask,", "= torch.cuda.is_available() if args.cuda: print('Running on GPU') else: print('Running on", "print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2))", "dataloader['test'], e, mode='test') valid_losses.append(valid_loss) valid_fscores.append(valid_fscore) test_losses.append(test_loss) test_accuracys.append(test_acc) test_fscores.append(test_fscore) x =", "= [], [], [], [], [] max_sequence_len = [] assert", "test_loss, test_acc, test_fscore, round(time.time()-start_time, 2)) print (x) lf.write(x + '\\n')", "numpy as np, argparse, time, pickle, random, os, datetime import", "for data in td: if mode == 'train': optimizer.zero_grad() textf,", "preds.append(pred_.data.cpu().numpy()) labels.append(labels_.data.cpu().numpy()) masks.append(mask.view(-1).cpu().numpy()) losses.append(loss.item()*masks[-1].sum()) if mode == 'train': total_loss =", "'epoch: {}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {},", "(x) 
lf.write(x + '\\n') valid_fscores = np.array(valid_fscores).transpose() test_fscores = np.array(test_fscores).transpose()", "else: model.eval() with tqdm(dataloader) as td: for data in td:", "for e in range(args.epochs): start_time = time.time() train_loss, train_acc, _,", "train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model, loss_function, dataloader['train'],", "log_prob, _ = model(textf, None, acouf, None, mask) lp_ =", "== 'train': model.train() else: model.eval() with tqdm(dataloader) as td: for", "avg_accuracy, labels, preds, masks, [avg_fscore] def parse_args(): parser = argparse.ArgumentParser()", "total_loss = loss total_loss.backward() optimizer.step() if preds!=[]: preds = np.concatenate(preds)", "'anger'], digits=6) print(class_report) return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore]", "f1_score1 = test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)] f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])] acc_score2", "seed print(args) model = BC_LSTM(args) print('MELD BC_LSTM MODULE ...') if", "type=int, default=100) parser.add_argument('--dropout', type=float, default=0.25) parser.add_argument('--n_classes', type=int, default=7) # late", "test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)] f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])] acc_score2 = test_accuracys[np.argmax(valid_fscores[0])]", "1111, 11111]: setup_seed(seed) args.seed = seed print(args) model = BC_LSTM(args)", "best_label, best_pred, best_mask = None, None, None, None for e", "from model import MaskedNLLLoss, BC_LSTM from dataloader import MELDDataLoader from", "setup_seed(seed) args.seed = seed print(args) model = BC_LSTM(args) print('MELD BC_LSTM", "default='../TextCnn/dataset/MELD_features_raw.pkl') # model settings. 
parser.add_argument('--attention_type', type=str, default='general2') parser.add_argument('--utterance_dim', type=int, default=600,", "type=int, default=100, metavar='E', help='number of epochs') return parser.parse_args() if __name__", "log_prob.size()[2]) # batch*seq_len, n_classes labels_ = label.view(-1) # batch*seq_len loss", "avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore] def parse_args(): parser =", "parser.add_argument('--attention_type', type=str, default='general2') parser.add_argument('--utterance_dim', type=int, default=600, help='embedding dims to use')", "rf = open('results/cnn_meld_results.txt', 'a') rf.write('\\t'.join(scores) + '\\t' + str(args) +", "'sadness', 'joy', 'disgust', 'anger'], digits=6) print(class_report) return avg_loss, avg_accuracy, labels,", "model import MaskedNLLLoss, BC_LSTM from dataloader import MELDDataLoader from sklearn.metrics", "sample_weight=masks, average='weighted')*100, 2) if mode == 'test': class_report = classification_report(labels,", "= loss_function(lp_, labels_, mask) pred_ = torch.argmax(lp_,1) # batch*seq_len preds.append(pred_.data.cpu().numpy())", "acc: {}, fscore: {}, valid_loss: {}, acc: {}, fscore: {},", "else: return float('nan'), float('nan'), float('nan'), [], [], [], float('nan'),[] avg_loss", "use') parser.add_argument('--emotion_state_dim', type=int, default=100) parser.add_argument('--hidden_layer_dim', type=int, default=100) parser.add_argument('--dropout', type=float, default=0.25)", "= [acc_score1, f1_score1, acc_score2, f1_score2] scores = [str(item) for item", "default='concat') parser.add_argument('--input_features', type=tuple, default=(100, 300)) parser.add_argument('--pre_fusion_hidden_dims', type=tuple, default=(24, 7)) parser.add_argument('--pre_fusion_dropout',", "= round(np.sum(losses)/np.sum(masks), 4) avg_sense_loss = round(np.sum(losses_sense)/np.sum(masks), 4) avg_accuracy = round(accuracy_score(labels,preds,", "= test_accuracys[np.argmin(valid_losses)] f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])] acc_score2 = test_accuracys[np.argmax(valid_fscores[0])] scores =", "Scores: Weighted F1') print('@Best Valid Loss: Test Acc: {}, Test", "fscore: {}, valid_loss: {}, acc: {}, fscore: {}, test_loss: {},", "type=tuple, default=(100, 300)) parser.add_argument('--pre_fusion_hidden_dims', type=tuple, default=(24, 7)) parser.add_argument('--pre_fusion_dropout', type=float, default=0.4)", "...') if args.cuda: model.cuda() loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0,", "round(np.sum(losses_sense)/np.sum(masks), 4) avg_accuracy = round(accuracy_score(labels,preds, sample_weight=masks)*100, 2) avg_fscore = round(f1_score(labels,preds,", "return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore] def parse_args(): parser", "[], [], [], float('nan'),[] avg_loss = round(np.sum(losses)/np.sum(masks), 4) avg_sense_loss =", "+ '\\n') valid_fscores = np.array(valid_fscores).transpose() test_fscores = np.array(test_fscores).transpose() # [1,", "parser.add_argument('--dropout', type=float, default=0.25) parser.add_argument('--n_classes', type=int, default=7) # late fusion module.", "total_loss.backward() optimizer.step() if preds!=[]: preds = np.concatenate(preds) labels = np.concatenate(labels)", "args.cuda: model.cuda() loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0,", "{}, fscore: {}, test_loss: {}, acc: {}, fscore: {}, time:", "from dataloader import MELDDataLoader from sklearn.metrics import f1_score, confusion_matrix, accuracy_score,", "valid_losses, valid_fscores = [], 
[] test_fscores, test_accuracys, test_losses = [],", "{}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss,", "# [epoches] f1_score1 = test_fscores[0][np.argmin(valid_losses)] acc_score1 = test_accuracys[np.argmin(valid_losses)] f1_score2 =", "= np.array(valid_fscores).transpose() test_fscores = np.array(test_fscores).transpose() # [1, epoches] test_accuracys =", "F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2)) rf =" ]
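The training loop above calls loss_function(lp_, labels_, mask), where MaskedNLLLoss is imported from model.py, a file this dump does not include. The following is a minimal sketch of what such a masked loss could look like, assuming lp_ holds flattened log-probabilities and mask marks real (non-padding) utterances; the project's actual class may differ.

# Hedged sketch only: model.py is not part of this dump, so this is an
# assumed implementation consistent with the call site above.
import torch
import torch.nn as nn

class MaskedNLLLoss(nn.Module):
    def __init__(self, weight=None):
        super().__init__()
        # reduction='sum' so the total can be divided by the unmasked count
        self.loss = nn.NLLLoss(weight=weight, reduction='sum')

    def forward(self, log_prob, target, mask):
        # log_prob: (batch*seq_len, n_classes), target: (batch*seq_len,)
        # mask: (batch, seq_len), 1 for real utterances, 0 for padding
        mask_ = mask.view(-1, 1).float()
        # zeroed log-prob rows contribute nothing to the summed NLL
        loss = self.loss(log_prob * mask_, target)
        return loss / torch.sum(mask_)

Because padded rows are zeroed before the sum and the divisor is the number of real tokens, the result is a mean NLL over genuine utterances only, which matches how the script weights its per-batch losses by mask counts.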
[ "# loop over all input files for star in args.star:", "between particles in pixels, by default {}\".format(args_def['mindis'])) args = parser.parse_args()", "the minimum distance between particles in pixels, by default {}\".format(args_def['mindis']))", "p3s.star_parse(star, 'data_') header = star_dict['data_']+star_dict['loop_'] header_len = len(header) basename =", "float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']]) for i in xy: dis = math.sqrt((x -", "\"\"\" [options] <coord star files> Output the coord star files", "in pixels, by default {}\".format(args_def['mindis'])) args = parser.parse_args() if len(sys.argv)", "open(star) as s_read: lines = s_read.readlines()[header_len:-1] # with open(basename+'_checked.star', 'w')", "+ \"\"\" [options] <coord star files> Output the coord star", "star files> Output the coord star files after deleting duplicate", "\"Please run '\" + progname + \" -h' for detailed", "1 line = line.split() # get coord x, y =", "y = float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']]) for i in xy: dis =", "the coord star files after deleting duplicate particles \"\"\" args_def", "and {} is {}. Discard.'.format(x,y,i,dis) good = 0 break if", "\" -h' for detailed options.\" sys.exit(1) # get default values", "= parser.parse_args() if len(sys.argv) == 1: print \"usage: \" +", "progname = os.path.basename(sys.argv[0]) usage = progname + \"\"\" [options] <coord", "print \"usage: \" + usage print \"Please run '\" +", "= progname + \"\"\" [options] <coord star files> Output the", "= p3s.star_parse(star, 'data_') header = star_dict['data_']+star_dict['loop_'] header_len = len(header) basename", "distance between particles in pixels, by default {}\".format(args_def['mindis'])) args =", "= line.split() # get coord x, y = float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']])", "\"--mindis\", type=float, help=\"specify the minimum distance between particles in pixels,", "particles in pixels, by default {}\".format(args_def['mindis'])) args = parser.parse_args() if", "break if good == 1: s_w.write('{:>12} '.format(x) + '{:>12} \\n'.format(y))", "loop over all input files for star in args.star: star_dict", "args.__dict__[i] = args_def[i] # loop over all input files for", "None: args.__dict__[i] = args_def[i] # loop over all input files", "xy: dis = math.sqrt((x - i[0])**2 + (y - i[1])**2)", "i in xy: dis = math.sqrt((x - i[0])**2 + (y", "Discard.'.format(x,y,i,dis) good = 0 break if good == 1: s_w.write('{:>12}", "lines = s_read.readlines()[header_len:-1] # with open(basename+'_checked.star', 'w') as s_w: s_w.write(''.join(header))", "for i in args_def: if args.__dict__[i] == None: args.__dict__[i] =", "processed\") parser.add_argument(\"-m\", \"--mindis\", type=float, help=\"specify the minimum distance between particles", "# get default values for i in args_def: if args.__dict__[i]", "options.\" sys.exit(1) # get default values for i in args_def:", "= [] for line in lines: good = 1 line", "= math.sqrt((x - i[0])**2 + (y - i[1])**2) if dis", "lines: good = 1 line = line.split() # get coord", "<reponame>emkailu/PAT3DEM #!/usr/bin/env python import os import sys import argparse import", "star in args.star: star_dict = p3s.star_parse(star, 'data_') header = star_dict['data_']+star_dict['loop_']", "type=float, help=\"specify the minimum distance between particles in pixels, by", "to store x and y xy = [] for line", "args.star: star_dict = p3s.star_parse(star, 'data_') header = 
star_dict['data_']+star_dict['loop_'] header_len =", "= len(header) basename = os.path.basename(os.path.splitext(star)[0]) with open(star) as s_read: lines", "get coord x, y = float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']]) for i in", "math.sqrt((x - i[0])**2 + (y - i[1])**2) if dis <", "math def main(): progname = os.path.basename(sys.argv[0]) usage = progname +", "import argparse import pat3dem.star as p3s import math def main():", "os.path.basename(os.path.splitext(star)[0]) with open(star) as s_read: lines = s_read.readlines()[header_len:-1] # with", "files> Output the coord star files after deleting duplicate particles", "parser = argparse.ArgumentParser() parser.add_argument(\"star\", nargs='*', help=\"specify coord star files to", "'w') as s_w: s_w.write(''.join(header)) # use list of list to", "input files for star in args.star: star_dict = p3s.star_parse(star, 'data_')", "good = 1 line = line.split() # get coord x,", "with open(basename+'_checked.star', 'w') as s_w: s_w.write(''.join(header)) # use list of", "os.path.basename(sys.argv[0]) usage = progname + \"\"\" [options] <coord star files>", "if args.__dict__[i] == None: args.__dict__[i] = args_def[i] # loop over", "for detailed options.\" sys.exit(1) # get default values for i", "in xy: dis = math.sqrt((x - i[0])**2 + (y -", "1: s_w.write('{:>12} '.format(x) + '{:>12} \\n'.format(y)) xy.append((x,y)) s_w.write('\\n') if __name__", "{}. Discard.'.format(x,y,i,dis) good = 0 break if good == 1:", "parser.parse_args() if len(sys.argv) == 1: print \"usage: \" + usage", "s_read: lines = s_read.readlines()[header_len:-1] # with open(basename+'_checked.star', 'w') as s_w:", "to be processed\") parser.add_argument(\"-m\", \"--mindis\", type=float, help=\"specify the minimum distance", "= 0 break if good == 1: s_w.write('{:>12} '.format(x) +", "sys import argparse import pat3dem.star as p3s import math def", "use list of list to store x and y xy", "\"usage: \" + usage print \"Please run '\" + progname", "over all input files for star in args.star: star_dict =", "for line in lines: good = 1 line = line.split()", "- i[1])**2) if dis < args.mindis: print 'Distance between ({},{})", "line = line.split() # get coord x, y = float(line[star_dict['_rlnCoordinateX']]),", "\" + usage print \"Please run '\" + progname +", "star_dict = p3s.star_parse(star, 'data_') header = star_dict['data_']+star_dict['loop_'] header_len = len(header)", "minimum distance between particles in pixels, by default {}\".format(args_def['mindis'])) args", "as p3s import math def main(): progname = os.path.basename(sys.argv[0]) usage", "sys.exit(1) # get default values for i in args_def: if", "= os.path.basename(os.path.splitext(star)[0]) with open(star) as s_read: lines = s_read.readlines()[header_len:-1] #", "[] for line in lines: good = 1 line =", "s_read.readlines()[header_len:-1] # with open(basename+'_checked.star', 'w') as s_w: s_w.write(''.join(header)) # use", "dis = math.sqrt((x - i[0])**2 + (y - i[1])**2) if", "pat3dem.star as p3s import math def main(): progname = os.path.basename(sys.argv[0])", "== None: args.__dict__[i] = args_def[i] # loop over all input", "files after deleting duplicate particles \"\"\" args_def = {'mindis':150} parser", "detailed options.\" sys.exit(1) # get default values for i in", "args.__dict__[i] == None: args.__dict__[i] = args_def[i] # loop over all", "args.mindis: print 'Distance between ({},{}) and {} is {}. Discard.'.format(x,y,i,dis)", "'Distance between ({},{}) and {} is {}. 
Discard.'.format(x,y,i,dis) good =", "args_def: if args.__dict__[i] == None: args.__dict__[i] = args_def[i] # loop", "if dis < args.mindis: print 'Distance between ({},{}) and {}", "star files after deleting duplicate particles \"\"\" args_def = {'mindis':150}", "def main(): progname = os.path.basename(sys.argv[0]) usage = progname + \"\"\"", "store x and y xy = [] for line in", "i[1])**2) if dis < args.mindis: print 'Distance between ({},{}) and", "default {}\".format(args_def['mindis'])) args = parser.parse_args() if len(sys.argv) == 1: print", "print 'Distance between ({},{}) and {} is {}. Discard.'.format(x,y,i,dis) good", "# with open(basename+'_checked.star', 'w') as s_w: s_w.write(''.join(header)) # use list", "+ \" -h' for detailed options.\" sys.exit(1) # get default", "({},{}) and {} is {}. Discard.'.format(x,y,i,dis) good = 0 break", "in args.star: star_dict = p3s.star_parse(star, 'data_') header = star_dict['data_']+star_dict['loop_'] header_len", "line.split() # get coord x, y = float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']]) for", "good = 0 break if good == 1: s_w.write('{:>12} '.format(x)", "import sys import argparse import pat3dem.star as p3s import math", "progname + \"\"\" [options] <coord star files> Output the coord", "= os.path.basename(sys.argv[0]) usage = progname + \"\"\" [options] <coord star", "Output the coord star files after deleting duplicate particles \"\"\"", "s_w: s_w.write(''.join(header)) # use list of list to store x", "p3s import math def main(): progname = os.path.basename(sys.argv[0]) usage =", "#!/usr/bin/env python import os import sys import argparse import pat3dem.star", "{} is {}. Discard.'.format(x,y,i,dis) good = 0 break if good", "header_len = len(header) basename = os.path.basename(os.path.splitext(star)[0]) with open(star) as s_read:", "of list to store x and y xy = []", "+ '{:>12} \\n'.format(y)) xy.append((x,y)) s_w.write('\\n') if __name__ == '__main__': main()", "with open(star) as s_read: lines = s_read.readlines()[header_len:-1] # with open(basename+'_checked.star',", "as s_w: s_w.write(''.join(header)) # use list of list to store", "args_def = {'mindis':150} parser = argparse.ArgumentParser() parser.add_argument(\"star\", nargs='*', help=\"specify coord", "as s_read: lines = s_read.readlines()[header_len:-1] # with open(basename+'_checked.star', 'w') as", "files for star in args.star: star_dict = p3s.star_parse(star, 'data_') header", "+ (y - i[1])**2) if dis < args.mindis: print 'Distance", "'.format(x) + '{:>12} \\n'.format(y)) xy.append((x,y)) s_w.write('\\n') if __name__ == '__main__':", "main(): progname = os.path.basename(sys.argv[0]) usage = progname + \"\"\" [options]", "len(sys.argv) == 1: print \"usage: \" + usage print \"Please", "\"\"\" args_def = {'mindis':150} parser = argparse.ArgumentParser() parser.add_argument(\"star\", nargs='*', help=\"specify", "- i[0])**2 + (y - i[1])**2) if dis < args.mindis:", "# use list of list to store x and y", "(y - i[1])**2) if dis < args.mindis: print 'Distance between", "x, y = float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']]) for i in xy: dis", "import pat3dem.star as p3s import math def main(): progname =", "0 break if good == 1: s_w.write('{:>12} '.format(x) + '{:>12}", "coord star files to be processed\") parser.add_argument(\"-m\", \"--mindis\", type=float, help=\"specify", "get default values for i in args_def: if args.__dict__[i] ==", "good == 1: s_w.write('{:>12} '.format(x) + '{:>12} \\n'.format(y)) 
xy.append((x,y)) s_w.write('\\n')", "y xy = [] for line in lines: good =", "'data_') header = star_dict['data_']+star_dict['loop_'] header_len = len(header) basename = os.path.basename(os.path.splitext(star)[0])", "after deleting duplicate particles \"\"\" args_def = {'mindis':150} parser =", "+ usage print \"Please run '\" + progname + \"", "= star_dict['data_']+star_dict['loop_'] header_len = len(header) basename = os.path.basename(os.path.splitext(star)[0]) with open(star)", "is {}. Discard.'.format(x,y,i,dis) good = 0 break if good ==", "between ({},{}) and {} is {}. Discard.'.format(x,y,i,dis) good = 0", "= {'mindis':150} parser = argparse.ArgumentParser() parser.add_argument(\"star\", nargs='*', help=\"specify coord star", "nargs='*', help=\"specify coord star files to be processed\") parser.add_argument(\"-m\", \"--mindis\",", "[options] <coord star files> Output the coord star files after", "coord star files after deleting duplicate particles \"\"\" args_def =", "by default {}\".format(args_def['mindis'])) args = parser.parse_args() if len(sys.argv) == 1:", "import math def main(): progname = os.path.basename(sys.argv[0]) usage = progname", "if good == 1: s_w.write('{:>12} '.format(x) + '{:>12} \\n'.format(y)) xy.append((x,y))", "parser.add_argument(\"star\", nargs='*', help=\"specify coord star files to be processed\") parser.add_argument(\"-m\",", "basename = os.path.basename(os.path.splitext(star)[0]) with open(star) as s_read: lines = s_read.readlines()[header_len:-1]", "len(header) basename = os.path.basename(os.path.splitext(star)[0]) with open(star) as s_read: lines =", "and y xy = [] for line in lines: good", "x and y xy = [] for line in lines:", "star files to be processed\") parser.add_argument(\"-m\", \"--mindis\", type=float, help=\"specify the", "print \"Please run '\" + progname + \" -h' for", "help=\"specify the minimum distance between particles in pixels, by default", "usage print \"Please run '\" + progname + \" -h'", "dis < args.mindis: print 'Distance between ({},{}) and {} is", "default values for i in args_def: if args.__dict__[i] == None:", "-h' for detailed options.\" sys.exit(1) # get default values for", "coord x, y = float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']]) for i in xy:", "pixels, by default {}\".format(args_def['mindis'])) args = parser.parse_args() if len(sys.argv) ==", "for i in xy: dis = math.sqrt((x - i[0])**2 +", "os import sys import argparse import pat3dem.star as p3s import", "particles \"\"\" args_def = {'mindis':150} parser = argparse.ArgumentParser() parser.add_argument(\"star\", nargs='*',", "help=\"specify coord star files to be processed\") parser.add_argument(\"-m\", \"--mindis\", type=float,", "= s_read.readlines()[header_len:-1] # with open(basename+'_checked.star', 'w') as s_w: s_w.write(''.join(header)) #", "== 1: s_w.write('{:>12} '.format(x) + '{:>12} \\n'.format(y)) xy.append((x,y)) s_w.write('\\n') if", "s_w.write('{:>12} '.format(x) + '{:>12} \\n'.format(y)) xy.append((x,y)) s_w.write('\\n') if __name__ ==", "# get coord x, y = float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']]) for i", "if len(sys.argv) == 1: print \"usage: \" + usage print", "1: print \"usage: \" + usage print \"Please run '\"", "all input files for star in args.star: star_dict = p3s.star_parse(star,", "{}\".format(args_def['mindis'])) args = parser.parse_args() if len(sys.argv) == 1: print \"usage:", "in lines: good = 1 line = line.split() # get", "open(basename+'_checked.star', 'w') 
as s_w: s_w.write(''.join(header)) # use list of list", "'\" + progname + \" -h' for detailed options.\" sys.exit(1)", "<coord star files> Output the coord star files after deleting", "= argparse.ArgumentParser() parser.add_argument(\"star\", nargs='*', help=\"specify coord star files to be", "i in args_def: if args.__dict__[i] == None: args.__dict__[i] = args_def[i]", "values for i in args_def: if args.__dict__[i] == None: args.__dict__[i]", "args_def[i] # loop over all input files for star in", "header = star_dict['data_']+star_dict['loop_'] header_len = len(header) basename = os.path.basename(os.path.splitext(star)[0]) with", "s_w.write(''.join(header)) # use list of list to store x and", "list of list to store x and y xy =", "line in lines: good = 1 line = line.split() #", "star_dict['data_']+star_dict['loop_'] header_len = len(header) basename = os.path.basename(os.path.splitext(star)[0]) with open(star) as", "usage = progname + \"\"\" [options] <coord star files> Output", "run '\" + progname + \" -h' for detailed options.\"", "= float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']]) for i in xy: dis = math.sqrt((x", "argparse import pat3dem.star as p3s import math def main(): progname", "deleting duplicate particles \"\"\" args_def = {'mindis':150} parser = argparse.ArgumentParser()", "{'mindis':150} parser = argparse.ArgumentParser() parser.add_argument(\"star\", nargs='*', help=\"specify coord star files", "+ progname + \" -h' for detailed options.\" sys.exit(1) #", "= 1 line = line.split() # get coord x, y", "progname + \" -h' for detailed options.\" sys.exit(1) # get", "list to store x and y xy = [] for", "import os import sys import argparse import pat3dem.star as p3s", "args = parser.parse_args() if len(sys.argv) == 1: print \"usage: \"", "python import os import sys import argparse import pat3dem.star as", "for star in args.star: star_dict = p3s.star_parse(star, 'data_') header =", "< args.mindis: print 'Distance between ({},{}) and {} is {}.", "files to be processed\") parser.add_argument(\"-m\", \"--mindis\", type=float, help=\"specify the minimum", "== 1: print \"usage: \" + usage print \"Please run", "float(line[star_dict['_rlnCoordinateY']]) for i in xy: dis = math.sqrt((x - i[0])**2", "parser.add_argument(\"-m\", \"--mindis\", type=float, help=\"specify the minimum distance between particles in", "in args_def: if args.__dict__[i] == None: args.__dict__[i] = args_def[i] #", "= args_def[i] # loop over all input files for star", "argparse.ArgumentParser() parser.add_argument(\"star\", nargs='*', help=\"specify coord star files to be processed\")", "xy = [] for line in lines: good = 1", "be processed\") parser.add_argument(\"-m\", \"--mindis\", type=float, help=\"specify the minimum distance between", "i[0])**2 + (y - i[1])**2) if dis < args.mindis: print", "duplicate particles \"\"\" args_def = {'mindis':150} parser = argparse.ArgumentParser() parser.add_argument(\"star\"," ]
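To make the rejection test concrete, here is a small worked example of the distance check above, using made-up coordinates (the values are illustrative, not from any real star file):

# Worked example of the de-duplication test (hypothetical values):
import math
x, y = 100.0, 100.0      # candidate particle
kept = [(40.0, 20.0)]    # a coordinate already accepted
dis = math.sqrt((x - kept[0][0]) ** 2 + (y - kept[0][1]) ** 2)
# dis = sqrt(60**2 + 80**2) = sqrt(3600 + 6400) = 100.0
# with the default --mindis of 150, 100.0 < 150, so the candidate is discarded

Note the inner loop compares each candidate against every previously kept coordinate, so the filter is O(n^2) in the number of particles per file; for typical picking densities that is acceptable, but it is worth knowing before running it on very large coordinate sets.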
[ "'a-icon-alt'}) except: stars = 'null' reviews.append([review_asin, product, date, verified, title.text,", "attrs={'data-hook': 'review-title'}) except: title = 'null' try: product = d.find('a',", "features='lxml') print(review_soup) passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'})", "filename): ''' Takes a list of asins, retrieves html for", "rv:66.0) Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"} for asin", "import pandas as pd import requests import time import sys", "import BeautifulSoup import pandas as pd import requests import time", "print(review_soup) passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'}) if", "for {asin}') passed_last_page = None counter = 1 while (passed_last_page", "= review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'}) if passed_last_page ==", "attrs={'data-hook':'review'}): # print(d) try: date = d.find('span', attrs={'data-hook':'review-date'}) date =", "<filename>src/review_scraper.py from bs4 import BeautifulSoup import pandas as pd import", "date.text.split(' ')[-3:] date = ' '.join(date) except: date = 'null'", "except: title = 'null' try: product = d.find('a', attrs={'data-hook': 'format-strip'})", "description = d.find('span', attrs={'data-hook': 'review-body'}) except: description = 'null' try:", "Verified' else: verified = verified.text except: verified = 'null' try:", "requests.get(reviews_url, headers=headers) print(rev.status_code) reviews_page_content = rev.content review_soup = BeautifulSoup(reviews_page_content, features='lxml')", "(counter <= 10): print(len(reviews)) reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev =", "reviews_page_content = rev.content review_soup = BeautifulSoup(reviews_page_content, features='lxml') print(review_soup) passed_last_page =", "title = 'null' try: product = d.find('a', attrs={'data-hook': 'format-strip'}) product", "try: verified = d.find('span', attrs={'data-hook':'avp-badge'}) if verified == None: verified", "attrs={'class': 'a-profile-name'}) except: reviewer_name = 'null' try: stars = d.find('span',", "'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews for {len(asin_list)} asins stored successfully", "verified == None: verified = 'Not Verified' else: verified =", "{len(asin_list)} asins stored successfully in {filename}') return reviews, reviews_df if", "asin try: verified = d.find('span', attrs={'data-hook':'avp-badge'}) if verified == None:", "except: description = 'null' try: reviewer_name = d.find('span', attrs={'class': 'a-profile-name'})", "else: pass counter += 1 time.sleep(15) reviews_df = pd.DataFrame(reviews, columns=['asin','product','date',", "'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews for {len(asin_list)} asins stored successfully in", "for asin in asin_list: print(f'Collecting reviews for {asin}') passed_last_page =", "except: product = 'null' try: review_asin = product['href'].split('/')[3] except: review_asin", "== None: verified = 'Not Verified' else: verified = verified.text", 
"float(stars.text[0:3])]) else: pass counter += 1 time.sleep(15) reviews_df = pd.DataFrame(reviews,", "reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}')", "'null' try: description = d.find('span', attrs={'data-hook': 'review-body'}) except: description =", "print(rev.status_code) reviews_page_content = rev.content review_soup = BeautifulSoup(reviews_page_content, features='lxml') print(review_soup) passed_last_page", "asin in asin_list: print(f'Collecting reviews for {asin}') passed_last_page = None", "in review_soup.findAll('div', attrs={'data-hook':'review'}): # print(d) try: date = d.find('span', attrs={'data-hook':'review-date'})", "stars = d.find('span', attrs={'class': 'a-icon-alt'}) except: stars = 'null' reviews.append([review_asin,", "Returns: ------- review information (list), reviews_df (Pandas DataFrame) ''' asin_list", "reviewer_name = d.find('span', attrs={'class': 'a-profile-name'}) except: reviewer_name = 'null' try:", "'verified', 'title', 'desc', 'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews for {len(asin_list)}", "in asin_list: print(f'Collecting reviews for {asin}') passed_last_page = None counter", "= asin try: verified = d.find('span', attrs={'data-hook':'avp-badge'}) if verified ==", "passed_last_page == None: for d in review_soup.findAll('div', attrs={'data-hook':'review'}): # print(d)", "List of ASINs (list of strings) Returns: ------- review information", "reviews_df (Pandas DataFrame) ''' asin_list = [asin_list] print(asin_list) reviews =", "= [asin_list] print(asin_list) reviews = [] headers = {\"User-Agent\":\"Mozilla/5.0 (Windows", "pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews", "a-spacing-top-large a-text-center no-reviews-section'}) if passed_last_page == None: for d in", "description = 'null' try: reviewer_name = d.find('span', attrs={'class': 'a-profile-name'}) except:", "strings) Returns: ------- review information (list), reviews_df (Pandas DataFrame) '''", "'desc', 'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews for {len(asin_list)} asins stored", "description.text, reviewer_name.text, float(stars.text[0:3])]) else: pass counter += 1 time.sleep(15) reviews_df", "try: date = d.find('span', attrs={'data-hook':'review-date'}) date = date.text.split(' ')[-3:] date", "= d.find('span', attrs={'data-hook': 'review-body'}) except: description = 'null' try: reviewer_name", "reviews for {len(asin_list)} asins stored successfully in {filename}') return reviews,", "reviews for {asin}') passed_last_page = None counter = 1 while", "'null' try: stars = d.find('span', attrs={'class': 'a-icon-alt'}) except: stars =", "(list), reviews_df (Pandas DataFrame) ''' asin_list = [asin_list] print(asin_list) reviews", "''' asin_list = [asin_list] print(asin_list) reviews = [] headers =", "= d.find('a', attrs={'data-hook': 'review-title'}) except: title = 'null' try: product", "product = 'null' try: review_asin = product['href'].split('/')[3] except: review_asin =", "Takes a list of asins, retrieves html for reviews page,", "review_asin = product['href'].split('/')[3] except: review_asin = asin try: verified =", "---------- List of ASINs (list of strings) Returns: ------- review", "of ASINs (list of 
strings) Returns: ------- review information (list),", "pandas as pd import requests import time import sys def", "None counter = 1 while (passed_last_page == None) and (counter", "= requests.get(reviews_url, headers=headers) print(rev.status_code) reviews_page_content = rev.content review_soup = BeautifulSoup(reviews_page_content,", "attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'}) if passed_last_page == None: for", "if verified == None: verified = 'Not Verified' else: verified", "pass counter += 1 time.sleep(15) reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified',", "d.find('a', attrs={'data-hook': 'review-title'}) except: title = 'null' try: product =", "verified = verified.text except: verified = 'null' try: description =", "'Not Verified' else: verified = verified.text except: verified = 'null'", "print(asin_list) reviews = [] headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0;", "None) and (counter <= 10): print(len(reviews)) reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url)", "date = date.text.split(' ')[-3:] date = ' '.join(date) except: date", "10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\",", "= None counter = 1 while (passed_last_page == None) and", "time import sys def reviews_scraper(asin_list, filename): ''' Takes a list", "product = d.find('a', attrs={'data-hook': 'format-strip'}) product = product.text except: product", "d in review_soup.findAll('div', attrs={'data-hook':'review'}): # print(d) try: date = d.find('span',", "attrs={'data-hook': 'format-strip'}) product = product.text except: product = 'null' try:", "DataFrame) ''' asin_list = [asin_list] print(asin_list) reviews = [] headers", "'null' try: title = d.find('a', attrs={'data-hook': 'review-title'}) except: title =", "BeautifulSoup(reviews_page_content, features='lxml') print(review_soup) passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center", "reviewer_name = 'null' try: stars = d.find('span', attrs={'class': 'a-icon-alt'}) except:", "+= 1 time.sleep(15) reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc',", "= 1 while (passed_last_page == None) and (counter <= 10):", "{\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip,", "'null' try: reviewer_name = d.find('span', attrs={'class': 'a-profile-name'}) except: reviewer_name =", "title.text, description.text, reviewer_name.text, float(stars.text[0:3])]) else: pass counter += 1 time.sleep(15)", "= d.find('span', attrs={'class': 'a-profile-name'}) except: reviewer_name = 'null' try: stars", "import requests import time import sys def reviews_scraper(asin_list, filename): '''", "reviews = [] headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64;", "f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev = requests.get(reviews_url, headers=headers) print(rev.status_code) reviews_page_content = rev.content", "= product['href'].split('/')[3] except: review_asin = asin try: verified = d.find('span',", "= 
f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev = requests.get(reviews_url, headers=headers) print(rev.status_code) reviews_page_content =", "= rev.content review_soup = BeautifulSoup(reviews_page_content, features='lxml') print(review_soup) passed_last_page = review_soup.find('div',", "= 'null' try: review_asin = product['href'].split('/')[3] except: review_asin = asin", "requests import time import sys def reviews_scraper(asin_list, filename): ''' Takes", "try: reviewer_name = d.find('span', attrs={'class': 'a-profile-name'}) except: reviewer_name = 'null'", "reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews for {len(asin_list)} asins stored successfully in {filename}')", "asin_list: print(f'Collecting reviews for {asin}') passed_last_page = None counter =", "counter += 1 time.sleep(15) reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title',", "(passed_last_page == None) and (counter <= 10): print(len(reviews)) reviews_url =", "for reviews page, and parses out key data points Parameters", "= date.text.split(' ')[-3:] date = ' '.join(date) except: date =", "try: title = d.find('a', attrs={'data-hook': 'review-title'}) except: title = 'null'", "date = ' '.join(date) except: date = 'null' try: title", "points Parameters ---------- List of ASINs (list of strings) Returns:", "''' Takes a list of asins, retrieves html for reviews", "(list of strings) Returns: ------- review information (list), reviews_df (Pandas", "== None: for d in review_soup.findAll('div', attrs={'data-hook':'review'}): # print(d) try:", "time.sleep(15) reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name', 'rating'])", "\"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"} for asin in asin_list: print(f'Collecting reviews for {asin}')", "def reviews_scraper(asin_list, filename): ''' Takes a list of asins, retrieves", "information (list), reviews_df (Pandas DataFrame) ''' asin_list = [asin_list] print(asin_list)", "title = d.find('a', attrs={'data-hook': 'review-title'}) except: title = 'null' try:", "as pd import requests import time import sys def reviews_scraper(asin_list,", "(Pandas DataFrame) ''' asin_list = [asin_list] print(asin_list) reviews = []", "deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"} for asin in asin_list: print(f'Collecting reviews", "review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'}) if passed_last_page == None:", "print(d) try: date = d.find('span', attrs={'data-hook':'review-date'}) date = date.text.split(' ')[-3:]", "'review-body'}) except: description = 'null' try: reviewer_name = d.find('span', attrs={'class':", "asins, retrieves html for reviews page, and parses out key", "10): print(len(reviews)) reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev = requests.get(reviews_url, headers=headers)", "and parses out key data points Parameters ---------- List of", "\"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", 
\"Upgrade-Insecure-Requests\":\"1\"} for asin in asin_list: print(f'Collecting", "rev = requests.get(reviews_url, headers=headers) print(rev.status_code) reviews_page_content = rev.content review_soup =", "review_soup.findAll('div', attrs={'data-hook':'review'}): # print(d) try: date = d.find('span', attrs={'data-hook':'review-date'}) date", "except: review_asin = asin try: verified = d.find('span', attrs={'data-hook':'avp-badge'}) if", "= 'null' try: title = d.find('a', attrs={'data-hook': 'review-title'}) except: title", "= [] headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64;", "# print(d) try: date = d.find('span', attrs={'data-hook':'review-date'}) date = date.text.split('", "date = d.find('span', attrs={'data-hook':'review-date'}) date = date.text.split(' ')[-3:] date =", "reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev = requests.get(reviews_url, headers=headers) print(rev.status_code) reviews_page_content", "attrs={'data-hook':'avp-badge'}) if verified == None: verified = 'Not Verified' else:", "= 'null' try: reviewer_name = d.find('span', attrs={'class': 'a-profile-name'}) except: reviewer_name", "date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])]) else: pass counter +=", "asin_list = [asin_list] print(asin_list) reviews = [] headers = {\"User-Agent\":\"Mozilla/5.0", "try: review_asin = product['href'].split('/')[3] except: review_asin = asin try: verified", "product['href'].split('/')[3] except: review_asin = asin try: verified = d.find('span', attrs={'data-hook':'avp-badge'})", "= verified.text except: verified = 'null' try: description = d.find('span',", "verified.text except: verified = 'null' try: description = d.find('span', attrs={'data-hook':", "d.find('span', attrs={'class': 'a-icon-alt'}) except: stars = 'null' reviews.append([review_asin, product, date,", "'format-strip'}) product = product.text except: product = 'null' try: review_asin", "')[-3:] date = ' '.join(date) except: date = 'null' try:", "columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews for", "bs4 import BeautifulSoup import pandas as pd import requests import", "list of asins, retrieves html for reviews page, and parses", "page, and parses out key data points Parameters ---------- List", "review_soup = BeautifulSoup(reviews_page_content, features='lxml') print(review_soup) passed_last_page = review_soup.find('div', attrs={'class': 'a-section", "d.find('span', attrs={'data-hook':'review-date'}) date = date.text.split(' ')[-3:] date = ' '.join(date)", "'.join(date) except: date = 'null' try: title = d.find('a', attrs={'data-hook':", "date = 'null' try: title = d.find('a', attrs={'data-hook': 'review-title'}) except:", "'a-profile-name'}) except: reviewer_name = 'null' try: stars = d.find('span', attrs={'class':", "= 'null' try: stars = d.find('span', attrs={'class': 'a-icon-alt'}) except: stars", "print(f'{len(reviews)} reviews for {len(asin_list)} asins stored successfully in {filename}') return", "= product.text except: product = 'null' try: review_asin = product['href'].split('/')[3]", "= d.find('a', attrs={'data-hook': 'format-strip'}) product = product.text except: product =", "except: reviewer_name = 'null' try: stars = d.find('span', attrs={'class': 'a-icon-alt'})", "data points Parameters 
---------- List of ASINs (list of strings)", "= {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\",", "attrs={'data-hook': 'review-body'}) except: description = 'null' try: reviewer_name = d.find('span',", "= 'null' reviews.append([review_asin, product, date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])])", "counter = 1 while (passed_last_page == None) and (counter <=", "import time import sys def reviews_scraper(asin_list, filename): ''' Takes a", "reviews_scraper(asin_list, filename): ''' Takes a list of asins, retrieves html", "verified = d.find('span', attrs={'data-hook':'avp-badge'}) if verified == None: verified =", "d.find('span', attrs={'data-hook':'avp-badge'}) if verified == None: verified = 'Not Verified'", "stars = 'null' reviews.append([review_asin, product, date, verified, title.text, description.text, reviewer_name.text,", "'null' reviews.append([review_asin, product, date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])]) else:", "a list of asins, retrieves html for reviews page, and", "verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])]) else: pass counter += 1", "for {len(asin_list)} asins stored successfully in {filename}') return reviews, reviews_df", "print(len(reviews)) reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev = requests.get(reviews_url, headers=headers) print(rev.status_code)", "if passed_last_page == None: for d in review_soup.findAll('div', attrs={'data-hook':'review'}): #", "attrs={'data-hook':'review-date'}) date = date.text.split(' ')[-3:] date = ' '.join(date) except:", "except: verified = 'null' try: description = d.find('span', attrs={'data-hook': 'review-body'})", "asins stored successfully in {filename}') return reviews, reviews_df if __name__", "pd import requests import time import sys def reviews_scraper(asin_list, filename):", "sys def reviews_scraper(asin_list, filename): ''' Takes a list of asins,", "retrieves html for reviews page, and parses out key data", "else: verified = verified.text except: verified = 'null' try: description", "'title', 'desc', 'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)} reviews for {len(asin_list)} asins", "import sys def reviews_scraper(asin_list, filename): ''' Takes a list of", "(Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\",", "\"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"} for asin in asin_list: print(f'Collecting reviews for", "attrs={'class': 'a-icon-alt'}) except: stars = 'null' reviews.append([review_asin, product, date, verified,", "= 'null' try: description = d.find('span', attrs={'data-hook': 'review-body'}) except: description", "reviews page, and parses out key data points Parameters ----------", "[] headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0)", "1 while (passed_last_page == None) and (counter <= 10): print(len(reviews))", "except: stars = 'null' reviews.append([review_asin, product, date, verified, title.text, description.text,", "out key data points Parameters ---------- List of ASINs (list", "successfully in {filename}') return reviews, reviews_df if __name__ == 
'__main__':", "passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'}) if passed_last_page", "reviews.append([review_asin, product, date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])]) else: pass", "ASINs (list of strings) Returns: ------- review information (list), reviews_df", "= d.find('span', attrs={'class': 'a-icon-alt'}) except: stars = 'null' reviews.append([review_asin, product,", "None: verified = 'Not Verified' else: verified = verified.text except:", "NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",", "of asins, retrieves html for reviews page, and parses out", "None: for d in review_soup.findAll('div', attrs={'data-hook':'review'}): # print(d) try: date", "Parameters ---------- List of ASINs (list of strings) Returns: -------", "and (counter <= 10): print(len(reviews)) reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev", "= d.find('span', attrs={'data-hook':'review-date'}) date = date.text.split(' ')[-3:] date = '", "review_asin = asin try: verified = d.find('span', attrs={'data-hook':'avp-badge'}) if verified", "= d.find('span', attrs={'data-hook':'avp-badge'}) if verified == None: verified = 'Not", "d.find('a', attrs={'data-hook': 'format-strip'}) product = product.text except: product = 'null'", "product.text except: product = 'null' try: review_asin = product['href'].split('/')[3] except:", "' '.join(date) except: date = 'null' try: title = d.find('a',", "html for reviews page, and parses out key data points", "key data points Parameters ---------- List of ASINs (list of", "'review-title'}) except: title = 'null' try: product = d.find('a', attrs={'data-hook':", "product, date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])]) else: pass counter", "<= 10): print(len(reviews)) reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}' print(reviews_url) rev = requests.get(reviews_url,", "for d in review_soup.findAll('div', attrs={'data-hook':'review'}): # print(d) try: date =", "no-reviews-section'}) if passed_last_page == None: for d in review_soup.findAll('div', attrs={'data-hook':'review'}):", "try: stars = d.find('span', attrs={'class': 'a-icon-alt'}) except: stars = 'null'", "[asin_list] print(asin_list) reviews = [] headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT", "rev.content review_soup = BeautifulSoup(reviews_page_content, features='lxml') print(review_soup) passed_last_page = review_soup.find('div', attrs={'class':", "'null' try: product = d.find('a', attrs={'data-hook': 'format-strip'}) product = product.text", "BeautifulSoup import pandas as pd import requests import time import", "while (passed_last_page == None) and (counter <= 10): print(len(reviews)) reviews_url", "a-text-center no-reviews-section'}) if passed_last_page == None: for d in review_soup.findAll('div',", "= 'Not Verified' else: verified = verified.text except: verified =", "try: product = d.find('a', attrs={'data-hook': 'format-strip'}) product = product.text except:", "{asin}') passed_last_page = None counter = 1 while (passed_last_page ==", "verified = 'null' try: description = d.find('span', 
attrs={'data-hook': 'review-body'}) except:", "stored successfully in {filename}') return reviews, reviews_df if __name__ ==", "= pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name', 'rating']) reviews_df.to_csv(f'data/reviews/{filename}') print(f'{len(reviews)}", "= 'null' try: product = d.find('a', attrs={'data-hook': 'format-strip'}) product =", "from bs4 import BeautifulSoup import pandas as pd import requests", "passed_last_page = None counter = 1 while (passed_last_page == None)", "d.find('span', attrs={'class': 'a-profile-name'}) except: reviewer_name = 'null' try: stars =", "x64; rv:66.0) Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"} for", "Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"} for asin in", "print(f'Collecting reviews for {asin}') passed_last_page = None counter = 1", "reviewer_name.text, float(stars.text[0:3])]) else: pass counter += 1 time.sleep(15) reviews_df =", "Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"} for asin in asin_list:", "review information (list), reviews_df (Pandas DataFrame) ''' asin_list = [asin_list]", "headers=headers) print(rev.status_code) reviews_page_content = rev.content review_soup = BeautifulSoup(reviews_page_content, features='lxml') print(review_soup)", "'a-section a-spacing-top-large a-text-center no-reviews-section'}) if passed_last_page == None: for d", "verified = 'Not Verified' else: verified = verified.text except: verified", "= ' '.join(date) except: date = 'null' try: title =", "of strings) Returns: ------- review information (list), reviews_df (Pandas DataFrame)", "except: date = 'null' try: title = d.find('a', attrs={'data-hook': 'review-title'})", "print(reviews_url) rev = requests.get(reviews_url, headers=headers) print(rev.status_code) reviews_page_content = rev.content review_soup", "headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101", "= BeautifulSoup(reviews_page_content, features='lxml') print(review_soup) passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large", "1 time.sleep(15) reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name',", "in {filename}') return reviews, reviews_df if __name__ == '__main__': reviews_scraper(*sys.argv[1:])", "Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\", \"Accept-Encoding\":\"gzip, deflate\", \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\", \"DNT\":\"1\",\"Connection\":\"close\", \"Upgrade-Insecure-Requests\":\"1\"}", "parses out key data points Parameters ---------- List of ASINs", "product = product.text except: product = 'null' try: review_asin =", "------- review information (list), reviews_df (Pandas DataFrame) ''' asin_list =", "try: description = d.find('span', attrs={'data-hook': 'review-body'}) except: description = 'null'", "\"Upgrade-Insecure-Requests\":\"1\"} for asin in asin_list: print(f'Collecting reviews for {asin}') passed_last_page", "'null' 
try: review_asin = product['href'].split('/')[3] except: review_asin = asin try:", "== None) and (counter <= 10): print(len(reviews)) reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}'", "d.find('span', attrs={'data-hook': 'review-body'}) except: description = 'null' try: reviewer_name =" ]
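# Usage sketch (the ASIN and filename below are placeholders, not values from this
# project; assumes the data/reviews/ directory exists and that Amazon serves these
# review pages without a login):
#
#   from src.review_scraper import reviews_scraper
#   raw_rows, df = reviews_scraper('B000000000', 'sample_reviews.csv')
#   print(df['rating'].describe())
#
# or, equivalently, from the shell:
#
#   python src/review_scraper.py B000000000 sample_reviews.csv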
[ "values.pop(0) if 'Stream' in key: split_v = values[1].split(',') d['Image Size']", "'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName', 'Take'] def check_exiftool(): \"\"\" checks if", "os.popen(command).readlines(): line = line.strip('\\n') line = line.replace('\\t', '') line =", "what's needed for # every file type.... RAF = ['EXIF:LensModel',", "directly from exiftool through the system commands and cglexecute. For", "RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength',", "d = {} if tool == 'exiftool': command = r'exiftool", "\"\"\" pass def check_ffprobe(): \"\"\" checks if ffprobe is installed", "fps')[0].replace(' ', '') if 'Duration' in key: d['Track Duration'] =", "split_v = values[1].split(',') d['Image Size'] = split_v[2].split()[0] d['Source Image Width'],", "for my big list of stuff i care about and", "= ' '.join(values) d[key] = value except ValueError: print('skipping %s'", "= os.path.splitext(filein)[-1] d = {} if tool == 'exiftool': command", "command = r'REDLINE --i %s --printMeta 1' % filein d", "REDLINE is a command line interface from RED that is", "output = cgl_execute(command=command) for each in output['printout']: try: values =", "\"\"\" file_, ext_ = os.path.splitext(filein) if ext_.upper() == '.R3D': command", "this https://www.red.com/downloads/options?itemInternalId=16144 :param filein: :return: \"\"\" file_, ext_ = os.path.splitext(filein)", "values[1].split(',') d['Image Size'] = split_v[2].split()[0] d['Source Image Width'], d['Source Image", "= r'%s %s' % ('ffprobe', filein) output = cgl_execute(command=command) for", "line.replace(' ', '') try: key_, value = line.split(':', 1) if", "', '') try: key_, value = line.split(':', 1) if key_", "redline is installed :return: \"\"\" pass def check_ffprobe(): \"\"\" checks", "d[key] = value return d elif tool == 'ffprobe': command", "= ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make',", "line = line.strip('\\n') line = line.replace('\\t', '') line = line.replace('", "\"\"\" ext = os.path.splitext(filein)[-1] d = {} if tool ==", "'.join(values) d[key] = value except ValueError: print('skipping %s' % each)", "os.path.splitext(filein)[-1] d = {} if tool == 'exiftool': command =", "\"\"\" pass def get(filein, tool='exiftool', print_output=False): \"\"\" Due to issues", "'Stream' in key: split_v = values[1].split(',') d['Image Size'] = split_v[2].split()[0]", "--i %s --printMeta 1' % filein d = {} for", "', '') if 'Duration' in key: d['Track Duration'] = '%s", "in key: split_v = values[1].split(',') d['Image Size'] = split_v[2].split()[0] d['Source", "'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake'] MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate',", "dictionary containing lumberdata from exiftool \"\"\" ext = os.path.splitext(filein)[-1] d", "if 'Stream' in key: split_v = values[1].split(',') d['Image Size'] =", "parse output directly from exiftool through the system commands and", "the lumberdata for a single file. 
:param filein: :return: dictionary", "Frame Rate'] = split_v[4].split(' fps')[0].replace(' ', '') if 'Duration' in", "values[0] values.pop(0) if 'Stream' in key: split_v = values[1].split(',') d['Image", "if ext_.upper() == '.R3D': command = r'REDLINE --i %s --printMeta", "get(filein, tool='exiftool', print_output=False): \"\"\" Due to issues with the exiftool", "'exiftool': command = r'exiftool %s' % filein output = cgl_execute(command=command,", "installed :return: \"\"\" pass def check_ffprobe(): \"\"\" checks if ffprobe", "# noinspection PyUnresolvedReferences import os import re # TODO I'm", "= d['Image Size'].split('x') d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace(' ',", "import re # TODO I'm going to need to make", "is required for this https://www.red.com/downloads/options?itemInternalId=16144 :param filein: :return: \"\"\" file_,", "key = values[0] values.pop(0) if 'Stream' in key: split_v =", "d['Track Duration'] = '%s s' % values[0].split(',')[0] value = '", "'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D = ['ClipName',", "re.split(\":\\s+\", each) key = values[0] values.pop(0) if 'Stream' in key:", "only designed to get the lumberdata for a single file.", "= value except ValueError: print('skipping %s' % each) return d", "in output['printout']: try: values = re.split(\":\\s+\", each) key = values[0]", "each) d[key] = value return d elif tool == 'ffprobe':", "the moment it's only designed to get the lumberdata for", "line = line.replace('\\t', '') line = line.replace(' ', '') try:", "'%s s' % values[0].split(',')[0] value = ' '.join(values) d[key] =", "= cgl_execute(command=command, verbose=False, print_output=print_output) for each in output['printout']: key, value", "exiftool module this is provided as a way to parse", "line.split(':', 1) if key_ != 'None': d[key_] = value except", "if tool == 'exiftool': command = r'exiftool %s' % filein", "'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date', 'AudioSlate', 'VideoSlate', 'Camera',", "going to need to make a dictionary for my big", "system commands and cglexecute. For the moment it's only designed", "value = line.split(':', 1) if key_ != 'None': d[key_] =", "if ffprobe is installed :return: \"\"\" pass def get(filein, tool='exiftool',", "lumberdata for a single file. :param filein: :return: dictionary containing", "through the system commands and cglexecute. For the moment it's", "'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D = ['ClipName', 'EdgeTC',", "designed to get the lumberdata for a single file. 
:param", "'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight',", "of stuff i care about and what's needed for #", "installed :return: \"\"\" pass def get(filein, tool='exiftool', print_output=False): \"\"\" Due", "'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames',", "need to make a dictionary for my big list of", "'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D =", "'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength',", "it's only designed to get the lumberdata for a single", "I'm going to need to make a dictionary for my", "== '.R3D': command = r'REDLINE --i %s --printMeta 1' %", "'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName', 'Take'] def check_exiftool(): \"\"\" checks", "output['printout']: key, value = re.split(\"\\s+:\\s+\", each) d[key] = value return", "['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model',", "ffprobe is installed :return: \"\"\" pass def get(filein, tool='exiftool', print_output=False):", "d[key] = value except ValueError: print('skipping %s' % each) return", "'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake'] MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight',", "Image Height'] = d['Image Size'].split('x') d['Video Frame Rate'] = split_v[4].split('", "split_v[2].split()[0] d['Source Image Width'], d['Source Image Height'] = d['Image Size'].split('x')", "print('skipping %s' % each) return d def get_red_data(filein): \"\"\" method", "% filein d = {} for line in os.popen(command).readlines(): line", "d elif tool == 'ffprobe': command = r'%s %s' %", "each) key = values[0] values.pop(0) if 'Stream' in key: split_v", "Size'] = split_v[2].split()[0] d['Source Image Width'], d['Source Image Height'] =", "'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake'] MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate',", "dictionary for my big list of stuff i care about", "Size'].split('x') d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace(' ', '') if", "line.replace('\\t', '') line = line.replace(' ', '') try: key_, value", "!= 'None': d[key_] = value except ValueError: pass return d", "key_, value = line.split(':', 1) if key_ != 'None': d[key_]", "def get(filein, tool='exiftool', print_output=False): \"\"\" Due to issues with the", "for line in os.popen(command).readlines(): line = line.strip('\\n') line = line.replace('\\t',", "list of stuff i care about and what's needed for", "== 'ffprobe': command = r'%s %s' % ('ffprobe', filein) output", "ValueError: print('skipping %s' % each) return d def get_red_data(filein): \"\"\"", "my big list of stuff i care about and what's", "'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake',", "value = re.split(\"\\s+:\\s+\", each) d[key] = value return d elif", "= re.split(\"\\s+:\\s+\", each) d[key] = value return d elif tool", 
"print_output=False): \"\"\" Due to issues with the exiftool module this", "['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date', 'AudioSlate',", "'Shutter(deg)', 'SensorID', 'SensorName', 'Take'] def check_exiftool(): \"\"\" checks if exiftool", "each in output['printout']: key, value = re.split(\"\\s+:\\s+\", each) d[key] =", "a way to parse output directly from exiftool through the", "cgl_execute(command=command) for each in output['printout']: try: values = re.split(\":\\s+\", each)", "\"\"\" checks if exiftool is installed. :return: \"\"\" pass def", "in os.popen(command).readlines(): line = line.strip('\\n') line = line.replace('\\t', '') line", "exiftool through the system commands and cglexecute. For the moment", "Image Width'], d['Source Image Height'] = d['Image Size'].split('x') d['Video Frame", "output['printout']: try: values = re.split(\":\\s+\", each) key = values[0] values.pop(0)", "with the exiftool module this is provided as a way", "line interface from RED that is required for this https://www.red.com/downloads/options?itemInternalId=16144", "d def get_red_data(filein): \"\"\" method for pulling lumberdata from r3d", "'MediaSerialNumber', 'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID',", "check_redline(): \"\"\" checks if redline is installed :return: \"\"\" pass", "key: split_v = values[1].split(',') d['Image Size'] = split_v[2].split()[0] d['Source Image", "' '.join(values) d[key] = value except ValueError: print('skipping %s' %", "'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake'] MOV =", "TODO I'm going to need to make a dictionary for", "from exiftool through the system commands and cglexecute. For the", "provided as a way to parse output directly from exiftool", "single file. :param filein: :return: dictionary containing lumberdata from exiftool", "= value return d elif tool == 'ffprobe': command =", "os.path.splitext(filein) if ext_.upper() == '.R3D': command = r'REDLINE --i %s", "For the moment it's only designed to get the lumberdata", "d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace(' ', '') if 'Duration'", "def check_redline(): \"\"\" checks if redline is installed :return: \"\"\"", "Height'] = d['Image Size'].split('x') d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace('", "tool='exiftool', print_output=False): \"\"\" Due to issues with the exiftool module", "line = line.replace(' ', '') try: key_, value = line.split(':',", "= r'exiftool %s' % filein output = cgl_execute(command=command, verbose=False, print_output=print_output)", "make a dictionary for my big list of stuff i", "'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName', 'Take'] def check_exiftool(): \"\"\"", "'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName', 'Take'] def check_exiftool():", "key_ != 'None': d[key_] = value except ValueError: pass return", "{} for line in os.popen(command).readlines(): line = line.strip('\\n') line =", "% filein output = cgl_execute(command=command, verbose=False, print_output=print_output) for each in", "key, value = re.split(\"\\s+:\\s+\", each) d[key] = value return d", "= split_v[2].split()[0] d['Source Image Width'], d['Source Image Height'] = d['Image", "pulling lumberdata from r3d files. 
REDLINE is a command line", "to make a dictionary for my big list of stuff", "r'%s %s' % ('ffprobe', filein) output = cgl_execute(command=command) for each", "output = cgl_execute(command=command, verbose=False, print_output=print_output) for each in output['printout']: key,", "checks if redline is installed :return: \"\"\" pass def check_ffprobe():", "checks if ffprobe is installed :return: \"\"\" pass def get(filein,", "key: d['Track Duration'] = '%s s' % values[0].split(',')[0] value =", "containing lumberdata from exiftool \"\"\" ext = os.path.splitext(filein)[-1] d =", "r'REDLINE --i %s --printMeta 1' % filein d = {}", "check_exiftool(): \"\"\" checks if exiftool is installed. :return: \"\"\" pass", "def check_ffprobe(): \"\"\" checks if ffprobe is installed :return: \"\"\"", "issues with the exiftool module this is provided as a", "% ('ffprobe', filein) output = cgl_execute(command=command) for each in output['printout']:", "is installed. :return: \"\"\" pass def check_redline(): \"\"\" checks if", "as a way to parse output directly from exiftool through", "tool == 'ffprobe': command = r'%s %s' % ('ffprobe', filein)", "r3d files. REDLINE is a command line interface from RED", "\"\"\" Due to issues with the exiftool module this is", "if 'Duration' in key: d['Track Duration'] = '%s s' %", "try: key_, value = line.split(':', 1) if key_ != 'None':", "and what's needed for # every file type.... RAF =", "ext_ = os.path.splitext(filein) if ext_.upper() == '.R3D': command = r'REDLINE", "= os.path.splitext(filein) if ext_.upper() == '.R3D': command = r'REDLINE --i", "Duration'] = '%s s' % values[0].split(',')[0] value = ' '.join(values)", "exiftool is installed. :return: \"\"\" pass def check_redline(): \"\"\" checks", "care about and what's needed for # every file type....", "% values[0].split(',')[0] value = ' '.join(values) d[key] = value except", "= line.split(':', 1) if key_ != 'None': d[key_] = value", "type.... RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture',", "\"\"\" checks if redline is installed :return: \"\"\" pass def", ":return: \"\"\" pass def check_ffprobe(): \"\"\" checks if ffprobe is", "= split_v[4].split(' fps')[0].replace(' ', '') if 'Duration' in key: d['Track", "lumberdata from r3d files. 
REDLINE is a command line interface", "% each) return d def get_red_data(filein): \"\"\" method for pulling", "= line.replace('\\t', '') line = line.replace(' ', '') try: key_,", "= line.replace(' ', '') try: key_, value = line.split(':', 1)", "r'exiftool %s' % filein output = cgl_execute(command=command, verbose=False, print_output=print_output) for", "\"\"\" pass def check_redline(): \"\"\" checks if redline is installed", "for each in output['printout']: key, value = re.split(\"\\s+:\\s+\", each) d[key]", "%s' % each) return d def get_red_data(filein): \"\"\" method for", "'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand',", "d['Image Size'] = split_v[2].split()[0] d['Source Image Width'], d['Source Image Height']", "'ffprobe': command = r'%s %s' % ('ffprobe', filein) output =", "'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration']", "# TODO I'm going to need to make a dictionary", "'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName', 'Take'] def", "tool == 'exiftool': command = r'exiftool %s' % filein output", "== 'exiftool': command = r'exiftool %s' % filein output =", "Width'], d['Source Image Height'] = d['Image Size'].split('x') d['Video Frame Rate']", "line in os.popen(command).readlines(): line = line.strip('\\n') line = line.replace('\\t', '')", "'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName', 'Take']", "ext = os.path.splitext(filein)[-1] d = {} if tool == 'exiftool':", "'SensorName', 'Take'] def check_exiftool(): \"\"\" checks if exiftool is installed.", "if redline is installed :return: \"\"\" pass def check_ffprobe(): \"\"\"", "module this is provided as a way to parse output", "line.strip('\\n') line = line.replace('\\t', '') line = line.replace(' ', '')", "exiftool \"\"\" ext = os.path.splitext(filein)[-1] d = {} if tool", "moment it's only designed to get the lumberdata for a", "https://www.red.com/downloads/options?itemInternalId=16144 :param filein: :return: \"\"\" file_, ext_ = os.path.splitext(filein) if", "'EXIF:LensMake'] MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture',", "to issues with the exiftool module this is provided as", "'FrameWidth', 'Aperture', 'ISO', 'Date', 'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber',", "get_red_data(filein): \"\"\" method for pulling lumberdata from r3d files. REDLINE", "'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth',", "a single file. :param filein: :return: dictionary containing lumberdata from", "for # every file type.... RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth',", "for pulling lumberdata from r3d files. 
REDLINE is a command", "from exiftool \"\"\" ext = os.path.splitext(filein)[-1] d = {} if", "values[0].split(',')[0] value = ' '.join(values) d[key] = value except ValueError:", "filein output = cgl_execute(command=command, verbose=False, print_output=print_output) for each in output['printout']:", "a command line interface from RED that is required for", "a dictionary for my big list of stuff i care", "'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake'] MOV = ['EXIF:LensModel',", ":return: \"\"\" file_, ext_ = os.path.splitext(filein) if ext_.upper() == '.R3D':", "def get_red_data(filein): \"\"\" method for pulling lumberdata from r3d files.", ":return: \"\"\" pass def check_redline(): \"\"\" checks if redline is", "return d elif tool == 'ffprobe': command = r'%s %s'", "files. REDLINE is a command line interface from RED that", "to need to make a dictionary for my big list", "'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName',", "= values[0] values.pop(0) if 'Stream' in key: split_v = values[1].split(',')", "'Take'] def check_exiftool(): \"\"\" checks if exiftool is installed. :return:", "= ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date',", "Rate'] = split_v[4].split(' fps')[0].replace(' ', '') if 'Duration' in key:", "stuff i care about and what's needed for # every", "checks if exiftool is installed. :return: \"\"\" pass def check_redline():", "filein d = {} for line in os.popen(command).readlines(): line =", "the system commands and cglexecute. For the moment it's only", "and cglexecute. For the moment it's only designed to get", "commands and cglexecute. For the moment it's only designed to", "values = re.split(\":\\s+\", each) key = values[0] values.pop(0) if 'Stream'", "is provided as a way to parse output directly from", "file type.... RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber',", "'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC',", "d['Image Size'].split('x') d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace(' ', '')", "'') try: key_, value = line.split(':', 1) if key_ !=", "def check_exiftool(): \"\"\" checks if exiftool is installed. :return: \"\"\"", "file. :param filein: :return: dictionary containing lumberdata from exiftool \"\"\"", "to get the lumberdata for a single file. :param filein:", "try: values = re.split(\":\\s+\", each) key = values[0] values.pop(0) if", "('ffprobe', filein) output = cgl_execute(command=command) for each in output['printout']: try:", "each) return d def get_red_data(filein): \"\"\" method for pulling lumberdata", "\"\"\" checks if ffprobe is installed :return: \"\"\" pass def", "needed for # every file type.... RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight',", "cglexecute. For the moment it's only designed to get the", "os import re # TODO I'm going to need to", "= values[1].split(',') d['Image Size'] = split_v[2].split()[0] d['Source Image Width'], d['Source", "value return d elif tool == 'ffprobe': command = r'%s", "for this https://www.red.com/downloads/options?itemInternalId=16144 :param filein: :return: \"\"\" file_, ext_ =", "about and what's needed for # every file type.... 
RAF", "d['Source Image Width'], d['Source Image Height'] = d['Image Size'].split('x') d['Video", "'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)',", "import os import re # TODO I'm going to need", "filein) output = cgl_execute(command=command) for each in output['printout']: try: values", "'Date', 'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber', 'FPS', 'AspectRatio',", "%s --printMeta 1' % filein d = {} for line", "RED that is required for this https://www.red.com/downloads/options?itemInternalId=16144 :param filein: :return:", "in output['printout']: key, value = re.split(\"\\s+:\\s+\", each) d[key] = value", "'Duration' in key: d['Track Duration'] = '%s s' % values[0].split(',')[0]", "'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin',", "command = r'%s %s' % ('ffprobe', filein) output = cgl_execute(command=command)", "interface from RED that is required for this https://www.red.com/downloads/options?itemInternalId=16144 :param", "if key_ != 'None': d[key_] = value except ValueError: pass", "print_output=print_output) for each in output['printout']: key, value = re.split(\"\\s+:\\s+\", each)", "value = ' '.join(values) d[key] = value except ValueError: print('skipping", "%s' % filein output = cgl_execute(command=command, verbose=False, print_output=print_output) for each", "R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO',", "'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date', 'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel',", ":return: dictionary containing lumberdata from exiftool \"\"\" ext = os.path.splitext(filein)[-1]", "'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake'] MOV", "command = r'exiftool %s' % filein output = cgl_execute(command=command, verbose=False,", "installed. :return: \"\"\" pass def check_redline(): \"\"\" checks if redline", "'.R3D': command = r'REDLINE --i %s --printMeta 1' % filein", "output directly from exiftool through the system commands and cglexecute.", "filein: :return: dictionary containing lumberdata from exiftool \"\"\" ext =", "every file type.... RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate',", "method for pulling lumberdata from r3d files. REDLINE is a", "'QuickTime:Duration'] R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture',", "file_, ext_ = os.path.splitext(filein) if ext_.upper() == '.R3D': command =", "ext_.upper() == '.R3D': command = r'REDLINE --i %s --printMeta 1'", "d = {} for line in os.popen(command).readlines(): line = line.strip('\\n')", "'') line = line.replace(' ', '') try: key_, value =", "'ISO', 'Date', 'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber', 'FPS',", "return d def get_red_data(filein): \"\"\" method for pulling lumberdata from", "way to parse output directly from exiftool through the system", "is installed :return: \"\"\" pass def check_ffprobe(): \"\"\" checks if", "big list of stuff i care about and what's needed", "the exiftool module this is provided as a way to", "get the lumberdata for a single file. 
:param filein: :return:", "'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber', 'FPS', 'AspectRatio', 'Kelvin', 'LensName',", "Due to issues with the exiftool module this is provided", "re # TODO I'm going to need to make a", "'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate',", "= line.strip('\\n') line = line.replace('\\t', '') line = line.replace(' ',", "to parse output directly from exiftool through the system commands", "command line interface from RED that is required for this", "that is required for this https://www.red.com/downloads/options?itemInternalId=16144 :param filein: :return: \"\"\"", "noinspection PyUnresolvedReferences import os import re # TODO I'm going", "'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake', 'QuickTime:VideoFrameRate', 'QuickTime:Duration'] R3D", "except ValueError: print('skipping %s' % each) return d def get_red_data(filein):", "\"\"\" method for pulling lumberdata from r3d files. REDLINE is", "pass def get(filein, tool='exiftool', print_output=False): \"\"\" Due to issues with", "value except ValueError: print('skipping %s' % each) return d def", "from RED that is required for this https://www.red.com/downloads/options?itemInternalId=16144 :param filein:", "'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date', 'AudioSlate', 'VideoSlate',", "'EXIF:Model', 'EXIF:LensMake'] MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber',", "{} if tool == 'exiftool': command = r'exiftool %s' %", "= {} if tool == 'exiftool': command = r'exiftool %s'", "lumberdata from exiftool \"\"\" ext = os.path.splitext(filein)[-1] d = {}", "pass def check_ffprobe(): \"\"\" checks if ffprobe is installed :return:", "each in output['printout']: try: values = re.split(\":\\s+\", each) key =", "in key: d['Track Duration'] = '%s s' % values[0].split(',')[0] value", "pass def check_redline(): \"\"\" checks if redline is installed :return:", "verbose=False, print_output=print_output) for each in output['printout']: key, value = re.split(\"\\s+:\\s+\",", "--printMeta 1' % filein d = {} for line in", "is installed :return: \"\"\" pass def get(filein, tool='exiftool', print_output=False): \"\"\"", "split_v[4].split(' fps')[0].replace(' ', '') if 'Duration' in key: d['Track Duration']", "i care about and what's needed for # every file", "required for this https://www.red.com/downloads/options?itemInternalId=16144 :param filein: :return: \"\"\" file_, ext_", "%s' % ('ffprobe', filein) output = cgl_execute(command=command) for each in", "d['Source Image Height'] = d['Image Size'].split('x') d['Video Frame Rate'] =", "= r'REDLINE --i %s --printMeta 1' % filein d =", "elif tool == 'ffprobe': command = r'%s %s' % ('ffprobe',", "'SensorID', 'SensorName', 'Take'] def check_exiftool(): \"\"\" checks if exiftool is", "filein: :return: \"\"\" file_, ext_ = os.path.splitext(filein) if ext_.upper() ==", "= {} for line in os.popen(command).readlines(): line = line.strip('\\n') line", "is a command line interface from RED that is required", "'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 
'EXIF:Model', 'EXIF:LensMake']", "'Aperture', 'ISO', 'Date', 'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber',", ":param filein: :return: \"\"\" file_, ext_ = os.path.splitext(filein) if ext_.upper()", "for each in output['printout']: try: values = re.split(\":\\s+\", each) key", "MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate', 'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength',", "this is provided as a way to parse output directly", "1' % filein d = {} for line in os.popen(command).readlines():", "if exiftool is installed. :return: \"\"\" pass def check_redline(): \"\"\"", "s' % values[0].split(',')[0] value = ' '.join(values) d[key] = value", "'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date', 'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN',", "check_ffprobe(): \"\"\" checks if ffprobe is installed :return: \"\"\" pass", "= cgl_execute(command=command) for each in output['printout']: try: values = re.split(\":\\s+\",", "'') if 'Duration' in key: d['Track Duration'] = '%s s'", "1) if key_ != 'None': d[key_] = value except ValueError:", "= '%s s' % values[0].split(',')[0] value = ' '.join(values) d[key]", "for a single file. :param filein: :return: dictionary containing lumberdata", "'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake'] MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth',", "# every file type.... RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate',", "= re.split(\":\\s+\", each) key = values[0] values.pop(0) if 'Stream' in", "cgl_execute(command=command, verbose=False, print_output=print_output) for each in output['printout']: key, value =", "PyUnresolvedReferences import os import re # TODO I'm going to", "re.split(\"\\s+:\\s+\", each) d[key] = value return d elif tool ==", ":return: \"\"\" pass def get(filein, tool='exiftool', print_output=False): \"\"\" Due to", ":param filein: :return: dictionary containing lumberdata from exiftool \"\"\" ext", "from r3d files. REDLINE is a command line interface from" ]
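# Usage sketch (hypothetical paths; assumes exiftool, ffprobe and REDLINE are on the
# PATH and that cgl_execute is importable in this environment):
#
#   meta = get('/shots/0010/plate.mov', tool='ffprobe')
#   print(meta.get('Video Frame Rate'), meta.get('Track Duration'))
#
#   red_meta = get_red_data('/shots/0010/A001_C001.R3D')
#   print(red_meta.get('FPS'), red_meta.get('SensorName'))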
[ "self._target_workspace_check.get_position() valid = self._scene.check_target_in_workspace(pos_to_check) if not valid: raise InvalidActionError('Target is", "This is why # the velocites are set to 0", "enable_path_observations(self, value: bool) -> None: if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and", "= self._scene.get_observation() camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects()) elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY: self._assert_action_space(arm_action,", "is, it is concentrated on close objects. W = np.array([1", "joint limits --- # get the minimum distance of each", "the current and the reference configuration -> -> \\nabla_q L(q)", "TaskEnvironment(object): def __init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task:", "greater the distance the smaller # the weight. That is,", "1 / np.sum(np.square(d_6_T)) , 1 / np.sum(np.square(d_7_T)) ]) * 0.1", "ob.get_position(), \"wrist_camera\": ob.get_position()}) return positions def get_all_graspable_object_poses(self, relative_to_cameras=False): \"\"\" returns", "raise an issues on this task.' % self._task.get_name()) from e", "import Demo from rlbench.observation_config import ObservationConfig _TORQUE_MAX_VEL = 9999 _DT", "poses.append({\"left_shoulder_camera\": ob.get_pose(), \"right_shoulder_camera\": ob.get_pose(), \"front_camera\": ob.get_pose(), \"wrist_camera\": ob.get_pose()}) return poses", "dataset root provided.\") demos = utils.get_stored_demos( amount, image_paths, self._dataset_root, self._variation_number,", "import Scene from rlbench.backend.task import Task from rlbench.demo import Demo", "<= 0: raise RuntimeError( 'Could not collect demos. Maybe a", "= [] self._path_observations = self._path_action( list(arm_action), relative_to=self._robot.arm.get_tip()) else: raise RuntimeError('Unrecognised", "self._robot.gripper.release() success, terminate = self._task.success() task_reward = self._task.reward() reward =", "np.matmul(J_plus, J)), dL) # the provided jacobian seems to be", "np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs) d_6_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6) - p_obs) d_7_T =", "if type(setup[\"W\"]) is list: W = np.array(setup[\"W\"]) elif setup[\"W\"] is", "cur_positions, prev_values, atol=0.001) prev_values = cur_positions done = reached or", "int: return self._task.variation_count() def reset(self) -> (List[str], Observation): self._scene.reset() try:", "import Observation from rlbench.backend.robot import Robot from rlbench.backend.scene import Scene", "# get the minimum distance of each joint to its", "qy, qz, qw] self._ee_action(list(new_pose)) elif self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME: self._assert_action_space(arm_action, (7,))", "the current and the reference configuration. -> L(q) \"\"\" if", "- p_obs) d_5_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs) d_6_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)", "gripper close action, the check for grasp. for g_obj in", "self._scene.step() # if needed save some images if camcorder: obs", "if current_ee != ee_action: done = False while not done:", "%d variations.' 
% ( v, self.variation_count())) self._variation_number = v def", "return self._scene.get_observation() def get_joint_upper_velocity_limits(self): return self._robot.arm.get_joint_upper_velocity_limits() def get_all_graspable_objects(self): return self._task.get_graspable_objects()", "from e self._reset_called = True # redundancy resolution self._last_e =", "_torque_action(self, action): self._robot.arm.set_joint_target_velocities( [(_TORQUE_MAX_VEL if t < 0 else -_TORQUE_MAX_VEL)", "the distance the smaller # the weight. That is, it", "< setup[\"cut-off_error\"]: q_dot_redundancy = np.array([0.0] * 7) self._last_e = e", "\"Call 'reset' before calling 'step' on a task.\") # action", "self._pyrep = pyrep self._robot = robot self._scene = scene self._task", "limits --- # get the minimum distance of each joint", "a path.') from e def step(self, action, camcorder=None) -> (Observation,", "* d/dq (p_i^0 (q_1,..., q_i) - p_obs) # where p_i^0", "relative_to is not None: self._target_workspace_check.set_position( pos_to_check, relative_to) pos_to_check = self._target_workspace_check.get_position()", "z] + [qx, qy, qz, qw] self._ee_action(list(new_pose)) elif self._action_mode.arm ==", "ArmActionMode.EE_POSE_PLAN_EE_FRAME): raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or ' 'ABS_EE_POSE_PLAN action", "+ \\ -np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \\", "on this task.' % self._task.get_name()) from e self._reset_called = True", "self._scene.get_observation() def get_joint_upper_velocity_limits(self): return self._robot.arm.get_joint_upper_velocity_limits() def get_all_graspable_objects(self): return self._task.get_graspable_objects() def", "calculate the derivatives in each dimension dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1))", "self._torque_action(new_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME: self._assert_action_space(arm_action, (7,)) self._ee_action(list(arm_action)) elif", "self._path_observations = [] self._path_observations = self._path_action(list(arm_action)) elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME:", "be: %s, but was shape: %s' % ( str(expected_shape), str(np.shape(action))))", "= -np.matmul(d_1_T, dA_1.dot(p_1)) + \\ -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \\ -np.matmul(d_3_T,", "RuntimeError('Only available in DELTA_EE_POSE_PLAN or ' 'ABS_EE_POSE_PLAN action mode.') return", "'ABS_EE_POSE_PLAN action mode.') return self._path_observations def get_demos(self, amount: int, live_demos:", "None] = None, max_attempts: int = _MAX_DEMO_ATTEMPTS, ) -> List[Demo]:", "Quaternion( qw, qx, qy, qz) qw, qx, qy, qz =", "weight. That is, it is concentrated on close objects. 
W", "np.linalg.pinv(J) # weighting if type(setup[\"W\"]) is list: W = np.array(setup[\"W\"])", "int) -> None: if v >= self.variation_count(): raise TaskEnvironmentError( 'Requested", "self._robot.get_transformation_matrices_derivatives() p_1, p_2, p_3, p_4, p_5, p_6, p_7 = self._robot.get_link_positions_in_ref_frames()", "ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME: self._assert_action_space(arm_action, (7,)) self._path_observations = [] self._path_observations = self._path_action(list(arm_action)) elif", "# weighting if type(setup[\"W\"]) is list: W = np.array(setup[\"W\"]) elif", "- ref_pos) return e * W, 0.5*np.dot(e,e*W) def get_loss_collision_avoidance(self, W,", "self._scene.step() elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) cur = np.array(self._robot.arm.get_joint_positions())", "self._robot.get_transformation_matrices() dA_1, dA_2, dA_3, dA_4, dA_5, dA_6, dA_7 = self._robot.get_transformation_matrices_derivatives()", "= utils.get_stored_demos( amount, image_paths, self._dataset_root, self._variation_number, self._task.get_name(), self._obs_config) else: ctr_loop", "and the reference consfiguration as well as its partial derivatives", "gripper action current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else", "else: # If gripper open action, the check for ungrasp.", "= np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7) - p_obs) # now we can calculate the", "= v def variation_count(self) -> int: return self._task.variation_count() def reset(self)", "advance d_1_T = np.transpose(A_1.dot(p_1) - p_obs) d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) -", "the workspace; if not, then quick reject # Only checks", "ArmActionMode.DELTA_EE_POSE_WORLD_FRAME: self._assert_action_space(arm_action, (7,)) a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw", "== ArmActionMode.ABS_JOINT_VELOCITY: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_velocities(arm_action) self._scene.step() # if needed save", "i in range(amount): attempts = max_attempts while attempts > 0:", "positions of all graspable object relative to all enabled cameras", "-np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7)) dq_6 = -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(A_7).dot(p_7))", "mode.') if current_ee != ee_action: done = False while not", "ArmActionMode.EE_POSE_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._ee_action( list(arm_action), relative_to=self._robot.arm.get_tip()) elif self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME:", "actions - q_dot_redundancy, L def get_loss_reference_position(self, ref_pos, W): \"\"\" Calculates", "and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME): raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or", "from pyrep import PyRep from pyrep.errors import IKError from pyrep.objects", "if t < 0 else -_TORQUE_MAX_VEL) for t in action])", "demo when no dataset root provided.\") demos = utils.get_stored_demos( amount,", "Summed squarred error between the current and the reference configuration.", "np.sum(np.abs(e - self._last_e)) if self._last_e is not None and e_dot", "partial derivatives with respect to al q's for redundancy resoltuion.", "None] = None, max_attempts: int = _MAX_DEMO_ATTEMPTS) -> List[Demo]: demos", "task_reward = 
    def reset(self) -> (List[str], Observation):
        self._scene.reset()
        try:
            desc = self._scene.init_episode(
                self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS,
                randomly_place=not self._static_positions)
        except (BoundaryError, WaypointError) as e:
            raise TaskEnvironmentError(
                'Could not place the task %s in the scene. This should not '
                'happen, please raise an issue on this task.'
                % self._task.get_name()) from e
        self._reset_called = True
        # redundancy resolution
        self._last_e = None
        # Returns a list of descriptions and the first observation
        return desc, self._scene.get_observation()
    def get_observation(self) -> Observation:
        return self._scene.get_observation()

    def get_joint_upper_velocity_limits(self):
        return self._robot.arm.get_joint_upper_velocity_limits()

    def get_all_graspable_objects(self):
        return self._task.get_graspable_objects()

    def get_all_graspable_object_positions(self, relative_to_camera=False):
        """Returns the positions of all graspable objects, optionally
        relative to all enabled cameras."""
        objects = self._task.get_graspable_objects()
        positions = []
        for ob in objects:
            if relative_to_camera:
                positions.append(
                    self._scene.get_object_position_relative_to_cameras(ob))
            else:
                positions.append(
                    {"left_shoulder_camera": ob.get_position(),
                     "right_shoulder_camera": ob.get_position(),
                     "front_camera": ob.get_position(),
                     "wrist_camera": ob.get_position()})
        return positions
    def get_all_graspable_object_poses(self, relative_to_cameras=False):
        """Returns the poses of all graspable objects, optionally relative
        to all enabled cameras."""
        objects = self._task.get_graspable_objects()
        poses = []
        for ob in objects:
            if relative_to_cameras:
                poses.append(
                    self._scene.get_object_pose_relative_to_cameras(ob))
            else:
                poses.append({"left_shoulder_camera": ob.get_pose(),
                              "right_shoulder_camera": ob.get_pose(),
                              "front_camera": ob.get_pose(),
                              "wrist_camera": ob.get_pose()})
        return poses

    def _assert_action_space(self, action, expected_shape):
        if np.shape(action) != expected_shape:
            raise RuntimeError(
                'Expected the action shape to be: %s, but was shape: %s' % (
                    str(expected_shape), str(np.shape(action))))

    def _assert_unit_quaternion(self, quat):
        if not np.isclose(np.linalg.norm(quat), 1.0):
            raise RuntimeError('Action contained non unit quaternion!')
    def _torque_action(self, action):
        # Torque control trick: command an unreachable target velocity and
        # cap each joint's force at |t|, so the motors continuously apply a
        # torque of magnitude |t| (sign convention as expected by PyRep).
        self._robot.arm.set_joint_target_velocities(
            [(_TORQUE_MAX_VEL if t < 0 else -_TORQUE_MAX_VEL)
             for t in action])
        self._robot.arm.set_joint_forces(np.abs(action))

    def _ee_action(self, action, relative_to=None):
        self._assert_unit_quaternion(action[3:])
        try:
            joint_positions = self._robot.arm.solve_ik(
                action[:3], quaternion=action[3:], relative_to=relative_to)
            self._robot.arm.set_joint_target_positions(joint_positions)
        except IKError as e:
            raise InvalidActionError('Could not find a path.') from e
        done = False
        prev_values = None
        # Move until reached target joint positions or until we stop moving
        # (e.g. when we collide with something)
        while not done:
            self._scene.step()
            cur_positions = self._robot.arm.get_joint_positions()
            reached = np.allclose(cur_positions, joint_positions, atol=0.01)
            not_moving = False
            if prev_values is not None:
                not_moving = np.allclose(
                    cur_positions, prev_values, atol=0.001)
            prev_values = cur_positions
            done = reached or not_moving
    def _path_action(self, action, relative_to=None):
        self._assert_unit_quaternion(action[3:])
        try:
            # Check if the target is in the workspace; if not, then quick
            # reject. Only checks position, not rotation.
            pos_to_check = action[:3]
            if relative_to is not None:
                self._target_workspace_check.set_position(
                    pos_to_check, relative_to)
                pos_to_check = self._target_workspace_check.get_position()
            valid = self._scene.check_target_in_workspace(pos_to_check)
            if not valid:
                raise InvalidActionError('Target is outside of workspace.')
            path = self._robot.arm.get_path(
                action[:3], quaternion=action[3:], ignore_collisions=True,
                relative_to=relative_to)
            done = False
            observations = []
            while not done:
                done = path.step()
                self._scene.step()
                if self._enable_path_observations:
                    observations.append(self._scene.get_observation())
                success, terminate = self._task.success()
                # If the task succeeds while traversing the path, break early
                if success:
                    break
            observations.append(self._scene.get_observation())
            return observations
        except IKError as e:
            raise InvalidActionError('Could not find a path.') from e
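    # Illustrative caller-side sketch (assumption: the agent runs in
    # ABS_EE_POSE_PLAN_WORLD_FRAME mode and `task_env` was obtained via
    # Environment.get_task); shows how the per-step observations recorded by
    # _path_action are retrieved. Commented out because this belongs to the
    # caller, not to this class:
    #
    #   task_env.enable_path_observations(True)
    #   obs, reward, terminate = task_env.step(list(pose) + [1.0])
    #   path_obs = task_env.get_path_observations()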
    def step(self, action, camcorder=None) -> (Observation, int, bool):
        # Returns observation, reward, terminate
        if not self._reset_called:
            raise RuntimeError(
                "Call 'reset' before calling 'step' on a task.")

        # The action should contain 1 extra value for the gripper
        # open/close state
        arm_action = np.array(action[:-1])
        ee_action = action[-1]

        if ee_action < 0.0 or ee_action > 1.0:
            raise ValueError('Gripper action expected to be within 0 and 1.')

        # Discretize the gripper action
        current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9
                      else 0.0)
        ee_action = 1.0 if ee_action > 0.5 else 0.0

        # If the gripper state changes, hold the arm still for this step
        if current_ee != ee_action:
            arm_action = np.array([0.0] * 7)

        if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY:
            self._assert_action_space(arm_action,
                                      (len(self._robot.arm.joints),))
            self._robot.arm.set_joint_target_velocities(arm_action)
            self._scene.step()
            # if needed save some images
            if camcorder:
                obs = self._scene.get_observation()
                camcorder.save(obs, self.get_robot_visuals(),
                               self.get_all_graspable_objects())

        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY:
            self._assert_action_space(arm_action,
                                      (len(self._robot.arm.joints),))
            cur = np.array(self._robot.arm.get_joint_velocities())
            self._robot.arm.set_joint_target_velocities(cur + arm_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.ABS_JOINT_POSITION:
            self._assert_action_space(arm_action,
                                      (len(self._robot.arm.joints),))
            self._robot.arm.set_joint_target_positions(arm_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION:
            self._assert_action_space(arm_action,
                                      (len(self._robot.arm.joints),))
            cur = np.array(self._robot.arm.get_joint_positions())
            self._robot.arm.set_joint_target_positions(cur + arm_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE:
            self._assert_action_space(
                arm_action, (len(self._robot.arm.joints),))
            self._torque_action(arm_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_TORQUE:
            cur = np.array(self._robot.arm.get_joint_forces())
            new_action = cur + arm_action
            self._torque_action(new_action)
            self._scene.step()
        elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._ee_action(list(arm_action))

        elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._path_observations = []
            self._path_observations = self._path_action(list(arm_action))

        elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
            x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
            new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(
                qw, qx, qy, qz)
            qw, qx, qy, qz = list(new_rot)
            new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
            self._path_observations = []
            self._path_observations = self._path_action(list(new_pose))

        elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
            x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
            new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(
                qw, qx, qy, qz)
            qw, qx, qy, qz = list(new_rot)
            new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
            self._ee_action(list(new_pose))

        elif self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._ee_action(
                list(arm_action), relative_to=self._robot.arm.get_tip())

        elif self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._path_observations = []
            self._path_observations = self._path_action(
                list(arm_action), relative_to=self._robot.arm.get_tip())

        else:
            raise RuntimeError('Unrecognised action mode.')
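        # Note on the delta EE modes above: pyquaternion composes rotations
        # by multiplication, with the delta applied on the left, and uses
        # (w, x, y, z) ordering while PyRep poses are stored as
        # (x, y, z, qx, qy, qz, qw), hence the reshuffling. Illustrative
        # check (two 90 degree turns about z compose to 180 degrees):
        #
        #   q = Quaternion(axis=[0, 0, 1], degrees=90)
        #   assert np.isclose((q * q).degrees, 180.0)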
        if current_ee != ee_action:
            done = False
            while not done:
                done = self._robot.gripper.actuate(ee_action, velocity=0.2)
                self._pyrep.step()
                self._task.step()
                # if needed save some images
                if camcorder:
                    obs = self._scene.get_observation()
                    camcorder.save(obs, self.get_robot_visuals(),
                                   self.get_all_graspable_objects())
            if ee_action == 0.0 and self._attach_grasped_objects:
                # If gripper close action, then check for grasp.
                for g_obj in self._task.get_graspable_objects():
                    self._robot.gripper.grasp(g_obj)
            else:
                # If gripper open action, then check for ungrasp.
                self._robot.gripper.release()

        success, terminate = self._task.success()
        task_reward = self._task.reward()
        reward = float(success) if task_reward is None else task_reward
        return self._scene.get_observation(), reward, terminate
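    # Minimal caller-side control loop (illustrative; assumes a 7-DoF arm,
    # ABS_JOINT_VELOCITY action mode and a `task_env` obtained via
    # Environment.get_task). Commented out because it is caller code:
    #
    #   desc, obs = task_env.reset()
    #   for _ in range(100):
    #       action = np.zeros(8)   # 7 joint velocities + 1 gripper value
    #       action[-1] = 1.0       # keep the gripper open
    #       obs, reward, terminate = task_env.step(action)
    #       if terminate:
    #           break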
    def resolve_redundancy_joint_velocities(self, actions, setup):
        """
        Resolves redundant self-motion into the nullspace without changing
        the gripper tip position.
        :param actions: Current actions without redundancy resolution.
        :param setup: Setup for redundancy resolution defining the mode,
            weighting etc.
        :return: Array of joint velocities, which move the robot's tip
            according to the provided actions yet push the joint position
            towards a reference position.
        """
        # get the Jacobian
        J = self._robot.arm.get_jacobian()
        J = np.transpose(J)
        J = np.flip(J)
        # reorder and keep only the positional part of the Jacobian
        J = J[-3:]
        # compute the pseudo inverse
        J_plus = np.linalg.pinv(J)

        # weighting
        if type(setup["W"]) is list:
            W = np.array(setup["W"])
        elif setup["W"] is None:
            # use the default weighting computed later
            W = None
        else:
            raise TypeError("Unsupported type %s for weighting vector."
                            % type(setup["W"]))

        # compute the error
        if setup["mode"] == "reference_position":
            dL, L = self.get_loss_reference_position(setup["ref_position"], W)
        elif setup["mode"] == "collision_avoidance":
            dL, L = self.get_loss_collision_avoidance(W, setup)

        # project the loss gradient into the nullspace of the Jacobian
        q_dot_redundancy = setup["alpha"] * np.matmul(
            (np.identity(len(self._robot.arm.joints)) - np.matmul(J_plus, J)),
            dL)

        # The provided Jacobian seems to be inaccurate, resulting in slight
        # movement of the ee. This is why the velocities are set to 0 once
        # the error stops changing much.
        e = dL
        if setup["cut-off_error"] is not None and self._last_e is not None:
            e_dot = np.sum(np.abs(e - self._last_e))
            if e_dot < setup["cut-off_error"]:
                q_dot_redundancy = np.array([0.0] * 7)
        self._last_e = e
        return actions - q_dot_redundancy, L
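    # Worked example of the nullspace projection used above (illustrative,
    # with a made-up 3x7 positional Jacobian J): for any gradient dL, the
    # projected velocity q_dot = alpha * (I - pinv(J) @ J) @ dL satisfies
    # J @ q_dot ~ 0, i.e. it produces (to first order) no end-effector
    # translation:
    #
    #   J = np.random.rand(3, 7)
    #   dL = np.random.rand(7)
    #   N = np.identity(7) - np.linalg.pinv(J) @ J
    #   q_dot = 0.01 * N @ dL
    #   assert np.allclose(J @ q_dot, 0.0, atol=1e-10)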
This", "self._robot.arm.set_joint_target_positions(arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) cur =", "\\nabla_q L(q) 2: Summed squarred error between the current and", "p_obs) d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) - p_obs) d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) -", "Quaternion from pyrep import PyRep from pyrep.errors import IKError from", "def __init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task: Task,", "/ np.sum(np.square(d_2_T)) , 1 / np.sum(np.square(d_3_T)) , 1 / np.sum(np.square(d_4_T))", "PyRep from pyrep.errors import IKError from pyrep.objects import Dummy, Object", "arm_action = np.array([0.0]*7) if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_velocities(arm_action)", "= self._robot.get_transformation_matrices_derivatives() p_1, p_2, p_3, p_4, p_5, p_6, p_7 =", "success, terminate = self._task.success() # If the task succeeds while", "compute the pseudo inverse J_plus = np.linalg.pinv(J) # weighting if", "for gripper open close state arm_action = np.array(action[:-1]) ee_action =", "# get the position of the object p_obs = self._task.obstacle.get_position()", "redundancy resoltuion. -> L(q) = 1/2 sum_{i=1}^N w_i (q_i -", "done = False prev_values = None # Move until reached", "\\ -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7)) dq_6 = -np.matmul(d_6_T,", "self._task.obstacle.get_position() + np.array([0, 0, 0.33]) - self._robot.arm.joints[0].get_position() #p_obs = self._task.obstacle.get_position()", "observation, reward, done, info if not self._reset_called: raise RuntimeError( \"Call", "== 0: raise RuntimeError( \"Can't ask for stored demo when", "path.') from e done = False prev_values = None #", "for stored demo when no dataset root provided.\") demos =", "# If gripper open action, the check for ungrasp. self._robot.gripper.release()", "= None self._enable_path_observations = False self._scene.load(self._task) self._pyrep.start() self._target_workspace_check = Dummy.create()", "the robot's tip according to the provided actions yet push", "enabled cameras \"\"\" objects = self._task.get_graspable_objects() poses = [] for", "str: return self._task.get_name() def sample_variation(self) -> int: self._variation_number = np.random.randint(", "self._last_e = None # Returns a list of descriptions and", ", 1 / np.sum(np.square(d_3_T)) , 1 / np.sum(np.square(d_4_T)) , 1", "1 / np.sum(np.square(d_5_T)) , 1 / np.sum(np.square(d_6_T)) , 1 /", "/ np.sum(np.square(d_3_T)) , 1 / np.sum(np.square(d_4_T)) , 1 / np.sum(np.square(d_5_T))", "== ArmActionMode.EE_POSE_PLAN_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._path_observations = [] self._path_observations = self._path_action(", "self.variation_count())) self._variation_number = v def variation_count(self) -> int: return self._task.variation_count()", "as e: raise InvalidActionError('Could not find a path.') from e", "demo.random_seed = random_seed demos.append(demo) break except Exception as e: attempts", "workspace.') path = self._robot.arm.get_path( action[:3], quaternion=action[3:], ignore_collisions=True, relative_to=relative_to) done =", "-> based on the reciprocal of the distance. 
The greater", "self._variation_number = v def variation_count(self) -> int: return self._task.variation_count() def", "cur_positions done = reached or not_moving def _path_action(self, action, relative_to=None):", "robot: Robot, scene: Scene, task: Task, action_mode: ActionMode, dataset_root: str,", "\"wrist_camera\": ob.get_pose()}) return poses def _assert_action_space(self, action, expected_shape): if np.shape(action)", "dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) + \\ -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \\", "RuntimeError( \"Can't ask for a stored demo when no dataset", "0, self._task.variation_count()) return self._variation_number def set_variation(self, v: int) -> None:", "utils.get_stored_demos( amount, image_paths, self._dataset_root, self._variation_number, self._task.get_name(), self._obs_config) else: ctr_loop =", "= Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(qw, qx, qy, qz)", "qy, qz, qw] self._path_observations = [] self._path_observations = self._path_action(list(new_pose)) elif", "camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects()) elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) cur", "for grasp. for g_obj in self._task.get_graspable_objects(): self._robot.gripper.grasp(g_obj) else: # If", "== ArmActionMode.ABS_JOINT_POSITION: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_positions(arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION:", "-np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7)) dq_6 = -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(p_6))", "and the reference configuration. -> L(q) \"\"\" if W is", "= np.transpose(A_1.dot(A_2).dot(p_2) - p_obs) d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs) d_4_T", "j in self._robot.arm.joints]) lower_joint_limits = np.array(setup[\"lower_joint_pos_limit\"]) upper_joint_limits = np.array(setup[\"upper_joint_pos_limit\"]) min_j_distances", "from e def step(self, action, camcorder=None) -> (Observation, int, bool):", "+ np.sqrt(np.dot(d_7_T, d_7_T))*W[6] return dL, L def enable_path_observations(self, value: bool)", "the error stops changing much. e = dL if setup[\"cut-off_error\"]", "-np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) + \\ -np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3))", "if np.shape(action) != expected_shape: raise RuntimeError( 'Expected the action shape", "observations = [] while not done: done = path.step() self._scene.step()", "if not np.isclose(np.linalg.norm(quat), 1.0): raise RuntimeError('Action contained non unit quaternion!')", "not None: self._target_workspace_check.set_position( pos_to_check, relative_to) pos_to_check = self._target_workspace_check.get_position() valid =", "self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS, randomly_place=not self._static_positions) except (BoundaryError, WaypointError) as e: raise", "A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_5 = -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) +", "(p_i^0 (q_1,..., q_i) - p_obs) # where p_i^0 = (\\prod_{j=1}^i", "not collect demos. 
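    # Quick numeric check of the formulas above (illustrative values): with
    # q = [0.1, -0.2], q_ref = [0.0, 0.0] and W = [1.0, 2.0], the gradient
    # is e * W = [0.1, -0.4] and the loss is
    # L = 0.5 * (1.0 * 0.1**2 + 2.0 * 0.2**2) = 0.045 = 0.5 * dot(e, e * W).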
    def get_loss_collision_avoidance(self, W, setup):
        r"""
        Calculates the loss as well as the respective partial derivatives
        for redundancy resolution with collision avoidance. This only works
        with tasks that include an obstacle.
        -> we use the reciprocal of the distance between each link and an
        obstacle as our loss
        :param W: Weighting vector.
        :param setup: Setup for redundancy resolution defining the mode,
            weighting etc.
        :return:
            1: The partial derivatives of the loss -> \nabla_q L(q)
            2: The loss as shown above. -> L(q)
        """
        # get the position of the object
        p_obs = self._task.obstacle.get_position() + np.array([0, 0, 0.33]) \
                - self._robot.arm.joints[0].get_position()
        # p_obs = self._task.obstacle.get_position()
        p_obs = np.append(p_obs, 1)

        # get the transformation matrices, their derivatives, and the
        # positions of the links
        A_1, A_2, A_3, A_4, A_5, A_6, A_7 = \
            self._robot.get_transformation_matrices()
        dA_1, dA_2, dA_3, dA_4, dA_5, dA_6, dA_7 = \
            self._robot.get_transformation_matrices_derivatives()
        p_1, p_2, p_3, p_4, p_5, p_6, p_7 = \
            self._robot.get_link_positions_in_ref_frames()

        # the chain rule delivers:
        # d/dq L = (p_i^0(q_1,...,q_i) - p_obs)^T * d/dq (p_i^0(q_1,...,q_i) - p_obs)
        # where p_i^0 = (\prod_{j=1}^i A_j^{j-1}(q_j)) * p_i
        # as the left side of d/dq L is used often, let's calculate it in advance
        d_1_T = np.transpose(A_1.dot(p_1) - p_obs)
        d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) - p_obs)
        d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs)
        d_4_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4) - p_obs)
        d_5_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs)
        d_6_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6) - p_obs)
        d_7_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7) - p_obs)

        # now we can calculate the derivatives in each dimension
        dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) + \
               -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \
               -np.matmul(d_3_T, dA_1.dot(A_2).dot(A_3).dot(p_3)) + \
               -np.matmul(d_4_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) + \
               -np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3)) + \
               -np.matmul(d_4_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_3 = -np.matmul(d_3_T, A_1.dot(A_2).dot(dA_3).dot(p_3)) + \
               -np.matmul(d_4_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_4 = -np.matmul(d_4_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_5 = -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7))
        dq_6 = -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(A_7).dot(p_7))
        dq_7 = -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(dA_7).dot(p_7))

        if W is None:
            # default weighting vector -> based on the reciprocal of the
            # distance. The greater the distance the smaller the weight.
            # That is, it is concentrated on close objects.
            W = np.array([1 / np.sum(np.square(d_1_T)),
                          1 / np.sum(np.square(d_2_T)),
                          1 / np.sum(np.square(d_3_T)),
                          1 / np.sum(np.square(d_4_T)),
                          1 / np.sum(np.square(d_5_T)),
                          1 / np.sum(np.square(d_6_T)),
                          1 / np.sum(np.square(d_7_T))]) * 0.1

        # --- scaling to keep distance to joint limits ---
        # get the minimum distance of each joint to its limit
        joint_positions = np.array(
            [j.get_joint_position() for j in self._robot.arm.joints])
        lower_joint_limits = np.array(setup["lower_joint_pos_limit"])
        upper_joint_limits = np.array(setup["upper_joint_pos_limit"])
        min_j_distances = [np.minimum(u - j, j - l) for l, u, j in
                           zip(lower_joint_limits, upper_joint_limits,
                               joint_positions)]

        # start scaling down the error when a joint limit is 15 degrees
        # away. Scaling is done linearly from 0 to 1 for 0 <= d <= 15 degrees.
        rad_thres = 15 * (np.pi / 180)
        W *= np.array([np.minimum((1 / rad_thres) * d, 1.0)
                       for d in min_j_distances])

        # concatenate the derivatives to a vector and apply the weighting
        dL = np.array([dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7]) * W

        # calculate the loss
        L = np.sqrt(np.dot(d_1_T, d_1_T)) * W[0] \
            + np.sqrt(np.dot(d_2_T, d_2_T)) * W[1] \
            + np.sqrt(np.dot(d_3_T, d_3_T)) * W[2] \
            + np.sqrt(np.dot(d_4_T, d_4_T)) * W[3] \
            + np.sqrt(np.dot(d_5_T, d_5_T)) * W[4] \
            + np.sqrt(np.dot(d_6_T, d_6_T)) * W[5] \
            + np.sqrt(np.dot(d_7_T, d_7_T)) * W[6]

        return dL, L
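    # The analytic gradient above can be sanity-checked against central
    # finite differences (illustrative sketch; `loss(q)` stands for
    # evaluating get_loss_collision_avoidance after setting the arm to q):
    #
    #   eps = 1e-6
    #   dL_fd = np.zeros(7)
    #   for i in range(7):
    #       dq = np.zeros(7)
    #       dq[i] = eps
    #       dL_fd[i] = (loss(q + dq) - loss(q - dq)) / (2 * eps)
    #   # dL_fd should match the analytic dL up to O(eps**2)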
    def enable_path_observations(self, value: bool) -> None:
        if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME
                and self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME
                and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):
            raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '
                               'ABS_EE_POSE_PLAN action mode.')
        self._enable_path_observations = value

    def get_path_observations(self):
        if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME
                and self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME
                and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):
            raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '
                               'ABS_EE_POSE_PLAN action mode.')
        return self._path_observations

class TaskEnvironment(object):

    def __init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task: Task,
                 action_mode: ActionMode, dataset_root: str,
                 obs_config: ObservationConfig,
                 static_positions: bool = False,
                 attach_grasped_objects: bool = True):
        self._pyrep = pyrep
        self._robot = robot
        self._scene = scene
        self._task = task
        self._variation_number = 0
        self._action_mode = action_mode
        self._dataset_root = dataset_root
        self._obs_config = obs_config
        self._static_positions = static_positions
        self._attach_grasped_objects = attach_grasped_objects
        self._reset_called = False
        self._prev_ee_velocity = None
        self._enable_path_observations = False
        self._scene.load(self._task)
        self._pyrep.start()
        self._target_workspace_check = Dummy.create()
        self._last_e = None
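    # Note: in RLBench this class is normally not instantiated by hand; an
    # instance is returned by `Environment.get_task`. A minimal usage sketch
    # (the task class and action mode are illustrative choices, not fixed by
    # this file):
    #
    #   from rlbench.environment import Environment
    #   from rlbench.action_modes import ActionMode, ArmActionMode
    #   from rlbench.tasks import ReachTarget
    #
    #   action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
    #   env = Environment(action_mode, dataset_root='', headless=True)
    #   env.launch()
    #   task_env = env.get_task(ReachTarget)  # -> TaskEnvironment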
    def get_name(self) -> str:
        return self._task.get_name()

    def sample_variation(self) -> int:
        self._variation_number = np.random.randint(
            0, self._task.variation_count())
        return self._variation_number

    def set_variation(self, v: int) -> None:
        if v >= self.variation_count():
            raise TaskEnvironmentError(
                'Requested variation %d, but there are only %d variations.' % (
                    v, self.variation_count()))
        self._variation_number = v

    def variation_count(self) -> int:
        return self._task.variation_count()
    def reset(self) -> (List[str], Observation):
        self._scene.reset()
        try:
            desc = self._scene.init_episode(
                self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS,
                randomly_place=not self._static_positions)
        except (BoundaryError, WaypointError) as e:
            raise TaskEnvironmentError(
                'Could not place the task %s in the scene. This should not '
                'happen, please raise an issue on this task.'
                % self._task.get_name()) from e
        self._reset_called = True
        # redundancy resolution
        self._last_e = None
        # Returns a list of descriptions and the first observation
        return desc, self._scene.get_observation()

    def get_observation(self) -> Observation:
        return self._scene.get_observation()

    def get_joint_upper_velocity_limits(self):
        return self._robot.arm.get_joint_upper_velocity_limits()

    def get_all_graspable_objects(self):
        return self._task.get_graspable_objects()

    def get_robot_visuals(self):
        return self._robot.arm.get_visuals()
    def get_all_graspable_object_positions(self, relative_to_cameras=False):
        """Returns the positions of all graspable objects, optionally
        relative to all enabled cameras."""
        objects = self._task.get_graspable_objects()
        positions = []
        for ob in objects:
            if relative_to_cameras:
                positions.append(
                    self._scene.get_object_position_relative_to_cameras(ob))
            else:
                positions.append(
                    {"left_shoulder_camera": ob.get_position(),
                     "right_shoulder_camera": ob.get_position(),
                     "front_camera": ob.get_position(),
                     "wrist_camera": ob.get_position()})
        return positions

    def get_all_graspable_object_poses(self, relative_to_cameras=False):
        """Returns the poses of all graspable objects, optionally relative
        to all enabled cameras."""
        objects = self._task.get_graspable_objects()
        poses = []
        for ob in objects:
            if relative_to_cameras:
                poses.append(
                    self._scene.get_object_pose_relative_to_cameras(ob))
            else:
                poses.append({"left_shoulder_camera": ob.get_pose(),
                              "right_shoulder_camera": ob.get_pose(),
                              "front_camera": ob.get_pose(),
                              "wrist_camera": ob.get_pose()})
        return poses
    def _assert_action_space(self, action, expected_shape):
        if np.shape(action) != expected_shape:
            raise RuntimeError(
                'Expected the action shape to be: %s, but was shape: %s' % (
                    str(expected_shape), str(np.shape(action))))

    def _assert_unit_quaternion(self, quat):
        if not np.isclose(np.linalg.norm(quat), 1.0):
            raise RuntimeError('Action contained non unit quaternion!')

    def _torque_action(self, action):
        self._robot.arm.set_joint_target_velocities(
            [(_TORQUE_MAX_VEL if t > 0 else -_TORQUE_MAX_VEL)
             for t in action])
        self._robot.arm.set_joint_forces(np.abs(action))
    def _ee_action(self, action, relative_to=None):
        self._assert_unit_quaternion(action[3:])
        try:
            joint_positions = self._robot.arm.solve_ik(
                action[:3], quaternion=action[3:], relative_to=relative_to)
            self._robot.arm.set_joint_target_positions(joint_positions)
        except IKError as e:
            raise InvalidActionError('Could not find a path.') from e
        done = False
        prev_values = None
        # Move until reached target joint positions or until we stop moving
        # (e.g. when we collide with something)
        while not done:
            self._scene.step()
            cur_positions = self._robot.arm.get_joint_positions()
            reached = np.allclose(cur_positions, joint_positions, atol=0.01)
            not_moving = False
            if prev_values is not None:
                not_moving = np.allclose(
                    cur_positions, prev_values, atol=0.001)
            prev_values = cur_positions
            done = reached or not_moving
    def _path_action(self, action, relative_to=None):
        self._assert_unit_quaternion(action[3:])
        try:
            # Check if the target is in the workspace; if not, then quick
            # reject. Only checks position, not rotation
            pos_to_check = action[:3]
            if relative_to is not None:
                self._target_workspace_check.set_position(
                    pos_to_check, relative_to)
                pos_to_check = self._target_workspace_check.get_position()
            valid = self._scene.check_target_in_workspace(pos_to_check)
            if not valid:
                raise InvalidActionError('Target is outside of workspace.')
            path = self._robot.arm.get_path(
                action[:3], quaternion=action[3:], ignore_collisions=True,
                relative_to=relative_to)
            done = False
            observations = []
            while not done:
                done = path.step()
                self._scene.step()
                if self._enable_path_observations:
                    observations.append(self._scene.get_observation())
                success, terminate = self._task.success()
                # If the task succeeds while traversing path, then break early
                if success:
                    break
            observations.append(self._scene.get_observation())
            return observations
        except IKError as e:
            raise InvalidActionError('Could not find a path.') from e
    def step(self, action, camcorder=None) -> (Observation, float, bool):
        # returns observation, reward, done
        if not self._reset_called:
            raise RuntimeError(
                "Call 'reset' before calling 'step' on a task.")

        # action should contain 1 extra value for gripper open close state
        arm_action = np.array(action[:-1])
        ee_action = action[-1]

        if ee_action < 0.0 or ee_action > 1.0:
            raise ValueError('Gripper action expected to be within 0 and 1.')

        # Discretize the gripper action
        current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9
                      else 0.0)
        if ee_action > 0.5:
            ee_action = 1.0
        elif ee_action < 0.5:
            ee_action = 0.0
        if current_ee != ee_action:
            arm_action = np.array([0.0] * 7)

        if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY:
            self._assert_action_space(arm_action,
                                      (len(self._robot.arm.joints),))
            self._robot.arm.set_joint_target_velocities(arm_action)
            self._scene.step()
            # if needed save some images
            if camcorder:
                obs = self._scene.get_observation()
                camcorder.save(obs, self.get_robot_visuals(),
                               self.get_all_graspable_objects())

        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY:
            self._assert_action_space(arm_action,
                                      (len(self._robot.arm.joints),))
            cur = np.array(self._robot.arm.get_joint_velocities())
            self._robot.arm.set_joint_target_velocities(cur + arm_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.ABS_JOINT_POSITION:
            self._assert_action_space(arm_action,
                                      (len(self._robot.arm.joints),))
            self._robot.arm.set_joint_target_positions(arm_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION:
            self._assert_action_space(arm_action,
                                      (len(self._robot.arm.joints),))
            cur = np.array(self._robot.arm.get_joint_positions())
            self._robot.arm.set_joint_target_positions(cur + arm_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE:
            self._assert_action_space(
                arm_action, (len(self._robot.arm.joints),))
            self._torque_action(arm_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_TORQUE:
            cur = np.array(self._robot.arm.get_joint_forces())
            new_action = cur + arm_action
            self._torque_action(new_action)
            self._scene.step()

        elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._ee_action(list(arm_action))

        elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._path_observations = []
            self._path_observations = self._path_action(list(arm_action))

        elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
            x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
            new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(
                qw, qx, qy, qz)
            qw, qx, qy, qz = list(new_rot)
            new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
            self._path_observations = []
            self._path_observations = self._path_action(list(new_pose))

        elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
            x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
            new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(
                qw, qx, qy, qz)
            qw, qx, qy, qz = list(new_rot)
            new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
            self._ee_action(list(new_pose))

        elif self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._ee_action(
                list(arm_action), relative_to=self._robot.arm.get_tip())

        elif self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._path_observations = []
            self._path_observations = self._path_action(
                list(arm_action), relative_to=self._robot.arm.get_tip())

        else:
            raise RuntimeError('Unrecognised action mode.')

        if current_ee != ee_action:
            done = False
            while not done:
                done = self._robot.gripper.actuate(ee_action, velocity=0.2)
                self._pyrep.step()
                self._task.step()
                # if needed save some images
                if camcorder:
                    obs = self._scene.get_observation()
                    camcorder.save(obs, self.get_robot_visuals(),
                                   self.get_all_graspable_objects())
            if ee_action == 0.0 and self._attach_grasped_objects:
                # If gripper close action, then check for grasp.
                for g_obj in self._task.get_graspable_objects():
                    self._robot.gripper.grasp(g_obj)
            else:
                # If gripper open action, then check for ungrasp.
                self._robot.gripper.release()

        success, terminate = self._task.success()
        task_reward = self._task.reward()
        reward = float(success) if task_reward is None else task_reward
        return self._scene.get_observation(), reward, terminate
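    # Usage sketch for `step` (illustrative; `task_env` as obtained from
    # `Environment.get_task`, action layout assuming ABS_JOINT_VELOCITY with
    # a 7-DoF arm, i.e. 7 joint values plus 1 gripper value):
    #
    #   descriptions, obs = task_env.reset()
    #   action = np.zeros(8)
    #   action[-1] = 1.0  # keep the gripper open
    #   obs, reward, terminate = task_env.step(action)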
    def resolve_redundancy_joint_velocities(self, actions, setup):
        """
        Resolves redundant self-motion into the nullspace without changing
        the gripper tip position.
        :param actions: Current actions without redundancy resolution.
        :param setup: Setup for redundancy resolution defining the mode,
            weighting etc.
        :return: 1: Array of joint velocities, which move the robot's tip
            according to the provided actions yet push the joint position
            towards a reference position.
                 2: The current loss L of the redundancy-resolution
            objective.
        """
        # get the Jacobian and keep only the positional part
        J = self._robot.arm.get_jacobian()
        J = np.transpose(J)
        J = np.flip(J)
        J = J[-3:]
        # compute the pseudo inverse
        J_plus = np.linalg.pinv(J)

        if type(setup["W"]) is list:
            W = np.array(setup["W"])
        elif setup["W"] is None:
            # use default weighting later
            W = None
        else:
            raise TypeError(
                "Unsupported type %s for weighting vector." % type(setup["W"]))

        # compute the error
        if setup["mode"] == "reference_position":
            dL, L = self.get_loss_reference_position(setup["ref_position"], W)
        elif setup["mode"] == "collision_avoidance":
            dL, L = self.get_loss_collision_avoidance(W, setup)

        # compute the joint velocities by projecting the loss gradient into
        # the nullspace of the positional Jacobian
        q_dot_redundancy = setup["alpha"] * np.matmul(
            (np.identity(len(self._robot.arm.joints))
             - np.matmul(J_plus, J)), dL)
        # the provided Jacobian seems to be inaccurate, resulting in slight
        # movement of the ee. This is why the velocities are set to 0 once
        # the error stops changing much.
        e = dL
        if setup["cut-off_error"] is not None:
            if self._last_e is not None:
                e_dot = np.sum(np.abs(e - self._last_e))
                if e_dot < setup["cut-off_error"]:
                    q_dot_redundancy = np.array(
                        [0.0] * len(self._robot.arm.joints))
            self._last_e = e
        else:
            self._last_e = e
        return actions - q_dot_redundancy, L
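    # Usage sketch (illustrative): resolving redundancy against a reference
    # configuration before stepping. The `setup` keys mirror the ones read
    # above; `ref_q`, `alpha` and the cut-off are example values, not
    # defaults of this class.
    #
    #   ref_q = np.zeros(7)
    #   setup = {"mode": "reference_position", "ref_position": ref_q,
    #            "W": None, "alpha": 0.01, "cut-off_error": 1e-4}
    #   v, loss = task_env.resolve_redundancy_joint_velocities(
    #       actions=policy_velocities, setup=setup)
    #   obs, reward, terminate = task_env.step(np.append(v, gripper_action))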
    def get_loss_reference_position(self, ref_pos, W):
        """
        Calculates the summed squared error between the current and the
        reference configuration, as well as its partial derivatives with
        respect to all q's, for redundancy resolution.
        -> L(q) = 1/2 \sum_{i=1}^N w_i (q_i - \tilde{q}_i)^2
        :param ref_pos: Reference position.
        :param W: Weighting vector.
        :return: 1: The partial derivatives of the summed squared error
            between the current and the reference configuration
            -> \nabla_q L(q)
                 2: Summed squared error between the current and the
            reference configuration -> L(q)
        """
        if W is None:
            # default weighting
            W = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        e = (self._robot.arm.get_joint_positions() - ref_pos)
        return e * W, 0.5 * np.dot(e, e * W)
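    # Worked one-joint example of the loss above (illustrative numbers):
    # with w_i = 1, q_i = 0.5 and \tilde{q}_i = 0.2, the error is e = 0.3,
    # the gradient entry is w_i * e = 0.3 and the loss contribution is
    # 0.5 * 0.3^2 = 0.045 -- matching `e * W` and `0.5 * np.dot(e, e * W)`.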
    def get_loss_collision_avoidance(self, W, setup):
        """
        Calculates the loss as well as the respective partial derivatives
        for redundancy resolution with collision avoidance. This only works
        with tasks that include exactly one obstacle!
        L(q) = \sum_{i=1}^N d_i(q)^{-1}
        :param W: Weighting vector.
        :return: 1: The partial derivatives of the loss above
            -> \nabla_q L(q)
                 2: The loss shown above -> L(q)
        """
        # get the position of the obstacle
        p_obs = self._task.obstacle.get_position() \
            + np.array([0, 0, 0.33]) \
            - self._robot.arm.joints[0].get_position()
        #p_obs = self._task.obstacle.get_position()
        p_obs = np.append(p_obs, [1])

        # get the transformation matrices, their derivatives, and the
        # positions of the links
        A_1, A_2, A_3, A_4, A_5, A_6, A_7 = \
            self._robot.get_transformation_matrices()
        dA_1, dA_2, dA_3, dA_4, dA_5, dA_6, dA_7 = \
            self._robot.get_transformation_matrices_derivatives()
        p_1, p_2, p_3, p_4, p_5, p_6, p_7 = \
            self._robot.get_link_positions_in_ref_frames()

        # we use the reciprocal of the distance between each link and the
        # obstacle as our loss. The chain rule delivers:
        # d/dq L = (p_i^0(q_1,..., q_i) - p_obs)^T
        #          * d/dq (p_i^0(q_1,..., q_i) - p_obs)
        # where p_i^0 = (\prod_{j=1}^i A_j^{j-1}(q_j)) * p_i
        # as the left side of d/dq L is used often, let's calculate it in
        # advance
        d_1_T = np.transpose(A_1.dot(p_1) - p_obs)
        d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) - p_obs)
        d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs)
        d_4_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4) - p_obs)
        d_5_T = np.transpose(
            A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs)
        d_6_T = np.transpose(
            A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6) - p_obs)
        d_7_T = np.transpose(
            A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)
            - p_obs)

        # now we can calculate the derivatives in each dimension
        dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) + \
               -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \
               -np.matmul(d_3_T, dA_1.dot(A_2).dot(A_3).dot(p_3)) + \
               -np.matmul(d_4_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) + \
               -np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3)) + \
               -np.matmul(d_4_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_3 = -np.matmul(d_3_T, A_1.dot(A_2).dot(dA_3).dot(p_3)) + \
               -np.matmul(d_4_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_4 = -np.matmul(d_4_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_5 = -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7))
        dq_6 = -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(A_7).dot(p_7))
        dq_7 = -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(dA_7).dot(p_7))

        if W is None:
            # default weighting vector -> based on the reciprocal of the
            # distance. The greater the distance the smaller the weight.
            # That is, it is concentrated on close objects.
            W = np.array([1 / np.sum(np.square(d_1_T)),
                          1 / np.sum(np.square(d_2_T)),
                          1 / np.sum(np.square(d_3_T)),
                          1 / np.sum(np.square(d_4_T)),
                          1 / np.sum(np.square(d_5_T)),
                          1 / np.sum(np.square(d_6_T)),
                          1 / np.sum(np.square(d_7_T))]) * 0.1

        # --- scaling to keep distance to joint limits ---
        # get the minimum distance of each joint to its limits
        joint_positions = np.array(
            [j.get_joint_position() for j in self._robot.arm.joints])
        lower_joint_limits = np.array(setup["lower_joint_pos_limit"])
        upper_joint_limits = np.array(setup["upper_joint_pos_limit"])
        min_j_distances = [np.minimum(u - j, j - l) for l, u, j in
                           zip(lower_joint_limits, upper_joint_limits,
                               joint_positions)]
        # start scaling down the error when a joint limit is 15° away.
        # Scaling is done linearly from 0 to 1 for 0° <= d <= 15°
        rad_thres = 15 * (np.pi / 180)
        W *= np.array([np.minimum((1 / rad_thres) * d, 1.0)
                       for d in min_j_distances])

        # concatenate the derivatives to a vector and apply the weighting
        dL = np.array([dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7]) * W
        # calculate the loss
        L = np.sqrt(np.dot(d_1_T, d_1_T)) * W[0] \
            + np.sqrt(np.dot(d_2_T, d_2_T)) * W[1] \
            + np.sqrt(np.dot(d_3_T, d_3_T)) * W[2] \
            + np.sqrt(np.dot(d_4_T, d_4_T)) * W[3] \
            + np.sqrt(np.dot(d_5_T, d_5_T)) * W[4] \
            + np.sqrt(np.dot(d_6_T, d_6_T)) * W[5] \
            + np.sqrt(np.dot(d_7_T, d_7_T)) * W[6]
        return dL, L
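    # Why the projection in `resolve_redundancy_joint_velocities` leaves the
    # tip untouched: for a positional Jacobian J, the nullspace projector
    # N = I - pinv(J) @ J maps any joint velocity to one with (near) zero
    # tip velocity, since J @ N = 0 by the Moore-Penrose property
    # J @ pinv(J) @ J = J. A minimal self-contained check with a random J
    # (illustrative, independent of this class):
    #
    #   J = np.random.randn(3, 7)              # 3D tip velocity, 7 joints
    #   N = np.eye(7) - np.linalg.pinv(J) @ J
    #   v = np.random.randn(7)
    #   assert np.allclose(J @ (N @ v), 0.0)   # projected motion is internal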
\"\"\" # get the Jacobian J = self._robot.arm.get_jacobian()", "is outside of workspace.') path = self._robot.arm.get_path( action[:3], quaternion=action[3:], ignore_collisions=True,", "\"right_shoulder_camera\": ob.get_pose(), \"front_camera\": ob.get_pose(), \"wrist_camera\": ob.get_pose()}) return poses def _assert_action_space(self,", "derivaties to vector and apply weightig dL = np.array([dq_1, dq_2,", "is in the workspace; if not, then quick reject #", "which move the robot's tip according to the provided actions", "np.array(setup[\"lower_joint_pos_limit\"]) upper_joint_limits = np.array(setup[\"upper_joint_pos_limit\"]) min_j_distances = [np.minimum(u-j, j-l) for l,u,j", "only works with tasks that include one obstacles! L(q) =", "from pyrep.errors import IKError from pyrep.objects import Dummy, Object from", "arm_action, (len(self._robot.arm.joints),)) self._torque_action(arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_TORQUE: cur =", "d/dq L is used often, let's calculate it in advance", "(BoundaryError, WaypointError) as e: raise TaskEnvironmentError( 'Could not place the", "RuntimeError('Action contained non unit quaternion!') def _torque_action(self, action): self._robot.arm.set_joint_target_velocities( [(_TORQUE_MAX_VEL", "not, then quick reject # Only checks position, not rotation", "elif self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE: self._assert_action_space( arm_action, (len(self._robot.arm.joints),)) self._torque_action(arm_action) self._scene.step() elif", "self._reset_called = False self._prev_ee_velocity = None self._enable_path_observations = False self._scene.load(self._task)", "the check for ungrasp. self._robot.gripper.release() success, terminate = self._task.success() task_reward", "as our Loss # the chain rule delivers: d/dq L", "arm_action = np.array(action[:-1]) ee_action = action[-1] if 0.0 > ee_action", "joint_positions = self._robot.arm.solve_ik( action[:3], quaternion=action[3:], relative_to=relative_to) self._robot.arm.set_joint_target_positions(joint_positions) except IKError as", "rlbench.backend.observation import Observation from rlbench.backend.robot import Robot from rlbench.backend.scene import", "qw, qx, qy, qz) qw, qx, qy, qz = list(new_rot)", "== ArmActionMode.EE_POSE_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._ee_action( list(arm_action), relative_to=self._robot.arm.get_tip()) elif self._action_mode.arm ==", "= True): self._pyrep = pyrep self._robot = robot self._scene =", "np.sum(np.square(d_7_T)) ]) * 0.1 # --- scaling to keep distance", "elif setup[\"mode\"] == \"collision_avoidance\": dL, L = self.get_loss_collision_avoidance(W, setup) #", "later W = None else: raise TypeError(\"Unsupported type %s for", "= None, max_attempts: int = _MAX_DEMO_ATTEMPTS, ) -> List[Demo]: \"\"\"Negative", "the target is in the workspace; if not, then quick", "movement of the ee. This is why # the velocites", "' 'happen, please raise an issues on this task.' 
%", "+ arm_action self._torque_action(new_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME: self._assert_action_space(arm_action, (7,))", "d_6_T))*W[5] \\ + np.sqrt(np.dot(d_7_T, d_7_T))*W[6] return dL, L def enable_path_observations(self,", "J[-3:] # compute the pseudo inverse J_plus = np.linalg.pinv(J) #", "+ z] + [qx, qy, qz, qw] self._ee_action(list(new_pose)) elif self._action_mode.arm", "d_7_T))*W[6] return dL, L def enable_path_observations(self, value: bool) -> None:", "sum_{i=1}^N w_i (q_i - \\tilde{q}_i)^2 :param ref_pos: Reference position. :param", "no dataset root provided.\") if not live_demos: if self._dataset_root is", "callable_each_step: Callable[ [Observation], None] = None, max_attempts: int = _MAX_DEMO_ATTEMPTS)", "in action]) self._robot.arm.set_joint_forces(np.abs(action)) def _ee_action(self, action, relative_to=None): self._assert_unit_quaternion(action[3:]) try: joint_positions", ", 1 / np.sum(np.square(d_7_T)) ]) * 0.1 # --- scaling", "cur = np.array(self._robot.arm.get_joint_forces()) new_action = cur + arm_action self._torque_action(new_action) self._scene.step()", "qy, qz) qw, qx, qy, qz = list(new_rot) new_pose =", "shape to be: %s, but was shape: %s' % (", "camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects()) if ee_action == 0.0 and self._attach_grasped_objects: #", "inaccurate resulting in slight movement of the ee. This is", "ArmActionMode.ABS_JOINT_VELOCITY: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_velocities(arm_action) self._scene.step() # if needed save some", "= [] self._path_observations = self._path_action(list(new_pose)) elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_WORLD_FRAME: self._assert_action_space(arm_action,", "= action[-1] if 0.0 > ee_action > 1.0: raise ValueError('Gripper", "the joint position towards a reference position. \"\"\" # get", "obs_config self._static_positions = static_positions self._attach_grasped_objects = attach_grasped_objects self._reset_called = False", "def get_observation(self) -> Observation: return self._scene.get_observation() def get_joint_upper_velocity_limits(self): return self._robot.arm.get_joint_upper_velocity_limits()", "Current actions without redundancy resolution. 
:param setup: Setup for redundancy", "self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE: self._assert_action_space( arm_action, (len(self._robot.arm.joints),)) self._torque_action(arm_action) self._scene.step() elif self._action_mode.arm", "-np.matmul(d_6_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_3 = -np.matmul(d_3_T, A_1.dot(A_2).dot(dA_3).dot(p_3))", "int, callable_each_step: Callable[ [Observation], None] = None, max_attempts: int =", "-np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))", "cur + arm_action self._torque_action(new_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME: self._assert_action_space(arm_action,", "A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) + \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_5", "not find a path.') from e done = False prev_values", "p_i^0 = (\\prod_{j=1}^i A_j^{j-1}(q_j)) * p_i # as the left", "for ungrasp. self._robot.gripper.release() success, terminate = self._task.success() task_reward = self._task.reward()", "the derivatives in each dimension dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) +", "= False self._prev_ee_velocity = None self._enable_path_observations = False self._scene.load(self._task) self._pyrep.start()", "dA_1.dot(p_1)) + \\ -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \\ -np.matmul(d_3_T, dA_1.dot(A_2).dot(A_3).dot(p_3)) +", "W = np.array([1 / np.sum(np.square(d_1_T)), 1 / np.sum(np.square(d_2_T)) , 1", "dq_3 = -np.matmul(d_3_T, A_1.dot(A_2).dot(dA_3).dot(p_3)) + \\ -np.matmul(d_4_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(p_4)) + \\", "d_1_T))*W[0] \\ + np.sqrt(np.dot(d_2_T, d_2_T))*W[1] \\ + np.sqrt(np.dot(d_3_T, d_3_T))*W[2] \\", "# get the Jacobian J = self._robot.arm.get_jacobian() J = np.transpose(J)", "import utils from rlbench.action_modes import ArmActionMode, ActionMode from rlbench.backend.exceptions import", "action current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else 0.0)", "d_5_T))*W[4] \\ + np.sqrt(np.dot(d_6_T, d_6_T))*W[5] \\ + np.sqrt(np.dot(d_7_T, d_7_T))*W[6] return", "np from pyquaternion import Quaternion from pyrep import PyRep from", "not live_demos: if self._dataset_root is None or len(self._dataset_root) == 0:", "info if not self._reset_called: raise RuntimeError( \"Call 'reset' before calling", "rad_thres = 15*(np.pi/180) W *= np.array([ np.minimum((1/rad_thres)*d, 1.0) for d", "for weighting vector.\" % type(setup[\"W\"])) # compute the error if", "position. \"\"\" # get the Jacobian J = self._robot.arm.get_jacobian() J", "side of d/dq L is used often, let's calculate it", "e return actions - q_dot_redundancy, L def get_loss_reference_position(self, ref_pos, W):", "A_1, A_2, A_3, A_4, A_5, A_6, A_7 = self._robot.get_transformation_matrices() dA_1,", "v def variation_count(self) -> int: return self._task.variation_count() def reset(self) ->", "collision avoidance. 
This only works with tasks that include one", "np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) e = (self._robot.arm.get_joint_positions()", "or len(self._dataset_root) == 0: raise RuntimeError( \"Can't ask for stored", "= np.random.get_state() self.reset() logging.info('Collecting demo %d' % i) try: demo", "self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._ee_action( list(arm_action), relative_to=self._robot.arm.get_tip()) elif self._action_mode.arm", "derivatives with respect to al q's for redundancy resoltuion. ->", "0.05 _MAX_RESET_ATTEMPTS = 40 _MAX_DEMO_ATTEMPTS = 10 class InvalidActionError(Exception): pass", "x, a_y + y, a_z + z] + [qx, qy,", "mode.') self._enable_path_observations = value def get_path_observations(self): if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME", "= None # Returns a list of descriptions and the", "self._get_live_demos( amount, callable_each_step, max_attempts) self._robot.arm.set_control_loop_enabled(ctr_loop) return demos def _get_live_demos(self, amount:", "on a task.\") # action should contain 1 extra value", "__init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task: Task, action_mode:", "w_i (q_i - \\tilde{q}_i)^2 :param ref_pos: Reference position. :param W:", "= np.append(p_obs, [1]) # get the transformation matrices, their derivatives,", "unit quaternion!') def _torque_action(self, action): self._robot.arm.set_joint_target_velocities( [(_TORQUE_MAX_VEL if t <", "ee_action: arm_action = np.array([0.0]*7) if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY: self._assert_action_space(arm_action, (len(self._robot.arm.joints),))", "-np.matmul(d_5_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \\ -np.matmul(d_6_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))", "min_j_distances]) # concatenate the derivaties to vector and apply weightig", "(len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_positions(arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) cur", "0.0 and self._attach_grasped_objects: # If gripper close action, the check", "= np.array(setup[\"W\"]) elif setup[\"W\"] is None: # use default weighting", "dimension dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) + \\ -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) +", "rlbench.backend.robot import Robot from rlbench.backend.scene import Scene from rlbench.backend.task import", "if camcorder: obs = self._scene.get_observation() camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects()) if ee_action", "before calling 'step' on a task.\") # action should contain", "# the velocites are set to 0 once the error", "def set_variation(self, v: int) -> None: if v >= self.variation_count():", "def resolve_redundancy_joint_velocities(self, actions, setup): \"\"\" Resolves redundant self-motion into the", "0.33]) - self._robot.arm.joints[0].get_position() #p_obs = self._task.obstacle.get_position() p_obs = np.append(p_obs, [1])", "find a path.') from e done = False prev_values =", "= value def get_path_observations(self): if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and self._action_mode.arm", "if attempts <= 0: raise RuntimeError( 'Could not collect demos.", "set_variation(self, v: int) -> None: if v >= self.variation_count(): raise", "from 
    def _assert_action_space(self, action, expected_shape):
        if np.shape(action) != expected_shape:
            raise RuntimeError(
                'Expected the action shape to be: %s, but was shape: %s' % (
                    str(expected_shape), str(np.shape(action))))

    def _assert_unit_quaternion(self, quat):
        if not np.isclose(np.linalg.norm(quat), 1.0):
            raise RuntimeError('Action contained non unit quaternion!')

    def _torque_action(self, action):
        self._robot.arm.set_joint_target_velocities(
            [(_TORQUE_MAX_VEL if t < 0 else -_TORQUE_MAX_VEL)
             for t in action])
        self._robot.arm.set_joint_forces(np.abs(action))

    def _ee_action(self, action, relative_to=None):
        self._assert_unit_quaternion(action[3:])
        try:
            joint_positions = self._robot.arm.solve_ik(
                action[:3], quaternion=action[3:], relative_to=relative_to)
            self._robot.arm.set_joint_target_positions(joint_positions)
        except IKError as e:
            raise InvalidActionError('Could not find a path.') from e
        done = False
        prev_values = None
        # Move until reached target joint positions or until we stop moving
        # (e.g. when we collide with something)
        while not done:
            self._scene.step()
            cur_positions = self._robot.arm.get_joint_positions()
            reached = np.allclose(cur_positions, joint_positions, atol=0.01)
            not_moving = False
            if prev_values is not None:
                not_moving = np.allclose(
                    cur_positions, prev_values, atol=0.001)
            prev_values = cur_positions
            done = reached or not_moving

    def _path_action(self, action, relative_to=None):
        self._assert_unit_quaternion(action[3:])
        try:
            # Check if the target is in the workspace; if not, then quick reject
            # Only checks position, not rotation
            pos_to_check = action[:3]
            if relative_to is not None:
                self._target_workspace_check.set_position(
                    pos_to_check, relative_to)
                pos_to_check = self._target_workspace_check.get_position()
            valid = self._scene.check_target_in_workspace(pos_to_check)
            if not valid:
                raise InvalidActionError('Target is outside of workspace.')
            path = self._robot.arm.get_path(
                action[:3], quaternion=action[3:], ignore_collisions=True,
                relative_to=relative_to)
            done = False
            observations = []
            while not done:
                done = path.step()
                self._scene.step()
                if self._enable_path_observations:
                    observations.append(self._scene.get_observation())
                success, terminate = self._task.success()
                # If the task succeeds while traversing path, then break early
                if success:
                    break
            observations.append(self._scene.get_observation())
            return observations
        except IKError as e:
            raise InvalidActionError('Could not find a path.') from e
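    # Note (sketch, not part of the original file): _path_action only records
    # intermediate observations when path observations were enabled
    # beforehand. A hypothetical usage with one of the *_EE_POSE_PLAN_*
    # action modes, where `pose_7d` is a position + unit quaternion:
    #
    #   task_env.enable_path_observations(True)
    #   obs, reward, terminate = task_env.step(np.r_[pose_7d, 1.0])
    #   path_obs = task_env.get_path_observations()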
    def step(self, action, camcorder=None) -> (Observation, int, bool):
        # returns observation, reward, done, info
        if not self._reset_called:
            raise RuntimeError(
                "Call 'reset' before calling 'step' on a task.")
        # action should contain 1 extra value for the gripper open/close state
        arm_action = np.array(action[:-1])
        ee_action = action[-1]
        if not 0.0 <= ee_action <= 1.0:
            raise ValueError('Gripper action expected to be within 0 and 1.')
        # Discretize the gripper action
        current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9
                      else 0.0)
        if ee_action > 0.5:
            ee_action = 1.0
        elif ee_action < 0.5:
            ee_action = 0.0
        if current_ee != ee_action:
            arm_action = np.array([0.0] * 7)

        if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY:
            self._assert_action_space(
                arm_action, (len(self._robot.arm.joints),))
            self._robot.arm.set_joint_target_velocities(arm_action)
            self._scene.step()
            # if needed save some images
            if camcorder:
                obs = self._scene.get_observation()
                camcorder.save(obs, self.get_robot_visuals(),
                               self.get_all_graspable_objects())
        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY:
            self._assert_action_space(
                arm_action, (len(self._robot.arm.joints),))
            cur = np.array(self._robot.arm.get_joint_velocities())
            self._robot.arm.set_joint_target_velocities(cur + arm_action)
            self._scene.step()
        elif self._action_mode.arm == ArmActionMode.ABS_JOINT_POSITION:
            self._assert_action_space(
                arm_action, (len(self._robot.arm.joints),))
            self._robot.arm.set_joint_target_positions(arm_action)
            self._scene.step()
        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION:
            self._assert_action_space(
                arm_action, (len(self._robot.arm.joints),))
            cur = np.array(self._robot.arm.get_joint_positions())
            self._robot.arm.set_joint_target_positions(cur + arm_action)
            self._scene.step()
        elif self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE:
            self._assert_action_space(
                arm_action, (len(self._robot.arm.joints),))
            self._torque_action(arm_action)
            self._scene.step()
        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_TORQUE:
            cur = np.array(self._robot.arm.get_joint_forces())
            new_action = cur + arm_action
            self._torque_action(new_action)
            self._scene.step()
        elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._ee_action(list(arm_action))
        elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._path_observations = []
            self._path_observations = self._path_action(list(arm_action))
        elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
            x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
            new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(
                qw, qx, qy, qz)
            qw, qx, qy, qz = list(new_rot)
            new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
            self._path_observations = []
            self._path_observations = self._path_action(list(new_pose))
        elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_WORLD_FRAME:
            self._assert_action_space(arm_action, (7,))
            a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
            x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
            new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(
                qw, qx, qy, qz)
            qw, qx, qy, qz = list(new_rot)
            new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
            self._ee_action(list(new_pose))
        elif self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._ee_action(
                list(arm_action), relative_to=self._robot.arm.get_tip())
        elif self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME:
            self._assert_action_space(arm_action, (7,))
            self._path_observations = []
            self._path_observations = self._path_action(
                list(arm_action), relative_to=self._robot.arm.get_tip())
        else:
            raise RuntimeError('Unrecognised action mode.')

        if current_ee != ee_action:
            done = False
            while not done:
                done = self._robot.gripper.actuate(ee_action, velocity=0.2)
                self._pyrep.step()
                self._task.step()
                # if needed save some images
                if camcorder:
                    obs = self._scene.get_observation()
                    camcorder.save(obs, self.get_robot_visuals(),
                                   self.get_all_graspable_objects())
            if ee_action == 0.0 and self._attach_grasped_objects:
                # If gripper close action, then check for grasp.
                for g_obj in self._task.get_graspable_objects():
                    self._robot.gripper.grasp(g_obj)
            else:
                # If gripper open action, then check for ungrasp.
                self._robot.gripper.release()

        success, terminate = self._task.success()
        task_reward = self._task.reward()
        reward = float(success) if task_reward is None else task_reward
        return self._scene.get_observation(), reward, terminate
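    # Note on the action layout (sketch, not part of the original file): for
    # the 7-DoF arm used here, every action passed to step() is
    # 8-dimensional, i.e. 7 arm values interpreted according to the
    # ArmActionMode plus one trailing gripper value in [0, 1] that is
    # discretized at 0.5. For example:
    #
    #   action = np.concatenate([joint_velocities, [1.0]])  # 1.0 -> open gripper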
    def resolve_redundancy_joint_velocities(self, actions, setup):
        """
        Resolves redundant self-motion into the nullspace without changing
        the gripper tip position.
        :param actions: Current actions without redundancy resolution.
        :param setup: Setup for redundancy resolution defining the mode,
            weighting etc.
        :return: Array of joint velocities, which move the robot's tip
            according to the provided actions yet push the joint positions
            towards a reference position.
        """
        # get the Jacobian
        J = self._robot.arm.get_jacobian()
        J = np.transpose(J)
        J = np.flip(J)
        J = J[-3:]
        # compute the pseudo inverse
        J_plus = np.linalg.pinv(J)
        # weighting
        if type(setup["W"]) is list:
            W = np.array(setup["W"])
        elif setup["W"] is None:
            # use default weighting later
            W = None
        else:
            raise TypeError("Unsupported type %s for weighting vector."
                            % type(setup["W"]))
        # compute the error
        if setup["mode"] == "reference_position":
            dL, L = self.get_loss_reference_position(setup["ref_position"], W)
        elif setup["mode"] == "collision_avoidance":
            dL, L = self.get_loss_collision_avoidance(W, setup)
        # compute the joint velocities
        q_dot_redundancy = setup["alpha"] * np.matmul(
            (np.identity(len(self._robot.arm.joints)) - np.matmul(J_plus, J)),
            dL)
        # the provided Jacobian seems to be inaccurate, resulting in slight
        # movement of the ee. This is why the velocities are set to 0 once
        # the error stops changing much.
        e = dL
        if setup["cut-off_error"] is not None:
            if self._last_e is not None:
                e_dot = np.sum(np.abs(e - self._last_e))
                if e_dot < setup["cut-off_error"]:
                    q_dot_redundancy = np.array([0.0] * 7)
            self._last_e = e
        else:
            self._last_e = e
        return actions - q_dot_redundancy, L
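    # Usage sketch (hypothetical, the variable names are for illustration
    # only): given a `task_env` instance of this class and a 7-dim velocity
    # action `v`, the method above reads the following keys from `setup`:
    #
    #   setup = {
    #       "mode": "reference_position",     # or "collision_avoidance"
    #       "W": None,                        # None -> default weighting
    #       "ref_position": np.zeros(7),      # reference_position mode only
    #       "alpha": 0.5,                     # nullspace gain
    #       "cut-off_error": 1e-4,            # freeze self-motion when e stalls
    #       "lower_joint_pos_limit": [...],   # collision_avoidance mode only
    #       "upper_joint_pos_limit": [...],
    #   }
    #   v_resolved, loss = task_env.resolve_redundancy_joint_velocities(v, setup)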
    def get_loss_reference_position(self, ref_pos, W):
        r"""
        Calculates the summed squared error between the current and the
        reference configuration as well as its partial derivatives with
        respect to all q's for redundancy resolution.
        -> L(q) = 1/2 \sum_{i=1}^N w_i (q_i - \tilde{q}_i)^2
        :param ref_pos: Reference position.
        :param W: Weighting vector.
        :return:
            1: The partial derivatives of the summed squared error between
               the current and the reference configuration -> \nabla_q L(q)
            2: Summed squared error between the current and the reference
               configuration -> L(q)
        """
        if W is None:
            # default weighting
            W = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        e = (self._robot.arm.get_joint_positions() - ref_pos)
        return e * W, 0.5 * np.dot(e, e * W)
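    # Worked example (hypothetical numbers): with W = [1, ..., 1],
    # q = [0.1, 0, 0, 0, 0, 0, 0] and ref_pos = np.zeros(7), the method
    # returns dL = e * W = [0.1, 0, ..., 0] and
    # L = 0.5 * e^T diag(W) e = 0.005, i.e. exactly the gradient and value
    # of L(q) = 1/2 * sum_i w_i (q_i - q~_i)^2 from the docstring.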
->", "open close state arm_action = np.array(action[:-1]) ee_action = action[-1] if", "self._target_workspace_check = Dummy.create() self._last_e = None def get_name(self) -> str:", "yet push the joint position towards a reference position. \"\"\"", "J = J[-3:] # compute the pseudo inverse J_plus =", "-np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(A_7).dot(p_7)) dq_7 = -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(dA_7).dot(p_7)) if W is None:", "np.sum(np.square(d_3_T)) , 1 / np.sum(np.square(d_4_T)) , 1 / np.sum(np.square(d_5_T)) ,", "(7,)) self._ee_action( list(arm_action), relative_to=self._robot.arm.get_tip()) elif self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME: self._assert_action_space(arm_action, (7,))", "velocities, which move the robot's tip according to the provided", "loss shown above.-> L(q) \"\"\" # get the position of", "\"\"\" objects = self._task.get_graspable_objects() poses = [] for ob in", "self._static_positions = static_positions self._attach_grasped_objects = attach_grasped_objects self._reset_called = False self._prev_ee_velocity", "def get_joint_upper_velocity_limits(self): return self._robot.arm.get_joint_upper_velocity_limits() def get_all_graspable_objects(self): return self._task.get_graspable_objects() def get_robot_visuals(self):", "dq_3, dq_4, dq_5, dq_6, dq_7])*W # calculate the loss L", "'Expected the action shape to be: %s, but was shape:", "action[:3] if relative_to is not None: self._target_workspace_check.set_position( pos_to_check, relative_to) pos_to_check", "resolution defining the mode, weighting etc. :return: Array of joint", "= self._robot.arm.joints[0].is_control_loop_enabled() self._robot.arm.set_control_loop_enabled(True) demos = self._get_live_demos( amount, callable_each_step, max_attempts) self._robot.arm.set_control_loop_enabled(ctr_loop)", "qz, qw] self._path_observations = [] self._path_observations = self._path_action(list(new_pose)) elif self._action_mode.arm", "an obstacle as our Loss # the chain rule delivers:", "pyrep.errors import IKError from pyrep.objects import Dummy, Object from rlbench", "* W, 0.5*np.dot(e,e*W) def get_loss_collision_avoidance(self, W, setup): \"\"\" Calculates the", "default weighting later W = None else: raise TypeError(\"Unsupported type", "d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) - p_obs) d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs)", "np.transpose(A_1.dot(A_2).dot(p_2) - p_obs) d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs) d_4_T =", "live_demos: bool = False, image_paths: bool = False, callable_each_step: Callable[[Observation],", "= 15*(np.pi/180) W *= np.array([ np.minimum((1/rad_thres)*d, 1.0) for d in", "positions.append(self._scene.get_object_position_relative_to_cameras(ob)) else: positions.append({\"left_shoulder_camera\": ob.get_position(), \"right_shoulder_camera\": ob.get_position(), \"front_camera\": ob.get_position(), \"wrist_camera\": ob.get_position()})", "for 0° <= d <= 15° rad_thres = 15*(np.pi/180) W", "\\ -np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3)) + \\ -np.matmul(d_4_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(p_4)) + \\ -np.matmul(d_5_T,", "self._reset_called = True # redundancy resolution self._last_e = None #", "max_attempts while attempts > 0: random_seed = np.random.get_state() self.reset() logging.info('Collecting", "1.0: raise ValueError('Gripper action expected to be within 0 and", "None and e_dot < setup[\"cut-off_error\"]: q_dot_redundancy = np.array([0.0] * 7)", "# compute the 
pseudo inverse J_plus = np.linalg.pinv(J) # weighting", "self._robot.arm.set_joint_target_positions(joint_positions) except IKError as e: raise InvalidActionError('Could not find a", "np.array(self._robot.arm.get_joint_forces()) new_action = cur + arm_action self._torque_action(new_action) self._scene.step() elif self._action_mode.arm", "pyrep import PyRep from pyrep.errors import IKError from pyrep.objects import", "the loss above. -> \\nable_q L(q) 2: The loss shown", "None, max_attempts: int = _MAX_DEMO_ATTEMPTS) -> List[Demo]: demos = []", "self._action_mode = action_mode self._dataset_root = dataset_root self._obs_config = obs_config self._static_positions", "self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._path_observations = [] self._path_observations =", "'Requested variation %d, but there are only %d variations.' %", "ArmActionMode.EE_POSE_PLAN_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._path_observations = [] self._path_observations = self._path_action( list(arm_action),", "+ str(e)) if attempts <= 0: raise RuntimeError( 'Could not", "robot's tip according to the provided actions yet push the", "if ee_action > 0.5: ee_action = 1.0 elif ee_action <", "is why # the velocites are set to 0 once", "(len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_velocities(arm_action) self._scene.step() # if needed save some images if", "= self._task.obstacle.get_position() + np.array([0, 0, 0.33]) - self._robot.arm.joints[0].get_position() #p_obs =", "False, image_paths: bool = False, callable_each_step: Callable[[Observation], None] = None,", "None or len(self._dataset_root) == 0): raise RuntimeError( \"Can't ask for", "and e_dot < setup[\"cut-off_error\"]: q_dot_redundancy = np.array([0.0] * 7) self._last_e", "0.1 # --- scaling to keep distance to joint limits", "towards a reference position. \"\"\" # get the Jacobian J", "1.0, 1.0, 1.0, 1.0, 1.0]) e = (self._robot.arm.get_joint_positions() - ref_pos)", "(7,)) a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action", "get_name(self) -> str: return self._task.get_name() def sample_variation(self) -> int: self._variation_number", "the error if setup[\"mode\"] == \"reference_position\": dL, L = self.get_loss_reference_position(setup[\"ref_position\"],", "the left side of d/dq L is used often, let's", "a_qy, a_qz, a_qw = arm_action x, y, z, qx, qy,", "self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME): raise RuntimeError('Only available", "False, callable_each_step: Callable[[Observation], None] = None, max_attempts: int = _MAX_DEMO_ATTEMPTS,", "If the task succeeds while traversing path, then break early", ":return: 1: The partial derivatives of the summed squarred error", "reached or not_moving def _path_action(self, action, relative_to=None): self._assert_unit_quaternion(action[3:]) try: #", "(len(self._robot.arm.joints),)) cur = np.array(self._robot.arm.get_joint_velocities()) self._robot.arm.set_joint_target_velocities(cur + arm_action) self._scene.step() elif self._action_mode.arm", ":param ref_pos: Reference position. :param W: Weighting vector. 
:return: 1:", "try: demo = self._scene.get_demo( callable_each_step=callable_each_step) demo.random_seed = random_seed demos.append(demo) break", "\\ -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \\ -np.matmul(d_3_T, dA_1.dot(A_2).dot(A_3).dot(p_3)) + \\ -np.matmul(d_4_T,", "def get_all_graspable_objects(self): return self._task.get_graspable_objects() def get_robot_visuals(self): return self._robot.arm.get_visuals() def get_all_graspable_object_positions(self,", "Observation from rlbench.backend.robot import Robot from rlbench.backend.scene import Scene from", "e done = False prev_values = None # Move until", "defining the mode, weighting etc. :return: Array of joint velocities,", "between the current and the reference configuration -> -> \\nabla_q", "1: The partial derivatives of the summed squarred error between", "( v, self.variation_count())) self._variation_number = v def variation_count(self) -> int:", "action[:3], quaternion=action[3:], relative_to=relative_to) self._robot.arm.set_joint_target_positions(joint_positions) except IKError as e: raise InvalidActionError('Could", "p_obs) d_7_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7) - p_obs) # now we can", "self._task.success() task_reward = self._task.reward() reward = float(success) if task_reward is", "> 0: random_seed = np.random.get_state() self.reset() logging.info('Collecting demo %d' %", "summed squarred error between the current and the reference consfiguration", "done = path.step() self._scene.step() if self._enable_path_observations: observations.append(self._scene.get_observation()) success, terminate =", "self.variation_count(): raise TaskEnvironmentError( 'Requested variation %d, but there are only", "d_4_T))*W[3] \\ + np.sqrt(np.dot(d_5_T, d_5_T))*W[4] \\ + np.sqrt(np.dot(d_6_T, d_6_T))*W[5] \\", "+ np.sqrt(np.dot(d_4_T, d_4_T))*W[3] \\ + np.sqrt(np.dot(d_5_T, d_5_T))*W[4] \\ + np.sqrt(np.dot(d_6_T,", "current and the reference configuration -> -> \\nabla_q L(q) 2:", "self._robot.arm.get_visuals() def get_all_graspable_object_positions(self, relative_to_cameras=False): \"\"\" returns the positions of all", "-np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_4 = -np.matmul(d_4_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4))", "/ np.sum(np.square(d_5_T)) , 1 / np.sum(np.square(d_6_T)) , 1 / np.sum(np.square(d_7_T))", "above. 
-> \\nable_q L(q) 2: The loss shown above.-> L(q)", "np.append(p_obs, [1]) # get the transformation matrices, their derivatives, and", "self._robot.arm.joints[0].is_control_loop_enabled() self._robot.arm.set_control_loop_enabled(True) demos = self._get_live_demos( amount, callable_each_step, max_attempts) self._robot.arm.set_control_loop_enabled(ctr_loop) return", "relative to all enabled cameras \"\"\" objects = self._task.get_graspable_objects() poses", "a_qy, a_qz) * Quaternion(qw, qx, qy, qz) qw, qx, qy,", "returns observation, reward, done, info if not self._reset_called: raise RuntimeError(", "\\ + np.sqrt(np.dot(d_4_T, d_4_T))*W[3] \\ + np.sqrt(np.dot(d_5_T, d_5_T))*W[4] \\ +", "= None, max_attempts: int = _MAX_DEMO_ATTEMPTS) -> List[Demo]: demos =", "# default weighting vector -> based on the reciprocal of", "np.sqrt(np.dot(d_6_T, d_6_T))*W[5] \\ + np.sqrt(np.dot(d_7_T, d_7_T))*W[6] return dL, L def", "# redundancy resolution self._last_e = None # Returns a list", "dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) + \\ -np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3)) +", "J_plus = np.linalg.pinv(J) # weighting if type(setup[\"W\"]) is list: W", "_path_action(self, action, relative_to=None): self._assert_unit_quaternion(action[3:]) try: # Check if the target", "-> int: return self._task.variation_count() def reset(self) -> (List[str], Observation): self._scene.reset()", "get_demos(self, amount: int, live_demos: bool = False, image_paths: bool =", "' 'ABS_EE_POSE_PLAN action mode.') return self._path_observations def get_demos(self, amount: int,", "the velocites are set to 0 once the error stops", "= np.allclose( cur_positions, prev_values, atol=0.001) prev_values = cur_positions done =", "ArmActionMode.DELTA_JOINT_TORQUE: cur = np.array(self._robot.arm.get_joint_forces()) new_action = cur + arm_action self._torque_action(new_action)", "not valid: raise InvalidActionError('Target is outside of workspace.') path =", "or not_moving def _path_action(self, action, relative_to=None): self._assert_unit_quaternion(action[3:]) try: # Check", "camcorder=None) -> (Observation, int, bool): # returns observation, reward, done,", "path.step() self._scene.step() if self._enable_path_observations: observations.append(self._scene.get_observation()) success, terminate = self._task.success() #", "used often, let's calculate it in advance d_1_T = np.transpose(A_1.dot(p_1)", "between the current and the reference consfiguration as well as", "the action shape to be: %s, but was shape: %s'", "amount, image_paths, self._dataset_root, self._variation_number, self._task.get_name(), self._obs_config) else: ctr_loop = self._robot.arm.joints[0].is_control_loop_enabled()", "RuntimeError('Only available in DELTA_EE_POSE_PLAN or ' 'ABS_EE_POSE_PLAN action mode.') self._enable_path_observations", "= self._task.get_graspable_objects() positions = [] for ob in objects: if", "[Observation], None] = None, max_attempts: int = _MAX_DEMO_ATTEMPTS) -> List[Demo]:", "int = _MAX_DEMO_ATTEMPTS) -> List[Demo]: demos = [] for i", "the pseudo inverse J_plus = np.linalg.pinv(J) # weighting if type(setup[\"W\"])", "demo %d' % i) try: demo = self._scene.get_demo( callable_each_step=callable_each_step) demo.random_seed", "self.get_all_graspable_objects()) if ee_action == 0.0 and self._attach_grasped_objects: # If gripper", "a_qz) * Quaternion( qw, qx, qy, qz) qw, qx, qy,", "= np.sum(np.abs(e - self._last_e)) if self._last_e is not None and", "Weighting vector. 
:return: 1: The partial derivatives of the summed", "J = self._robot.arm.get_jacobian() J = np.transpose(J) J = np.flip(J) J", "a_qx, a_qy, a_qz) * Quaternion( qw, qx, qy, qz) qw,", "self._task.step() # if needed save some images if camcorder: obs", "= \\sum_{i=1}^N d(q)^{-1} :param W: Weighting vector. :return: 1: The", "is not None: self._target_workspace_check.set_position( pos_to_check, relative_to) pos_to_check = self._target_workspace_check.get_position() valid", "compute the joint velocities q_dot_redundancy = setup[\"alpha\"] * np.matmul((np.identity(len(self._robot.arm.joints)) -", "vector. :return: 1: The partial derivatives of the loss above.", "import ObservationConfig _TORQUE_MAX_VEL = 9999 _DT = 0.05 _MAX_RESET_ATTEMPTS =", "def _get_live_demos(self, amount: int, callable_each_step: Callable[ [Observation], None] = None,", "= False while not done: done = self._robot.gripper.actuate(ee_action, velocity=0.2) self._pyrep.step()", "demos\"\"\" if not live_demos and (self._dataset_root is None or len(self._dataset_root)", "ob.get_pose(), \"wrist_camera\": ob.get_pose()}) return poses def _assert_action_space(self, action, expected_shape): if", "= path.step() self._scene.step() if self._enable_path_observations: observations.append(self._scene.get_observation()) success, terminate = self._task.success()", "of the loss above. -> \\nable_q L(q) 2: The loss", "ArmActionMode.ABS_JOINT_POSITION: self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_positions(arm_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION: self._assert_action_space(arm_action,", "in advance d_1_T = np.transpose(A_1.dot(p_1) - p_obs) d_2_T = np.transpose(A_1.dot(A_2).dot(p_2)", "# default weighting W = np.array([1.0, 1.0, 1.0, 1.0, 1.0,", "[] self._path_observations = self._path_action( list(arm_action), relative_to=self._robot.arm.get_tip()) else: raise RuntimeError('Unrecognised action", "when we collide wth something) while not done: self._scene.step() cur_positions", "all graspable object relative to all enabled cameras \"\"\" objects", "positions = [] for ob in objects: if relative_to_camera: positions.append(self._scene.get_object_position_relative_to_cameras(ob))", "quick reject # Only checks position, not rotation pos_to_check =", "close state arm_action = np.array(action[:-1]) ee_action = action[-1] if 0.0", "d_5_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs) d_6_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6) - p_obs)", "from pyquaternion import Quaternion from pyrep import PyRep from pyrep.errors", "gripper tip position :param actions: Current actions without redundancy resolution.", "= np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4) - p_obs) d_5_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs) d_6_T", "_get_live_demos(self, amount: int, callable_each_step: Callable[ [Observation], None] = None, max_attempts:", "a_qx, a_qy, a_qz) * Quaternion(qw, qx, qy, qz) qw, qx,", "A_4, A_5, A_6, A_7 = self._robot.get_transformation_matrices() dA_1, dA_2, dA_3, dA_4,", "InvalidActionError('Could not find a path.') from e def step(self, action,", "close action, the check for grasp. 
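    # Sanity-check sketch (hypothetical, assumes a running simulation and a
    # `task_env` instance): the hand-written chain rule above can be compared
    # against a finite difference for joint k, e.g. with an all-ones weighting
    # so the default W branch is skipped:
    #
    #   q = task_env._robot.arm.get_joint_positions()
    #   _, L0 = task_env.get_loss_collision_avoidance(np.ones(7), setup)
    #   q[k] += eps
    #   task_env._robot.arm.set_joint_positions(q)
    #   _, L1 = task_env.get_loss_collision_avoidance(np.ones(7), setup)
    #   # (L1 - L0) / eps should roughly match dL[k], up to the
    #   # joint-limit scaling applied to W inside the method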
for g_obj in self._task.get_graspable_objects():", "= -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) + \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T,", "15*(np.pi/180) W *= np.array([ np.minimum((1/rad_thres)*d, 1.0) for d in min_j_distances])", "e def step(self, action, camcorder=None) -> (Observation, int, bool): #", "with respect to al q's for redundancy resoltuion. -> L(q)", "dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) +", "\\tilde{q}_i)^2 :param ref_pos: Reference position. :param W: Weighting vector. :return:", "the mode, weighting etc. :return: Array of joint velocities, which", "a_y + y, a_z + z] + [qx, qy, qz,", "self._task.reward() reward = float(success) if task_reward is None else task_reward", "self._scene.get_observation() camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects()) if ee_action == 0.0 and self._attach_grasped_objects:", "resoltuion with collision avoidance. This only works with tasks that", "Calculates the summed squarred error between the current and the", "- \\tilde{q}_i)^2 :param ref_pos: Reference position. :param W: Weighting vector.", "%s in the scene. This should not ' 'happen, please", "ob.get_pose()}) return poses def _assert_action_space(self, action, expected_shape): if np.shape(action) !=", "ref_pos) return e * W, 0.5*np.dot(e,e*W) def get_loss_collision_avoidance(self, W, setup):", "A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_4", "= np.sqrt(np.dot(d_1_T, d_1_T))*W[0] \\ + np.sqrt(np.dot(d_2_T, d_2_T))*W[1] \\ + np.sqrt(np.dot(d_3_T,", "Reference position. :param W: Weighting vector. :return: 1: The partial", "ee_action == 0.0 and self._attach_grasped_objects: # If gripper close action,", "self._scene.get_observation() camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects()) elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY: self._assert_action_space(arm_action, (len(self._robot.arm.joints),))", "True): self._pyrep = pyrep self._robot = robot self._scene = scene", "-np.matmul(d_4_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(p_4)) + \\ -np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6))", "import numpy as np from pyquaternion import Quaternion from pyrep", "the transformation matrices, their derivatives, and the positions of the", ">= self.variation_count(): raise TaskEnvironmentError( 'Requested variation %d, but there are", "not None and e_dot < setup[\"cut-off_error\"]: q_dot_redundancy = np.array([0.0] *", "done = False while not done: done = self._robot.gripper.actuate(ee_action, velocity=0.2)", "np.sqrt(np.dot(d_3_T, d_3_T))*W[2] \\ + np.sqrt(np.dot(d_4_T, d_4_T))*W[3] \\ + np.sqrt(np.dot(d_5_T, d_5_T))*W[4]", "0: raise RuntimeError( \"Can't ask for stored demo when no", "\\ + np.sqrt(np.dot(d_3_T, d_3_T))*W[2] \\ + np.sqrt(np.dot(d_4_T, d_4_T))*W[3] \\ +", "p_i # as the left side of d/dq L is", "a_z, a_qx, a_qy, a_qz, a_qw = arm_action x, y, z,", "collect demos. 
Maybe a problem with the task?') return demos", "np.minimum((1/rad_thres)*d, 1.0) for d in min_j_distances]) # concatenate the derivaties", "of the object p_obs = self._task.obstacle.get_position() + np.array([0, 0, 0.33])", "!= ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):", "= [] for i in range(amount): attempts = max_attempts while", "get the position of the object p_obs = self._task.obstacle.get_position() +", "is None else task_reward return self._scene.get_observation(), reward, terminate def resolve_redundancy_joint_velocities(self,", "= scene self._task = task self._variation_number = 0 self._action_mode =", "L def get_loss_reference_position(self, ref_pos, W): \"\"\" Calculates the summed squarred", "ref_pos, W): \"\"\" Calculates the summed squarred error between the", "raise RuntimeError( 'Could not collect demos. Maybe a problem with", "-> -> \\nabla_q L(q) 2: Summed squarred error between the", "np.array([ np.minimum((1/rad_thres)*d, 1.0) for d in min_j_distances]) # concatenate the", "from rlbench.backend.exceptions import BoundaryError, WaypointError from rlbench.backend.observation import Observation from", "A_7 = self._robot.get_transformation_matrices() dA_1, dA_2, dA_3, dA_4, dA_5, dA_6, dA_7", "self._ee_action(list(new_pose)) elif self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME: self._assert_action_space(arm_action, (7,)) self._ee_action( list(arm_action), relative_to=self._robot.arm.get_tip())", "None: # default weighting vector -> based on the reciprocal", "return self._robot.arm.get_joint_upper_velocity_limits() def get_all_graspable_objects(self): return self._task.get_graspable_objects() def get_robot_visuals(self): return self._robot.arm.get_visuals()", "Setup for redundancy resolution defining the mode, weighting etc. :return:", "Jacobian J = self._robot.arm.get_jacobian() J = np.transpose(J) J = np.flip(J)", "to all enabled cameras \"\"\" objects = self._task.get_graspable_objects() poses =", "is concentrated on close objects. W = np.array([1 / np.sum(np.square(d_1_T)),", "if current_ee != ee_action: arm_action = np.array([0.0]*7) if self._action_mode.arm ==", "list of descriptions and the first observation return desc, self._scene.get_observation()", "np.sum(np.square(d_4_T)) , 1 / np.sum(np.square(d_5_T)) , 1 / np.sum(np.square(d_6_T)) ,", "pyquaternion import Quaternion from pyrep import PyRep from pyrep.errors import", "the nullspace without changing the gripper tip position :param actions:", "as well as its partial derivatives with respect to al", "%d' % i) try: demo = self._scene.get_demo( callable_each_step=callable_each_step) demo.random_seed =", "(self._dataset_root is None or len(self._dataset_root) == 0): raise RuntimeError( \"Can't", "scaling to keep distance to joint limits --- # get", "-np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) + \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7))", "dL = np.array([dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7])*W #", "down error when joint limit is 15° away. # Scaling", "numpy as np from pyquaternion import Quaternion from pyrep import", "push the joint position towards a reference position. 
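The stopping rule in _ee_action (finish when the arm has either reached the IK solution or has stopped making progress) is a reusable pattern. The toy version below sketches it in isolation under stated assumptions: read_positions is a hypothetical stand-in for the simulator and is not part of the class above.

import numpy as np

def wait_until_settled(read_positions, target, atol=0.01, move_atol=0.001):
    """Loop until positions reach `target` or stop changing between reads."""
    prev = None
    while True:
        cur = read_positions()
        if np.allclose(cur, target, atol=atol):
            return True   # reached the IK target
        if prev is not None and np.allclose(cur, prev, atol=move_atol):
            return False  # stalled, e.g. due to a collision
        prev = cur

# Toy usage: a reading that converges to the target value 1.0.
traj = iter(np.linspace(0.0, 1.0, 20))
print(wait_until_settled(lambda: np.array([next(traj, 1.0)]),
                         np.array([1.0])))  # True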
\"\"\" #", "TaskEnvironmentError( 'Requested variation %d, but there are only %d variations.'", "None: # use default weighting later W = None else:", "self.get_loss_reference_position(setup[\"ref_position\"], W) elif setup[\"mode\"] == \"collision_avoidance\": dL, L = self.get_loss_collision_avoidance(W,", "get_all_graspable_objects(self): return self._task.get_graspable_objects() def get_robot_visuals(self): return self._robot.arm.get_visuals() def get_all_graspable_object_positions(self, relative_to_cameras=False):", "self._task.get_name()) from e self._reset_called = True # redundancy resolution self._last_e", "poses.append(self._scene.get_object_pose_relative_to_cameras(ob)) else: poses.append({\"left_shoulder_camera\": ob.get_pose(), \"right_shoulder_camera\": ob.get_pose(), \"front_camera\": ob.get_pose(), \"wrist_camera\": ob.get_pose()})", "bool): # returns observation, reward, done, info if not self._reset_called:", "= self._robot.gripper.actuate(ee_action, velocity=0.2) self._pyrep.step() self._task.step() # if needed save some", "bool = False, attach_grasped_objects: bool = True): self._pyrep = pyrep", "= self.get_loss_collision_avoidance(W, setup) # compute the joint velocities q_dot_redundancy =", "0.0 > ee_action > 1.0: raise ValueError('Gripper action expected to", "raise RuntimeError( \"Call 'reset' before calling 'step' on a task.\")", "e = dL if setup[\"cut-off_error\"] is not None: if self._last_e", "derivatives of the loss above. -> \\nable_q L(q) 2: The", "return self._task.get_name() def sample_variation(self) -> int: self._variation_number = np.random.randint( 0,", "self._robot.arm.solve_ik( action[:3], quaternion=action[3:], relative_to=relative_to) self._robot.arm.set_joint_target_positions(joint_positions) except IKError as e: raise", "while not done: done = path.step() self._scene.step() if self._enable_path_observations: observations.append(self._scene.get_observation())", "* Quaternion( qw, qx, qy, qz) qw, qx, qy, qz", "self._obs_config = obs_config self._static_positions = static_positions self._attach_grasped_objects = attach_grasped_objects self._reset_called", "self._scene.reset() try: desc = self._scene.init_episode( self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS, randomly_place=not self._static_positions) except", "+ \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \\ -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7)) dq_6 =", "def sample_variation(self) -> int: self._variation_number = np.random.randint( 0, self._task.variation_count()) return", "the gripper tip position :param actions: Current actions without redundancy", "/ np.sum(np.square(d_4_T)) , 1 / np.sum(np.square(d_5_T)) , 1 / np.sum(np.square(d_6_T))", "quaternion=action[3:], ignore_collisions=True, relative_to=relative_to) done = False observations = [] while", "else task_reward return self._scene.get_observation(), reward, terminate def resolve_redundancy_joint_velocities(self, actions, setup):", "vector.\" % type(setup[\"W\"])) # compute the error if setup[\"mode\"] ==", "reciprocal of the distance between each link and an obstacle", "distance to joint limits --- # get the minimum distance", "dL if setup[\"cut-off_error\"] is not None: if self._last_e is not", "the object p_obs = self._task.obstacle.get_position() + np.array([0, 0, 0.33]) -", "--- # get the minimum distance of each joint to", "# compute the joint velocities q_dot_redundancy = setup[\"alpha\"] * 
np.matmul((np.identity(len(self._robot.arm.joints))", "np.sqrt(np.dot(d_1_T, d_1_T))*W[0] \\ + np.sqrt(np.dot(d_2_T, d_2_T))*W[1] \\ + np.sqrt(np.dot(d_3_T, d_3_T))*W[2]", "reset(self) -> (List[str], Observation): self._scene.reset() try: desc = self._scene.init_episode( self._variation_number,", "= self._task.success() # If the task succeeds while traversing path,", "the respective partial derivatives for redundancy resoltuion with collision avoidance.", "None # Move until reached target joint positions or until", "and the positions of the links A_1, A_2, A_3, A_4,", "L(q) = \\sum_{i=1}^N d(q)^{-1} :param W: Weighting vector. :return: 1:", "\"wrist_camera\": ob.get_position()}) return positions def get_all_graspable_object_poses(self, relative_to_cameras=False): \"\"\" returns the", "pos_to_check, relative_to) pos_to_check = self._target_workspace_check.get_position() valid = self._scene.check_target_in_workspace(pos_to_check) if not", "# Scaling is done linearly from 0 to 1 for", "default weighting W = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0,", "' + str(e)) if attempts <= 0: raise RuntimeError( 'Could", "- p_obs) # where p_i^0 = (\\prod_{j=1}^i A_j^{j-1}(q_j)) * p_i", "dq_4, dq_5, dq_6, dq_7])*W # calculate the loss L =", "self._assert_action_space(arm_action, (len(self._robot.arm.joints),)) self._robot.arm.set_joint_target_velocities(arm_action) self._scene.step() # if needed save some images", "demo. ' + str(e)) if attempts <= 0: raise RuntimeError(", "target joint positions or until we stop moving # (e.g.", "self._enable_path_observations: observations.append(self._scene.get_observation()) success, terminate = self._task.success() # If the task", "raise TaskEnvironmentError( 'Requested variation %d, but there are only %d", "ref_pos: Reference position. :param W: Weighting vector. :return: 1: The", "while traversing path, then break early if success: break observations.append(self._scene.get_observation())", "A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)) dq_4 = -np.matmul(d_4_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4)) + \\ -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) +", "= self._scene.init_episode( self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS, randomly_place=not self._static_positions) except (BoundaryError, WaypointError) as", "+ np.sqrt(np.dot(d_2_T, d_2_T))*W[1] \\ + np.sqrt(np.dot(d_3_T, d_3_T))*W[2] \\ + np.sqrt(np.dot(d_4_T,", "= list(new_rot) new_pose = [a_x + x, a_y + y,", "None else: raise TypeError(\"Unsupported type %s for weighting vector.\" %", "arm_action self._torque_action(new_action) self._scene.step() elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME: self._assert_action_space(arm_action, (7,)) self._ee_action(list(arm_action))", "* np.matmul((np.identity(len(self._robot.arm.joints)) - np.matmul(J_plus, J)), dL) # the provided jacobian", "= np.array([0.0] * 7) self._last_e = e else: self._last_e =", "action shape to be: %s, but was shape: %s' %", "self._robot.gripper.actuate(ee_action, velocity=0.2) self._pyrep.step() self._task.step() # if needed save some images", "ungrasp. 
self._robot.gripper.release() success, terminate = self._task.success() task_reward = self._task.reward() reward", "\\ + np.sqrt(np.dot(d_6_T, d_6_T))*W[5] \\ + np.sqrt(np.dot(d_7_T, d_7_T))*W[6] return dL,", "raise TaskEnvironmentError( 'Could not place the task %s in the", "self._robot.arm.get_joint_positions() reached = np.allclose(cur_positions, joint_positions, atol=0.01) not_moving = False if", "\"collision_avoidance\": dL, L = self.get_loss_collision_avoidance(W, setup) # compute the joint", "ee_action < 0.5: ee_action = 0.0 if current_ee != ee_action:", "[(_TORQUE_MAX_VEL if t < 0 else -_TORQUE_MAX_VEL) for t in", "= False if prev_values is not None: not_moving = np.allclose(", "def enable_path_observations(self, value: bool) -> None: if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME", "Dummy, Object from rlbench import utils from rlbench.action_modes import ArmActionMode,", "q_i) - p_obs) # where p_i^0 = (\\prod_{j=1}^i A_j^{j-1}(q_j)) *", "demo = self._scene.get_demo( callable_each_step=callable_each_step) demo.random_seed = random_seed demos.append(demo) break except", "loss L = np.sqrt(np.dot(d_1_T, d_1_T))*W[0] \\ + np.sqrt(np.dot(d_2_T, d_2_T))*W[1] \\", "and apply weightig dL = np.array([dq_1, dq_2, dq_3, dq_4, dq_5,", "of workspace.') path = self._robot.arm.get_path( action[:3], quaternion=action[3:], ignore_collisions=True, relative_to=relative_to) done", "= reached or not_moving def _path_action(self, action, relative_to=None): self._assert_unit_quaternion(action[3:]) try:", "A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4)) + \\ -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) + \\ -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) +", "= self._task.obstacle.get_position() p_obs = np.append(p_obs, [1]) # get the transformation", "each link and an obstacle as our Loss # the", "in objects: if relative_to_cameras: poses.append(self._scene.get_object_pose_relative_to_cameras(ob)) else: poses.append({\"left_shoulder_camera\": ob.get_pose(), \"right_shoulder_camera\": ob.get_pose(),", "relative_to) pos_to_check = self._target_workspace_check.get_position() valid = self._scene.check_target_in_workspace(pos_to_check) if not valid:", "self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME): raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '", "no dataset root provided.\") demos = utils.get_stored_demos( amount, image_paths, self._dataset_root,", "the scene. 
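For context, this interface is normally driven through RLBench's Environment wrapper, which constructs TaskEnvironment instances. The sketch below assumes the standard upstream API (Environment, get_task, the ReachTarget task) and made-up action values; this fork's step() additionally accepts the optional camcorder argument shown above.

import numpy as np
from rlbench.environment import Environment
from rlbench.action_modes import ActionMode, ArmActionMode
from rlbench.tasks import ReachTarget

action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
env = Environment(action_mode, headless=True)
env.launch()
task = env.get_task(ReachTarget)  # returns a TaskEnvironment

descriptions, obs = task.reset()
for _ in range(100):
    # 7 joint velocities plus 1 gripper open/close value, as step() expects.
    action = np.concatenate([np.random.uniform(-0.1, 0.1, 7), [1.0]])
    obs, reward, terminate = task.step(action)
    if terminate:
        break
env.shutdown()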
    def resolve_redundancy_joint_velocities(self, actions, setup):
        """Resolves redundant self-motion into the null space without
        changing the gripper tip position.

        :param actions: Current actions without redundancy resolution.
        :param setup: Setup for redundancy resolution defining the mode,
            weighting etc.
        :return: Array of joint velocities, which move the robot's tip
            according to the given actions while the null-space self-motion
            pushes the joint position towards the secondary objective (e.g.
            a reference position), plus the loss of that objective.
        """
        # Get the Jacobian and its pseudo-inverse.
        J = self._robot.arm.get_jacobian()
        J = np.transpose(J)
        J = np.flip(J)
        J_plus = np.linalg.pinv(J)

        if setup["W"] is None:
            # Use the default weighting later.
            W = None
        elif isinstance(setup["W"], np.ndarray):
            W = setup["W"]
        else:
            raise TypeError("Unsupported type %s for weighting vector."
                            % type(setup["W"]))

        # Compute the error and its partial derivatives.
        if setup["mode"] == "reference_position":
            dL, L = self.get_loss_reference_position(
                setup["ref_position"], W)
        elif setup["mode"] == "collision_avoidance":
            dL, L = self.get_loss_collision_avoidance(W, setup)

        # Compute the joint velocities.
        q_dot_redundancy = setup["alpha"] * np.matmul(
            (np.identity(len(self._robot.arm.joints))
             - np.matmul(J_plus, J)), dL)

        # The provided Jacobian seems to be inaccurate, resulting in slight
        # movement of the ee. This is why the null-space velocities are cut
        # off once the error stops changing much.
        e = dL
        if setup["cut-off_error"] is not None:
            if self._last_e is not None:
                e_dot = np.sum(np.abs(e - self._last_e))
            if self._last_e is not None and e_dot < setup["cut-off_error"]:
                q_dot_redundancy = np.array([0.0] * 7)
            self._last_e = e
        else:
            self._last_e = e
        return actions - q_dot_redundancy, L
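At the heart of resolve_redundancy_joint_velocities is the classic gradient-projection rule q_dot = q_dot_task - alpha * (I - J_plus J) * grad L: anything multiplied by the null-space projector (I - J_plus J) produces no end-effector motion to first order. A self-contained numeric check with a toy Jacobian (not the robot's):

import numpy as np

# Toy 3x7 Jacobian: end-effector linear velocity vs. 7 joint velocities.
rng = np.random.default_rng(0)
J = rng.standard_normal((3, 7))
J_plus = np.linalg.pinv(J)
N = np.identity(7) - J_plus @ J   # null-space projector

dL = rng.standard_normal(7)       # gradient of some secondary loss
q_dot_null = N @ dL

# The projected motion yields (numerically) zero end-effector velocity.
print(np.allclose(J @ q_dot_null, 0.0))  # True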
    def get_loss_reference_position(self, ref_pos, W):
        """Calculates the summed squared error between the current and the
        reference configuration, as well as its partial derivatives with
        respect to all q's, for redundancy resolution.

        L(q) = 0.5 * \sum_{i=1}^N w_i (q_i - \tilde{q}_i)^2

        :param ref_pos: Reference position.
        :param W: Weighting vector.
        :return: 1: The partial derivatives of the summed squared error
                    between the current and the reference configuration.
                    -> \nabla_q L(q)
                 2: Summed squared error between the current and the
                    reference configuration. -> L(q)
        """
        if W is None:
            # Default weighting.
            W = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        e = (self._robot.arm.get_joint_positions() - ref_pos)
        return e * W, 0.5 * np.dot(e, e * W)
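Since get_loss_reference_position returns the pair (gradient, loss), a quick sanity check is to compare the analytic gradient against finite differences of the loss. The snippet below reimplements the same formula standalone, with made-up joint values; it does not touch the class.

import numpy as np

def ref_loss(q, ref, W):
    e = q - ref
    return e * W, 0.5 * np.dot(e, e * W)   # same (gradient, loss) pair

q = np.array([0.1, -0.4, 0.3, 0.0, 0.2, -0.1, 0.5])
ref = np.zeros(7)
W = np.ones(7)
grad, L = ref_loss(q, ref, W)

eps = 1e-6
num_grad = np.array([
    (ref_loss(q + eps * np.eye(7)[i], ref, W)[1]
     - ref_loss(q - eps * np.eye(7)[i], ref, W)[1]) / (2 * eps)
    for i in range(7)
])
print(np.allclose(grad, num_grad))  # True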
    def get_loss_collision_avoidance(self, W, setup):
        """Calculates the loss, as well as the respective partial
        derivatives, for redundancy resolution with collision avoidance.
        This only works with tasks that include one obstacle!

        L(q) = \sum_{i=1}^N d_i(q)^{-1}

        :param W: Weighting vector.
        :return: 1: The partial derivatives of the loss above.
                    -> \nabla_q L(q)
                 2: The loss shown above. -> L(q)
        """
        # Get the position of the object.
        p_obs = self._task.obstacle.get_position() + np.array(
            [0, 0, 0.33]) - self._robot.arm.joints[0].get_position()
        # p_obs = self._task.obstacle.get_position()
        p_obs = np.append(p_obs, [1])

        # Get the transformation matrices, their derivatives, and the
        # positions of the links.
        A_1, A_2, A_3, A_4, A_5, A_6, A_7 = \
            self._robot.get_transformation_matrices()
        dA_1, dA_2, dA_3, dA_4, dA_5, dA_6, dA_7 = \
            self._robot.get_transformation_matrices_derivatives()
        p_1, p_2, p_3, p_4, p_5, p_6, p_7 = \
            self._robot.get_link_positions_in_ref_frames()

        # We use the reciprocal of the distance between each link and an
        # obstacle as our loss. The distances
        # d_i^T = (p_i^0(q_1, ..., q_i) - p_obs)^T can be computed in
        # advance.
        d_1_T = np.transpose(A_1.dot(p_1) - p_obs)
        d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) - p_obs)
        d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs)
        d_4_T = np.transpose(
            A_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4) - p_obs)
        d_5_T = np.transpose(
            A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs)
        d_6_T = np.transpose(
            A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6) - p_obs)
        d_7_T = np.transpose(
            A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7)
            - p_obs)
        # where p_i^0 = (\prod_{j=1}^i A_j^{j-1}(q_j)) * p_i.
        # As the left side of d/dq L is used multiple times, we can calculate
        # the derivatives in each dimension:
        dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) + \
               -np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \
               -np.matmul(d_3_T, dA_1.dot(A_2).dot(A_3).dot(p_3)) + \
               -np.matmul(d_4_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) + \
               -np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3)) + \
               -np.matmul(d_4_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_3 = -np.matmul(d_3_T, A_1.dot(A_2).dot(dA_3).dot(p_3)) + \
               -np.matmul(d_4_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_4 = -np.matmul(d_4_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4)) + \
               -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
        dq_5 = -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) + \
               -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7))
        dq_6 = -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(p_6)) + \
               -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(A_7).dot(p_7))
        dq_7 = -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(dA_7).dot(p_7))

        if W is None:
            # Default weighting vector -> based on the reciprocal of the
            # distance between each link and the obstacle. The greater the
            # distance, the smaller the weight. That is, the weighting is
            # concentrated on close objects.
            W = np.array([1 / np.sum(np.square(d_1_T)),
                          1 / np.sum(np.square(d_2_T)),
                          1 / np.sum(np.square(d_3_T)),
                          1 / np.sum(np.square(d_4_T)),
                          1 / np.sum(np.square(d_5_T)),
                          1 / np.sum(np.square(d_6_T)),
                          1 / np.sum(np.square(d_7_T))])

        # --- scaling to keep distance to joint limits ---
        # Get the minimum distance of each joint to its limit.
        joint_positions = np.array(
            [j.get_joint_position() for j in self._robot.arm.joints])
        lower_joint_limits = np.array(setup["lower_joint_pos_limit"])
        upper_joint_limits = np.array(setup["upper_joint_pos_limit"])
        min_j_distances = [np.minimum(u - j, j - l) for l, u, j in
                           zip(lower_joint_limits, upper_joint_limits,
                               joint_positions)]
        # Start scaling down the error when a joint limit is 15° away.
        # Scaling is done linearly from 0 to 1 for 0° <= d <= 15°.
        rad_thres = 15 * (np.pi / 180)
        W *= np.array([np.minimum((1 / rad_thres) * d, 1.0)
                       for d in min_j_distances])

        # Concatenate the derivatives and apply the weighting.
        dL = np.array([dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7]) * W
        # Calculate the loss.
        L = np.sqrt(np.dot(d_1_T, d_1_T)) * W[0] \
            + np.sqrt(np.dot(d_2_T, d_2_T)) * W[1] \
            + np.sqrt(np.dot(d_3_T, d_3_T)) * W[2] \
            + np.sqrt(np.dot(d_4_T, d_4_T)) * W[3] \
            + np.sqrt(np.dot(d_5_T, d_5_T)) * W[4] \
            + np.sqrt(np.dot(d_6_T, d_6_T)) * W[5] \
            + np.sqrt(np.dot(d_7_T, d_7_T)) * W[6]
        return dL, L

    def enable_path_observations(self, value: bool) -> None:
        if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME
                and self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME
                and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):
            raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '
                               'ABS_EE_POSE_PLAN action mode.')
        self._enable_path_observations = value

    def get_path_observations(self):
        if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME
                and self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME
                and self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):
            raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '
                               'ABS_EE_POSE_PLAN action mode.')
        return self._path_observations

    def get_demos(self, amount: int, live_demos: bool = False,
                  image_paths: bool = False,
                  callable_each_step: Callable[[Observation], None] = None,
                  max_attempts: int = _MAX_DEMO_ATTEMPTS,
                  ) -> List[Demo]:
        """Negative means all demos."""
        if not live_demos:
            if self._dataset_root is None or len(self._dataset_root) == 0:
                raise RuntimeError(
                    "Can't ask for a stored demo when no dataset root "
                    "provided.")
            demos = utils.get_stored_demos(
                amount, image_paths, self._dataset_root,
                self._variation_number, self._task.get_name(),
                self._obs_config)
        else:
            ctr_loop = self._robot.arm.joints[0].is_control_loop_enabled()
            self._robot.arm.set_control_loop_enabled(True)
            demos = self._get_live_demos(
                amount, callable_each_step, max_attempts)
            self._robot.arm.set_control_loop_enabled(ctr_loop)
        return demos

    def _get_live_demos(self, amount: int,
                        callable_each_step: Callable[
                            [Observation], None] = None,
                        max_attempts: int = _MAX_DEMO_ATTEMPTS) -> List[Demo]:
        demos = []
        for i in range(amount):
            attempts = max_attempts
            while attempts > 0:
                random_seed = np.random.get_state()
                self.reset()
                logging.info('Collecting demo %d' % i)
                try:
                    demo = self._scene.get_demo(
                        callable_each_step=callable_each_step)
                    demo.random_seed = random_seed
                    demos.append(demo)
                    break
                except Exception as e:
                    attempts -= 1
                    logging.info('Bad demo. ' + str(e))
            if attempts <= 0:
                raise RuntimeError(
                    'Could not collect demos. Maybe a problem with the '
                    'task?')
        return demos

    def reset_to_demo(self, demo: Demo) -> (List[str], Observation):
        demo.restore_state()
        return self.reset()
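The two weighting ideas in get_loss_collision_avoidance above, inverse-square distance weights per link and a linear fade-out within 15° of a joint limit, can be illustrated in isolation. All numbers below are made up for the sketch:

import numpy as np

# Made-up link-to-obstacle distance vectors (one 3-vector per link).
d = [np.array([0.5, 0.0, 0.1]),
     np.array([0.2, 0.1, 0.0]),
     np.array([0.05, 0.02, 0.0])]
W = np.array([1.0 / np.sum(np.square(di)) for di in d])  # closer -> heavier

# Joint-limit scaling: weights fade linearly over the last 15 degrees.
joint_positions = np.array([0.0, 1.5, -2.8])
lower = np.array([-2.9, -1.8, -2.9])
upper = np.array([2.9, 1.8, 2.9])
min_dist = np.minimum(upper - joint_positions, joint_positions - lower)
rad_thres = 15 * (np.pi / 180)
W *= np.minimum(min_dist / rad_thres, 1.0)
print(W)  # third weight is damped: that joint is 0.1 rad from its limit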
# tests/generic_relations/test_forms.py (Django's generic inline formset
# tests).
from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django.test.utils import isolate_apps

from .models import (
    Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem,
)


class CustomWidget(forms.TextInput):
    pass


class TaggedItemForm(forms.ModelForm):
    class Meta:
        model = TaggedItem
        fields = '__all__'
        widgets = {'tag': CustomWidget}


class GenericInlineFormsetTests(TestCase):
    def test_output(self):
        GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
        formset = GenericFormSet()
        self.assertHTMLEqual(
            ''.join(form.as_p() for form in formset.forms),
            """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE"><input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>"""
        )
        formset = GenericFormSet(instance=Animal())
        self.assertHTMLEqual(
            ''.join(form.as_p() for form in formset.forms),
            """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE"><input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>"""
        )
        platypus = Animal.objects.create(
            common_name='Platypus', latin_name='Ornithorhynchus anatinus',
        )
        platypus.tags.create(tag='shiny')
        GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
        formset = GenericFormSet(instance=platypus)
        tagged_item_id = TaggedItem.objects.get(
            tag='shiny', object_id=platypus.id).id
        self.assertHTMLEqual(
            ''.join(form.as_p() for form in formset.forms),
            """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id"
id="id_generic_relations-taggeditem-content_type-object_id-1-id"></p>""" % tagged_item_id
        )
        lion = Animal.objects.create(
            common_name='Lion', latin_name='Panthera leo')
        formset = GenericFormSet(instance=lion, prefix='x')
        self.assertHTMLEqual(
            ''.join(form.as_p() for form in formset.forms),
            """<p><label for="id_x-0-tag">Tag:</label>
<input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50"></p>
<p><label for="id_x-0-DELETE">Delete:</label>
<input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE">
<input type="hidden" name="x-0-id" id="id_x-0-id"></p>"""
        )

    def test_options(self):
        TaggedItemFormSet = generic_inlineformset_factory(
            TaggedItem,
            can_delete=False,
            exclude=['tag'],
            extra=3,
        )
        platypus = Animal.objects.create(
            common_name='Platypus', latin_name='Ornithorhynchus anatinus')
        harmless = platypus.tags.create(tag='harmless')
        mammal = platypus.tags.create(tag='mammal')
        # Works without a queryset.
        formset = TaggedItemFormSet(instance=platypus)
        self.assertEqual(len(formset.forms), 5)
        self.assertHTMLEqual(
            formset.forms[0].as_p(),
            '<input type="hidden" '
            'name="generic_relations-taggeditem-content_type-object_id-0-id" '
            'value="%s" '
            'id="id_generic_relations-taggeditem-content_type-object_id-0-id">'
            % harmless.pk,
        )
        self.assertEqual(formset.forms[0].instance, harmless)
        self.assertEqual(formset.forms[1].instance, mammal)
        self.assertIsNone(formset.forms[2].instance.pk)
        # A queryset can be used to alter display ordering.
        formset = TaggedItemFormSet(
            instance=platypus, queryset=TaggedItem.objects.order_by('-tag'))
        self.assertEqual(len(formset.forms), 5)
        self.assertEqual(formset.forms[0].instance, mammal)
        self.assertEqual(formset.forms[1].instance, harmless)
        self.assertIsNone(formset.forms[2].instance.pk)
        # A queryset that omits items.
        formset = TaggedItemFormSet(
            instance=platypus,
            queryset=TaggedItem.objects.filter(tag__startswith='harm'))
        self.assertEqual(len(formset.forms), 4)
        self.assertEqual(formset.forms[0].instance, harmless)
        self.assertIsNone(formset.forms[1].instance.pk)

    def test_get_queryset_ordering(self):
        """
        BaseGenericInlineFormSet.get_queryset() adds default ordering, if
        needed.
        """
        inline_formset = generic_inlineformset_factory(
            TaggedItem, exclude=('tag',))
        formset = inline_formset(instance=Gecko.objects.create())
        self.assertIs(formset.get_queryset().ordered, True)

    def test_initial(self):
        quartz = Mineral.objects.create(name='Quartz', hardness=7)
        GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
        ctype = ContentType.objects.get_for_model(quartz)
        initial_data = [{
            'tag': 'lizard',
            'content_type': ctype.pk,
            'object_id': quartz.pk,
        }]
        formset = GenericFormSet(initial=initial_data)
        self.assertEqual(formset.forms[0].initial, initial_data[0])

    def test_meta_widgets(self):
        """TaggedItemForm has a widget defined in Meta."""
        Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
        form = Formset().forms[0]
        self.assertIsInstance(form['tag'].field.widget, CustomWidget)

    @isolate_apps('generic_relations')
    def test_incorrect_content_type(self):
        class BadModel(models.Model):
            content_type = models.PositiveIntegerField()

        msg = (
            "fk_name 'generic_relations.BadModel.content_type' is not a "
            "ForeignKey to ContentType"
        )
        with self.assertRaisesMessage(Exception, msg):
            generic_inlineformset_factory(BadModel, TaggedItemForm)
    def test_save_new_uses_form_save(self):
        class SaveTestForm(forms.ModelForm):
            def save(self, *args, **kwargs):
                self.instance.saved_by = 'custom method'
                return super().save(*args, **kwargs)

        Formset = generic_inlineformset_factory(
            ForProxyModelModel, fields='__all__', form=SaveTestForm)
        instance = ProxyRelatedModel.objects.create()
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
        }
        formset = Formset(data, instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        new_obj = formset.save()[0]
        self.assertEqual(new_obj.saved_by, 'custom method')

    def test_save_new_for_proxy(self):
        Formset = generic_inlineformset_factory(
            ForProxyModelModel, fields='__all__', for_concrete_model=False)
        instance = ProxyRelatedModel.objects.create()
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
        }
        formset = Formset(data, instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        new_obj, = formset.save()
        self.assertEqual(new_obj.obj, instance)

    def test_save_new_for_concrete(self):
        Formset = generic_inlineformset_factory(
            ForProxyModelModel, fields='__all__', for_concrete_model=True)
        instance = ProxyRelatedModel.objects.create()
        data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-title': 'foo',
        }
        formset = Formset(data, instance=instance, prefix='form')
        self.assertTrue(formset.is_valid())
        new_obj, = formset.save()
        self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)

    def test_initial_count(self):
        GenericFormSet = generic_inlineformset_factory(TaggedItem)
        data = {
            'form-TOTAL_FORMS': '3',
            'form-INITIAL_FORMS': '3',
            'form-MAX_NUM_FORMS': '',
        }
        formset = GenericFormSet(data=data, prefix='form')
        self.assertEqual(formset.initial_form_count(), 3)
        formset = GenericFormSet(data=data, prefix='form', save_as_new=True)
        self.assertEqual(formset.initial_form_count(), 0)

    def test_save_as_new(self):
        """
        The save_as_new parameter creates new items that are associated with
        the object.
        """
        lion = Animal.objects.create(
            common_name='Lion', latin_name='Panthera leo')
        yellow = lion.tags.create(tag='yellow')
        hairy = lion.tags.create(tag='hairy')
        GenericFormSet = generic_inlineformset_factory(TaggedItem)
        data = {
            'form-TOTAL_FORMS': '3',
            'form-INITIAL_FORMS': '2',
            'form-MAX_NUM_FORMS': '',
            'form-0-id': str(yellow.pk),
            'form-0-tag': 'hunts',
            'form-1-id': str(hairy.pk),
            'form-1-tag': 'roars',
        }
        formset = GenericFormSet(
            data, instance=lion, prefix='form', save_as_new=True)
        self.assertTrue(formset.is_valid())
        tags = formset.save()
        self.assertEqual([tag.tag for tag in tags], ['hunts', 'roars'])
for_concrete_model=False) instance = ProxyRelatedModel.objects.create() data", "<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"><input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\" )", "formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label", "self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_x-0-tag\">Tag:</label> <input id=\"id_x-0-tag\"", "Formset().forms[0] self.assertIsInstance(form['tag'].field.widget, CustomWidget) @isolate_apps('generic_relations') def test_incorrect_content_type(self): class BadModel(models.Model): content_type =", "'object_id': quartz.pk, }] formset = GenericFormSet(initial=initial_data) self.assertEqual(formset.forms[0].initial, initial_data[0]) def test_meta_widgets(self):", "BaseGenericInlineFormSet.get_queryset() adds default ordering, if needed. \"\"\" inline_formset = generic_inlineformset_factory(TaggedItem,", "def test_meta_widgets(self): \"\"\"TaggedItemForm has a widget defined in Meta.\"\"\" Formset", "'hunts', 'form-1-id': str(hairy.pk), 'form-1-tag': 'roars', } formset = GenericFormSet(data, instance=lion,", "def test_initial(self): quartz = Mineral.objects.create(name='Quartz', hardness=7) GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)", "generic_inlineformset_factory(TaggedItem) data = { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '2', 'form-MAX_NUM_FORMS': '',", "mammal = platypus.tags.create(tag='mammal') # Works without a queryset. 
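# For orientation only: a rough, commented sketch of what the imported
# TaggedItem model presumably looks like. The real definition lives in
# .models; the field names here are inferred from the form field names and
# max lengths exercised below, so details may differ.
#
# from django.contrib.contenttypes.fields import GenericForeignKey
#
# class TaggedItem(models.Model):
#     tag = models.SlugField(max_length=50)
#     content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#     object_id = models.PositiveIntegerField()
#     content_object = GenericForeignKey('content_type', 'object_id')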
class CustomWidget(forms.TextInput):
    pass


class TaggedItemForm(forms.ModelForm):
    class Meta:
        model = TaggedItem
        fields = '__all__'
        widgets = {'tag': CustomWidget}
\"\"\" lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') yellow", "name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" value=\"shiny\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input", "name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input type=\"hidden\"", "'form-1-id': str(hairy.pk), 'form-1-tag': 'roars', } formset = GenericFormSet(data, instance=lion, prefix='form',", "name=\"x-0-DELETE\" id=\"id_x-0-DELETE\"> <input type=\"hidden\" name=\"x-0-id\" id=\"id_x-0-id\"></p>\"\"\" ) def test_options(self): TaggedItemFormSet", "'form-1-tag': 'roars', } formset = GenericFormSet(data, instance=lion, prefix='form', save_as_new=True) self.assertTrue(formset.is_valid())", "self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel) def test_initial_count(self): GenericFormSet =", "id=\"id_x-0-DELETE\"> <input type=\"hidden\" name=\"x-0-id\" id=\"id_x-0-id\"></p>\"\"\" ) def test_options(self): TaggedItemFormSet =", "'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj =", "def test_output(self): GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet() self.assertHTMLEqual(", "id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\"", "self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk) # A queryset can be", "'3', 'form-INITIAL_FORMS': '3', 'form-MAX_NUM_FORMS': '', } formset = GenericFormSet(data=data, prefix='form')", "leo') formset = GenericFormSet(instance=lion, prefix='x') self.assertHTMLEqual( ''.join(form.as_p() for form in", "generic_inlineformset_factory(TaggedItem, exclude=('tag',)) formset = inline_formset(instance=Gecko.objects.create()) self.assertIs(formset.get_queryset().ordered, True) def test_initial(self): quartz", "'form-0-title': 'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj", "def test_initial_count(self): GenericFormSet = generic_inlineformset_factory(TaggedItem) data = { 'form-TOTAL_FORMS': '3',", "data = { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '3', 'form-MAX_NUM_FORMS': '', }", "formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertEqual(new_obj.obj,", "self.assertEqual(len(formset.forms), 5) self.assertEqual(formset.forms[0].instance, mammal) self.assertEqual(formset.forms[1].instance, harmless) 
self.assertIsNone(formset.forms[2].instance.pk) # A queryset", "self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk) # A queryset can be used to", "def test_save_new_uses_form_save(self): class SaveTestForm(forms.ModelForm): def save(self, *args, **kwargs): self.instance.saved_by =", "' 'id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\">' % harmless.pk ) self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk)", "= lion.tags.create(tag='hairy') GenericFormSet = generic_inlineformset_factory(TaggedItem) data = { 'form-TOTAL_FORMS': '3',", "that are associated with the object. \"\"\" lion = Animal.objects.create(common_name='Lion',", "type=\"text\" name=\"x-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_x-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"x-0-DELETE\" id=\"id_x-0-DELETE\"> <input", "} formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj = formset.save()[0]", "ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '',", "save_as_new parameter creates new items that are associated with the", "= generic_inlineformset_factory(TaggedItem, TaggedItemForm) form = Formset().forms[0] self.assertIsInstance(form['tag'].field.widget, CustomWidget) @isolate_apps('generic_relations') def", "Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True) instance = ProxyRelatedModel.objects.create() data =", "django import forms from django.contrib.contenttypes.forms import generic_inlineformset_factory from django.contrib.contenttypes.models import", "for=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-1-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-1-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-id\"></p>\"\"\" %", "test_incorrect_content_type(self): class BadModel(models.Model): content_type = models.PositiveIntegerField() msg = \"fk_name 'generic_relations.BadModel.content_type'", "test_save_as_new(self): \"\"\" The save_as_new parameter creates new items that are", "in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p>", "name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"><input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\"", "maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-1-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\"> <input type=\"hidden\" 
name=\"generic_relations-taggeditem-content_type-object_id-1-id\"", "has a widget defined in Meta.\"\"\" Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)", "GenericFormSet() self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label>", "<input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\"", "import models from django.test import TestCase from django.test.utils import isolate_apps", "Animal.objects.create(common_name='Lion', latin_name='Panthera leo') yellow = lion.tags.create(tag='yellow') hairy = lion.tags.create(tag='hairy') GenericFormSet", "( Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem, ) class CustomWidget(forms.TextInput):", "maxlength=\"50\"></p> <p><label for=\"id_x-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"x-0-DELETE\" id=\"id_x-0-DELETE\"> <input type=\"hidden\" name=\"x-0-id\"", "widgets = {'tag': CustomWidget} class GenericInlineFormsetTests(TestCase): def test_output(self): GenericFormSet =", "id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" value=\"shiny\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\"", "# A queryset that omits items. 
formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm'))", "= models.PositiveIntegerField() msg = \"fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey", "save(self, *args, **kwargs): self.instance.saved_by = 'custom method' return super().save(*args, **kwargs)", "save_as_new=True) self.assertTrue(formset.is_valid()) tags = formset.save() self.assertEqual([tag.tag for tag in tags],", "= {'tag': CustomWidget} class GenericInlineFormsetTests(TestCase): def test_output(self): GenericFormSet = generic_inlineformset_factory(TaggedItem,", "<input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\" ) formset", "= GenericFormSet(instance=lion, prefix='x') self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label", "tagged_item_id ) lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') formset = GenericFormSet(instance=lion,", "def test_save_new_for_proxy(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False) instance = ProxyRelatedModel.objects.create()", "for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input", "lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') formset = GenericFormSet(instance=lion, prefix='x') self.assertHTMLEqual(", "'', 'form-0-title': 'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid())", "= lion.tags.create(tag='yellow') hairy = lion.tags.create(tag='hairy') GenericFormSet = generic_inlineformset_factory(TaggedItem) data =", "'id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\">' % harmless.pk ) self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk) #", "fields='__all__', for_concrete_model=False) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1',", "= TaggedItem fields = '__all__' widgets = {'tag': CustomWidget} class", ") platypus = Animal.objects.create( common_name='Platypus', latin_name='Ornithorhynchus anatinus', ) platypus.tags.create(tag='shiny') GenericFormSet", "''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"", "def test_save_as_new(self): \"\"\" The save_as_new parameter creates new items that", "django.db import models from django.test import TestCase from django.test.utils import", "queryset can be used to alter display ordering. 
formset =", "for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\"", "type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\" ) formset =", "['hunts', 'roars']) hunts, roars = tags self.assertSequenceEqual(lion.tags.order_by('tag'), [hairy, hunts, roars,", "<input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\" type=\"text\"", "pass class TaggedItemForm(forms.ModelForm): class Meta: model = TaggedItem fields =", "'form-0-id': str(yellow.pk), 'form-0-tag': 'hunts', 'form-1-id': str(hairy.pk), 'form-1-tag': 'roars', } formset", "GenericFormSet(instance=Animal()) self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label>", "= Animal.objects.create( common_name='Platypus', latin_name='Ornithorhynchus anatinus', ) platypus.tags.create(tag='shiny') GenericFormSet = generic_inlineformset_factory(TaggedItem,", "<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\"", ") self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk) # A queryset can", "in tags], ['hunts', 'roars']) hunts, roars = tags self.assertSequenceEqual(lion.tags.order_by('tag'), [hairy,", "from django.test import TestCase from django.test.utils import isolate_apps from .models", "self.assertTrue(formset.is_valid()) new_obj = formset.save()[0] self.assertEqual(new_obj.saved_by, 'custom method') def test_save_new_for_proxy(self): Formset", "generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS':", "self.assertEqual(new_obj.obj, instance) def test_save_new_for_concrete(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True) instance", "instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertEqual(new_obj.obj, instance) def test_save_new_for_concrete(self):", "def test_options(self): TaggedItemFormSet = generic_inlineformset_factory( TaggedItem, can_delete=False, exclude=['tag'], extra=3, )", "tags = formset.save() self.assertEqual([tag.tag for tag in tags], ['hunts', 'roars'])", "'', } formset = GenericFormSet(data=data, prefix='form') self.assertEqual(formset.initial_form_count(), 3) formset =", ") formset = GenericFormSet(instance=Animal()) 
self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms),", "self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"", "queryset. formset = TaggedItemFormSet(instance=platypus) self.assertEqual(len(formset.forms), 5) self.assertHTMLEqual( formset.forms[0].as_p(), '<input type=\"hidden\"", "the object. \"\"\" lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') yellow =", "= Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertEqual(new_obj.obj, instance)", "formset.forms), \"\"\"<p><label for=\"id_x-0-tag\">Tag:</label> <input id=\"id_x-0-tag\" type=\"text\" name=\"x-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_x-0-DELETE\">Delete:</label>", "True) def test_initial(self): quartz = Mineral.objects.create(name='Quartz', hardness=7) GenericFormSet = generic_inlineformset_factory(TaggedItem,", "id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-1-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\">Delete:</label>", "GenericFormSet(initial=initial_data) self.assertEqual(formset.forms[0].initial, initial_data[0]) def test_meta_widgets(self): \"\"\"TaggedItemForm has a widget defined", "import ContentType from django.db import models from django.test import TestCase", "formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertNotIsInstance(new_obj.obj,", "TaggedItemForm(forms.ModelForm): class Meta: model = TaggedItem fields = '__all__' widgets", "form = Formset().forms[0] self.assertIsInstance(form['tag'].field.widget, CustomWidget) @isolate_apps('generic_relations') def test_incorrect_content_type(self): class BadModel(models.Model):", "form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\"", "<input type=\"checkbox\" name=\"x-0-DELETE\" id=\"id_x-0-DELETE\"> <input type=\"hidden\" name=\"x-0-id\" id=\"id_x-0-id\"></p>\"\"\" ) def", "\"fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey to ContentType\" with self.assertRaisesMessage(Exception,", "= \"fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey to ContentType\" with", "'roars']) hunts, roars = tags self.assertSequenceEqual(lion.tags.order_by('tag'), [hairy, hunts, roars, yellow])", "self.assertEqual(formset.initial_form_count(), 3) formset = GenericFormSet(data=data, prefix='form', save_as_new=True) self.assertEqual(formset.initial_form_count(), 0) def", "new_obj = formset.save()[0] self.assertEqual(new_obj.saved_by, 'custom method') def test_save_new_for_proxy(self): Formset =", "= generic_inlineformset_factory(TaggedItem, extra=1) ctype = ContentType.objects.get_for_model(quartz) initial_data = [{ 'tag':", "GenericFormSet(instance=lion, prefix='x') self.assertHTMLEqual( ''.join(form.as_p() for form in 
formset.forms), \"\"\"<p><label for=\"id_x-0-tag\">Tag:</label>", "Meta.\"\"\" Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm) form = Formset().forms[0] self.assertIsInstance(form['tag'].field.widget, CustomWidget)", "a ForeignKey to ContentType\" with self.assertRaisesMessage(Exception, msg): generic_inlineformset_factory(BadModel, TaggedItemForm) def", "if needed. \"\"\" inline_formset = generic_inlineformset_factory(TaggedItem, exclude=('tag',)) formset = inline_formset(instance=Gecko.objects.create())", "'form-INITIAL_FORMS': '2', 'form-MAX_NUM_FORMS': '', 'form-0-id': str(yellow.pk), 'form-0-tag': 'hunts', 'form-1-id': str(hairy.pk),", "4) self.assertEqual(formset.forms[0].instance, harmless) self.assertIsNone(formset.forms[1].instance.pk) def test_get_queryset_ordering(self): \"\"\" BaseGenericInlineFormSet.get_queryset() adds default", "return super().save(*args, **kwargs) Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm) instance =", "test_output(self): GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet() self.assertHTMLEqual( ''.join(form.as_p()", "class SaveTestForm(forms.ModelForm): def save(self, *args, **kwargs): self.instance.saved_by = 'custom method'", "name=\"generic_relations-taggeditem-content_type-object_id-1-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-1-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\"> <input type=\"hidden\"", "% tagged_item_id ) lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') formset =", "Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj = formset.save()[0] self.assertEqual(new_obj.saved_by, 'custom method')", "TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.order_by('-tag')) self.assertEqual(len(formset.forms), 5) self.assertEqual(formset.forms[0].instance, mammal) self.assertEqual(formset.forms[1].instance, harmless) self.assertIsNone(formset.forms[2].instance.pk) #", "form=SaveTestForm) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS':", "content_type = models.PositiveIntegerField() msg = \"fk_name 'generic_relations.BadModel.content_type' is not a", "\"\"\" The save_as_new parameter creates new items that are associated", "prefix='x') self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_x-0-tag\">Tag:</label> <input", "test_initial(self): quartz = Mineral.objects.create(name='Quartz', hardness=7) GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) ctype", "self.assertEqual(new_obj.saved_by, 'custom method') def test_save_new_for_proxy(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False)", "mammal) self.assertEqual(formset.forms[1].instance, harmless) self.assertIsNone(formset.forms[2].instance.pk) # A queryset that omits items.", "tagged_item_id = TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms),", "''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" 
type=\"text\"", "items that are associated with the object. \"\"\" lion =", "lion.tags.create(tag='yellow') hairy = lion.tags.create(tag='hairy') GenericFormSet = generic_inlineformset_factory(TaggedItem) data = {", "<input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p> <p><label", "name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\" ' 'id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\">' % harmless.pk ) self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance,", "TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm')) self.assertEqual(len(formset.forms), 4) self.assertEqual(formset.forms[0].instance, harmless) self.assertIsNone(formset.forms[1].instance.pk) def test_get_queryset_ordering(self): \"\"\"", "value=\"shiny\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input type=\"hidden\"", "harmless.pk ) self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk) # A queryset", "yellow = lion.tags.create(tag='yellow') hairy = lion.tags.create(tag='hairy') GenericFormSet = generic_inlineformset_factory(TaggedItem) data", "formset.save() self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel) def test_initial_count(self): GenericFormSet = generic_inlineformset_factory(TaggedItem) data =", "platypus = Animal.objects.create( common_name='Platypus', latin_name='Ornithorhynchus anatinus', ) platypus.tags.create(tag='shiny') GenericFormSet =", "= TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm')) self.assertEqual(len(formset.forms), 4) self.assertEqual(formset.forms[0].instance, harmless) self.assertIsNone(formset.forms[1].instance.pk) def test_get_queryset_ordering(self):", "display ordering. 
formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.order_by('-tag')) self.assertEqual(len(formset.forms), 5) self.assertEqual(formset.forms[0].instance, mammal)", ") platypus.tags.create(tag='shiny') GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet(instance=platypus) tagged_item_id", "common_name='Platypus', latin_name='Ornithorhynchus anatinus', ) platypus.tags.create(tag='shiny') GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset", "\"\"\"TaggedItemForm has a widget defined in Meta.\"\"\" Formset = generic_inlineformset_factory(TaggedItem,", "0) def test_save_as_new(self): \"\"\" The save_as_new parameter creates new items", "'tag': 'lizard', 'content_type': ctype.pk, 'object_id': quartz.pk, }] formset = GenericFormSet(initial=initial_data)", "<p><label for=\"id_x-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"x-0-DELETE\" id=\"id_x-0-DELETE\"> <input type=\"hidden\" name=\"x-0-id\" id=\"id_x-0-id\"></p>\"\"\"", "generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS':", "'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, =", "@isolate_apps('generic_relations') def test_incorrect_content_type(self): class BadModel(models.Model): content_type = models.PositiveIntegerField() msg =", "A queryset that omits items. formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm')) self.assertEqual(len(formset.forms),", "\"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\"> Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label>", "django.test import TestCase from django.test.utils import isolate_apps from .models import", "'generic_relations.BadModel.content_type' is not a ForeignKey to ContentType\" with self.assertRaisesMessage(Exception, msg):", "name=\"generic_relations-taggeditem-content_type-object_id-1-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-id\"></p>\"\"\" % tagged_item_id ) lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo')", "queryset that omits items. 
formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm')) self.assertEqual(len(formset.forms), 4)", "<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\"", "instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj = formset.save()[0] self.assertEqual(new_obj.saved_by, 'custom method') def", "tags], ['hunts', 'roars']) hunts, roars = tags self.assertSequenceEqual(lion.tags.order_by('tag'), [hairy, hunts,", "new_obj, = formset.save() self.assertEqual(new_obj.obj, instance) def test_save_new_for_concrete(self): Formset = generic_inlineformset_factory(ForProxyModelModel,", "TestCase from django.test.utils import isolate_apps from .models import ( Animal,", "<input id=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-1-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-1-DELETE\"", "= { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo',", "'1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', } formset =", "= Animal.objects.create(common_name='Lion', latin_name='Panthera leo') formset = GenericFormSet(instance=lion, prefix='x') self.assertHTMLEqual( ''.join(form.as_p()", "\"\"\" BaseGenericInlineFormSet.get_queryset() adds default ordering, if needed. 
\"\"\" inline_formset =", "with self.assertRaisesMessage(Exception, msg): generic_inlineformset_factory(BadModel, TaggedItemForm) def test_save_new_uses_form_save(self): class SaveTestForm(forms.ModelForm): def", "prefix='form') self.assertEqual(formset.initial_form_count(), 3) formset = GenericFormSet(data=data, prefix='form', save_as_new=True) self.assertEqual(formset.initial_form_count(), 0)", "self.assertIs(formset.get_queryset().ordered, True) def test_initial(self): quartz = Mineral.objects.create(name='Quartz', hardness=7) GenericFormSet =", "extra=3, ) platypus = Animal.objects.create(common_name='Platypus', latin_name='Ornithorhynchus anatinus') harmless = platypus.tags.create(tag='harmless')", "Meta: model = TaggedItem fields = '__all__' widgets = {'tag':", "''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_x-0-tag\">Tag:</label> <input id=\"id_x-0-tag\" type=\"text\"", "generic_inlineformset_factory from django.contrib.contenttypes.models import ContentType from django.db import models from", "= formset.save() self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel) def test_initial_count(self): GenericFormSet = generic_inlineformset_factory(TaggedItem) data", "'form-INITIAL_FORMS': '3', 'form-MAX_NUM_FORMS': '', } formset = GenericFormSet(data=data, prefix='form') self.assertEqual(formset.initial_form_count(),", "value=\"%s\" ' 'id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\">' % harmless.pk ) self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance, mammal)", "**kwargs) Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm) instance = ProxyRelatedModel.objects.create() data", "harmless) self.assertIsNone(formset.forms[2].instance.pk) # A queryset that omits items. formset =", "def test_get_queryset_ordering(self): \"\"\" BaseGenericInlineFormSet.get_queryset() adds default ordering, if needed. \"\"\"", "id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\" ) platypus = Animal.objects.create( common_name='Platypus', latin_name='Ornithorhynchus anatinus', ) platypus.tags.create(tag='shiny')", "formset = GenericFormSet(data=data, prefix='form') self.assertEqual(formset.initial_form_count(), 3) formset = GenericFormSet(data=data, prefix='form',", "needed. \"\"\" inline_formset = generic_inlineformset_factory(TaggedItem, exclude=('tag',)) formset = inline_formset(instance=Gecko.objects.create()) self.assertIs(formset.get_queryset().ordered,", "CustomWidget(forms.TextInput): pass class TaggedItemForm(forms.ModelForm): class Meta: model = TaggedItem fields", "self.assertEqual(formset.forms[0].initial, initial_data[0]) def test_meta_widgets(self): \"\"\"TaggedItemForm has a widget defined in", "self.assertEqual(formset.initial_form_count(), 0) def test_save_as_new(self): \"\"\" The save_as_new parameter creates new", "instance) def test_save_new_for_concrete(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True) instance =", "formset.forms[0].as_p(), '<input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\" ' 'id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\">' % harmless.pk )", "without a queryset. 
formset = TaggedItemFormSet(instance=platypus) self.assertEqual(len(formset.forms), 5) self.assertHTMLEqual( formset.forms[0].as_p(),", "self.assertEqual(formset.forms[0].instance, harmless) self.assertIsNone(formset.forms[1].instance.pk) def test_get_queryset_ordering(self): \"\"\" BaseGenericInlineFormSet.get_queryset() adds default ordering,", "= inline_formset(instance=Gecko.objects.create()) self.assertIs(formset.get_queryset().ordered, True) def test_initial(self): quartz = Mineral.objects.create(name='Quartz', hardness=7)", "generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS':", "= GenericFormSet(instance=Animal()) self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\">", "hardness=7) GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) ctype = ContentType.objects.get_for_model(quartz) initial_data =", "= generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True) instance = ProxyRelatedModel.objects.create() data = {", "def test_save_new_for_concrete(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True) instance = ProxyRelatedModel.objects.create()", "generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet() self.assertHTMLEqual( ''.join(form.as_p() for form in", "extra=1) ctype = ContentType.objects.get_for_model(quartz) initial_data = [{ 'tag': 'lizard', 'content_type':", "anatinus') harmless = platypus.tags.create(tag='harmless') mammal = platypus.tags.create(tag='mammal') # Works without", "} formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save()", "Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel) def", "ProxyRelatedModel, TaggedItem, ) class CustomWidget(forms.TextInput): pass class TaggedItemForm(forms.ModelForm): class Meta:", "GenericInlineFormsetTests(TestCase): def test_output(self): GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet()", "method') def test_save_new_for_proxy(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False) instance =", "instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel) def test_initial_count(self):", "Mineral.objects.create(name='Quartz', hardness=7) GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) ctype = ContentType.objects.get_for_model(quartz) initial_data", "in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" value=\"shiny\" maxlength=\"50\"></p>", "}] formset = GenericFormSet(initial=initial_data) self.assertEqual(formset.forms[0].initial, initial_data[0]) def test_meta_widgets(self): \"\"\"TaggedItemForm has", "type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label 
for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"><input type=\"hidden\"", "latin_name='Panthera leo') formset = GenericFormSet(instance=lion, prefix='x') self.assertHTMLEqual( ''.join(form.as_p() for form", "TaggedItemForm) form = Formset().forms[0] self.assertIsInstance(form['tag'].field.widget, CustomWidget) @isolate_apps('generic_relations') def test_incorrect_content_type(self): class", "<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-1-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-1-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-id\"></p>\"\"\"", "fields='__all__', form=SaveTestForm) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1',", "GenericFormSet(data=data, prefix='form') self.assertEqual(formset.initial_form_count(), 3) formset = GenericFormSet(data=data, prefix='form', save_as_new=True) self.assertEqual(formset.initial_form_count(),", "can be used to alter display ordering. formset = TaggedItemFormSet(instance=platypus,", "= TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label", "prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel) def test_initial_count(self): GenericFormSet", "self.assertHTMLEqual( formset.forms[0].as_p(), '<input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\" ' 'id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\">' % harmless.pk", "tag in tags], ['hunts', 'roars']) hunts, roars = tags self.assertSequenceEqual(lion.tags.order_by('tag'),", "[{ 'tag': 'lizard', 'content_type': ctype.pk, 'object_id': quartz.pk, }] formset =", "'2', 'form-MAX_NUM_FORMS': '', 'form-0-id': str(yellow.pk), 'form-0-tag': 'hunts', 'form-1-id': str(hairy.pk), 'form-1-tag':", "formset.save()[0] self.assertEqual(new_obj.saved_by, 'custom method') def test_save_new_for_proxy(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__',", "omits items. 
formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm')) self.assertEqual(len(formset.forms), 4) self.assertEqual(formset.forms[0].instance, harmless)", "type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"><input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\" ) platypus = Animal.objects.create(", "data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title':", "= formset.save() self.assertEqual(new_obj.obj, instance) def test_save_new_for_concrete(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__',", "Mineral, ProxyRelatedModel, TaggedItem, ) class CustomWidget(forms.TextInput): pass class TaggedItemForm(forms.ModelForm): class", "= { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '2', 'form-MAX_NUM_FORMS': '', 'form-0-id': str(yellow.pk),", "def save(self, *args, **kwargs): self.instance.saved_by = 'custom method' return super().save(*args,", "method' return super().save(*args, **kwargs) Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm) instance", "extra=1) formset = GenericFormSet(instance=platypus) tagged_item_id = TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id self.assertHTMLEqual( ''.join(form.as_p()", "to alter display ordering. formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.order_by('-tag')) self.assertEqual(len(formset.forms), 5)", "name=\"generic_relations-taggeditem-content_type-object_id-0-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\" ) platypus = Animal.objects.create( common_name='Platypus', latin_name='Ornithorhynchus anatinus', )", "formset = GenericFormSet(instance=platypus) tagged_item_id = TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id self.assertHTMLEqual( ''.join(form.as_p() for", "# Works without a queryset. formset = TaggedItemFormSet(instance=platypus) self.assertEqual(len(formset.forms), 5)", "platypus.tags.create(tag='harmless') mammal = platypus.tags.create(tag='mammal') # Works without a queryset. 
formset", "formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" value=\"shiny\" maxlength=\"50\"></p> <p><label", ") platypus = Animal.objects.create(common_name='Platypus', latin_name='Ornithorhynchus anatinus') harmless = platypus.tags.create(tag='harmless') mammal", "for=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-1-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-1-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\">Delete:</label> <input type=\"checkbox\"", "is not a ForeignKey to ContentType\" with self.assertRaisesMessage(Exception, msg): generic_inlineformset_factory(BadModel,", "str(hairy.pk), 'form-1-tag': 'roars', } formset = GenericFormSet(data, instance=lion, prefix='form', save_as_new=True)", "from django import forms from django.contrib.contenttypes.forms import generic_inlineformset_factory from django.contrib.contenttypes.models", "models from django.test import TestCase from django.test.utils import isolate_apps from", "from django.db import models from django.test import TestCase from django.test.utils", "= generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False) instance = ProxyRelatedModel.objects.create() data = {", "GenericFormSet(instance=platypus) tagged_item_id = TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id self.assertHTMLEqual( ''.join(form.as_p() for form in", "= generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet(instance=platypus) tagged_item_id = TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id", "'custom method' return super().save(*args, **kwargs) Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm)", "BadModel(models.Model): content_type = models.PositiveIntegerField() msg = \"fk_name 'generic_relations.BadModel.content_type' is not", "= 'custom method' return super().save(*args, **kwargs) Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__',", "object. \"\"\" lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') yellow = lion.tags.create(tag='yellow')", "ordering, if needed. \"\"\" inline_formset = generic_inlineformset_factory(TaggedItem, exclude=('tag',)) formset =", "self.assertIsInstance(form['tag'].field.widget, CustomWidget) @isolate_apps('generic_relations') def test_incorrect_content_type(self): class BadModel(models.Model): content_type = models.PositiveIntegerField()", "= platypus.tags.create(tag='mammal') # Works without a queryset. 
formset = TaggedItemFormSet(instance=platypus)", "type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-1-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-1-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-id\"></p>\"\"\" % tagged_item_id )", "name=\"x-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_x-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"x-0-DELETE\" id=\"id_x-0-DELETE\"> <input type=\"hidden\"", "quartz.pk, }] formset = GenericFormSet(initial=initial_data) self.assertEqual(formset.forms[0].initial, initial_data[0]) def test_meta_widgets(self): \"\"\"TaggedItemForm", "= { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '3', 'form-MAX_NUM_FORMS': '', } formset", "formset = GenericFormSet(initial=initial_data) self.assertEqual(formset.forms[0].initial, initial_data[0]) def test_meta_widgets(self): \"\"\"TaggedItemForm has a", "for=\"id_x-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"x-0-DELETE\" id=\"id_x-0-DELETE\"> <input type=\"hidden\" name=\"x-0-id\" id=\"id_x-0-id\"></p>\"\"\" )", "import generic_inlineformset_factory from django.contrib.contenttypes.models import ContentType from django.db import models", "for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\"", "instance=lion, prefix='form', save_as_new=True) self.assertTrue(formset.is_valid()) tags = formset.save() self.assertEqual([tag.tag for tag", "harmless) self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk) # A queryset can be used", "GenericFormSet = generic_inlineformset_factory(TaggedItem) data = { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '3',", "GenericFormSet(data=data, prefix='form', save_as_new=True) self.assertEqual(formset.initial_form_count(), 0) def test_save_as_new(self): \"\"\" The save_as_new", "initial_data = [{ 'tag': 'lizard', 'content_type': ctype.pk, 'object_id': quartz.pk, }]", "= generic_inlineformset_factory( TaggedItem, can_delete=False, exclude=['tag'], extra=3, ) platypus = Animal.objects.create(common_name='Platypus',", "= GenericFormSet(data, instance=lion, prefix='form', save_as_new=True) self.assertTrue(formset.is_valid()) tags = formset.save() self.assertEqual([tag.tag", "object_id=platypus.id).id self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), \"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\">Tag:</label> <input", "exclude=('tag',)) formset = inline_formset(instance=Gecko.objects.create()) self.assertIs(formset.get_queryset().ordered, True) def test_initial(self): quartz =", "ContentType from django.db import models from django.test import TestCase from", "formset = GenericFormSet(data=data, prefix='form', save_as_new=True) self.assertEqual(formset.initial_form_count(), 0) def test_save_as_new(self): \"\"\"", "{ 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '3', 'form-MAX_NUM_FORMS': '', } formset =", "% harmless.pk ) self.assertEqual(formset.forms[0].instance, harmless) self.assertEqual(formset.forms[1].instance, mammal) self.assertIsNone(formset.forms[2].instance.pk) # A", "5) self.assertEqual(formset.forms[0].instance, mammal) self.assertEqual(formset.forms[1].instance, 
harmless) self.assertIsNone(formset.forms[2].instance.pk) # A queryset that", "can_delete=False, exclude=['tag'], extra=3, ) platypus = Animal.objects.create(common_name='Platypus', latin_name='Ornithorhynchus anatinus') harmless", "id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\" ) formset = GenericFormSet(instance=Animal()) self.assertHTMLEqual( ''.join(form.as_p() for form in", "latin_name='Ornithorhynchus anatinus', ) platypus.tags.create(tag='shiny') GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset =", "import forms from django.contrib.contenttypes.forms import generic_inlineformset_factory from django.contrib.contenttypes.models import ContentType", ".models import ( Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem, )", "id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"><input", "= generic_inlineformset_factory(TaggedItem) data = { 'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '3', 'form-MAX_NUM_FORMS':", "generic_inlineformset_factory(BadModel, TaggedItemForm) def test_save_new_uses_form_save(self): class SaveTestForm(forms.ModelForm): def save(self, *args, **kwargs):", "test_options(self): TaggedItemFormSet = generic_inlineformset_factory( TaggedItem, can_delete=False, exclude=['tag'], extra=3, ) platypus", "prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertEqual(new_obj.obj, instance) def test_save_new_for_concrete(self): Formset", "\"\"\"<p><label for=\"id_x-0-tag\">Tag:</label> <input id=\"id_x-0-tag\" type=\"text\" name=\"x-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_x-0-DELETE\">Delete:</label> <input", "Animal.objects.create(common_name='Platypus', latin_name='Ornithorhynchus anatinus') harmless = platypus.tags.create(tag='harmless') mammal = platypus.tags.create(tag='mammal') #", "formset.save() self.assertEqual(new_obj.obj, instance) def test_save_new_for_concrete(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True)", "are associated with the object. 
\"\"\" lion = Animal.objects.create(common_name='Lion', latin_name='Panthera", "Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm) instance = ProxyRelatedModel.objects.create() data =", "maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\"><input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\"></p>\"\"\"", "<input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" value=\"shiny\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\"", "**kwargs): self.instance.saved_by = 'custom method' return super().save(*args, **kwargs) Formset =", "GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet(instance=platypus) tagged_item_id = TaggedItem.objects.get(tag='shiny',", "latin_name='Ornithorhynchus anatinus') harmless = platypus.tags.create(tag='harmless') mammal = platypus.tags.create(tag='mammal') # Works", "= generic_inlineformset_factory(TaggedItem, exclude=('tag',)) formset = inline_formset(instance=Gecko.objects.create()) self.assertIs(formset.get_queryset().ordered, True) def test_initial(self):", "= formset.save() self.assertEqual([tag.tag for tag in tags], ['hunts', 'roars']) hunts,", "formset.save() self.assertEqual([tag.tag for tag in tags], ['hunts', 'roars']) hunts, roars", "def test_incorrect_content_type(self): class BadModel(models.Model): content_type = models.PositiveIntegerField() msg = \"fk_name", "type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\" ' 'id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\">' % harmless.pk ) self.assertEqual(formset.forms[0].instance, harmless)", "Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem, ) class CustomWidget(forms.TextInput): pass", "parameter creates new items that are associated with the object.", "self.instance.saved_by = 'custom method' return super().save(*args, **kwargs) Formset = generic_inlineformset_factory(ForProxyModelModel,", "id=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-1-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-id\"></p>\"\"\" % tagged_item_id ) lion =", "self.assertEqual(len(formset.forms), 5) self.assertHTMLEqual( formset.forms[0].as_p(), '<input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-0-id\" value=\"%s\" ' 'id=\"id_generic_relations-taggeditem-content_type-object_id-0-id\">'", "import isolate_apps from .models import ( Animal, ForProxyModelModel, Gecko, Mineral,", "harmless) self.assertIsNone(formset.forms[1].instance.pk) def test_get_queryset_ordering(self): \"\"\" BaseGenericInlineFormSet.get_queryset() adds default ordering, if", "\"\"\"<p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\">Tag:</label> <input id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" 
name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" value=\"shiny\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label>", "adds default ordering, if needed. \"\"\" inline_formset = generic_inlineformset_factory(TaggedItem, exclude=('tag',))", "'lizard', 'content_type': ctype.pk, 'object_id': quartz.pk, }] formset = GenericFormSet(initial=initial_data) self.assertEqual(formset.forms[0].initial,", "} formset = GenericFormSet(data, instance=lion, prefix='form', save_as_new=True) self.assertTrue(formset.is_valid()) tags =", "msg = \"fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey to ContentType\"", "leo') yellow = lion.tags.create(tag='yellow') hairy = lion.tags.create(tag='hairy') GenericFormSet = generic_inlineformset_factory(TaggedItem)", "platypus.tags.create(tag='mammal') # Works without a queryset. formset = TaggedItemFormSet(instance=platypus) self.assertEqual(len(formset.forms),", "'form-TOTAL_FORMS': '3', 'form-INITIAL_FORMS': '2', 'form-MAX_NUM_FORMS': '', 'form-0-id': str(yellow.pk), 'form-0-tag': 'hunts',", "fields = '__all__' widgets = {'tag': CustomWidget} class GenericInlineFormsetTests(TestCase): def", "from django.contrib.contenttypes.models import ContentType from django.db import models from django.test", "'roars', } formset = GenericFormSet(data, instance=lion, prefix='form', save_as_new=True) self.assertTrue(formset.is_valid()) tags", "TaggedItem, ) class CustomWidget(forms.TextInput): pass class TaggedItemForm(forms.ModelForm): class Meta: model", "id=\"id_generic_relations-taggeditem-content_type-object_id-0-tag\" type=\"text\" name=\"generic_relations-taggeditem-content_type-object_id-0-tag\" maxlength=\"50\"></p> <p><label for=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">Delete:</label> <input type=\"checkbox\" name=\"generic_relations-taggeditem-content_type-object_id-0-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-0-DELETE\">", "id=\"id_generic_relations-taggeditem-content_type-object_id-1-id\"></p>\"\"\" % tagged_item_id ) lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo') formset", "self.assertRaisesMessage(Exception, msg): generic_inlineformset_factory(BadModel, TaggedItemForm) def test_save_new_uses_form_save(self): class SaveTestForm(forms.ModelForm): def save(self,", "GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) ctype = ContentType.objects.get_for_model(quartz) initial_data = [{", "id=\"id_x-0-id\"></p>\"\"\" ) def test_options(self): TaggedItemFormSet = generic_inlineformset_factory( TaggedItem, can_delete=False, exclude=['tag'],", "name=\"generic_relations-taggeditem-content_type-object_id-1-DELETE\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-DELETE\"> <input type=\"hidden\" name=\"generic_relations-taggeditem-content_type-object_id-1-id\" id=\"id_generic_relations-taggeditem-content_type-object_id-1-id\"></p>\"\"\" % tagged_item_id ) lion", "GenericFormSet(data, instance=lion, prefix='form', save_as_new=True) self.assertTrue(formset.is_valid()) tags = formset.save() self.assertEqual([tag.tag for", "TaggedItemFormSet = generic_inlineformset_factory( TaggedItem, can_delete=False, exclude=['tag'], extra=3, ) platypus =", "form in formset.forms), \"\"\"<p><label for=\"id_x-0-tag\">Tag:</label> <input id=\"id_x-0-tag\" type=\"text\" name=\"x-0-tag\" maxlength=\"50\"></p>", "test_save_new_uses_form_save(self): class SaveTestForm(forms.ModelForm): def 
save(self, *args, **kwargs): self.instance.saved_by = 'custom", "GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet() self.assertHTMLEqual( ''.join(form.as_p() for", "inline_formset(instance=Gecko.objects.create()) self.assertIs(formset.get_queryset().ordered, True) def test_initial(self): quartz = Mineral.objects.create(name='Quartz', hardness=7) GenericFormSet", "= Animal.objects.create(common_name='Platypus', latin_name='Ornithorhynchus anatinus') harmless = platypus.tags.create(tag='harmless') mammal = platypus.tags.create(tag='mammal')", "{'tag': CustomWidget} class GenericInlineFormsetTests(TestCase): def test_output(self): GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)", "for tag in tags], ['hunts', 'roars']) hunts, roars = tags", "type=\"hidden\" name=\"x-0-id\" id=\"id_x-0-id\"></p>\"\"\" ) def test_options(self): TaggedItemFormSet = generic_inlineformset_factory( TaggedItem,", "= platypus.tags.create(tag='harmless') mammal = platypus.tags.create(tag='mammal') # Works without a queryset.", "'custom method') def test_save_new_for_proxy(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False) instance" ]
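# -- Added sketch (not part of the original test file) ------------------------
# The tests above import TaggedItem from .models, which is not included in
# this dump. As a rough, assumed sketch of what such a model needs (field
# names taken from the tests, everything else guessed), it is a plain model
# wired to Django's contenttypes framework:
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models


class TaggedItemSketch(models.Model):
    # 'tag' is the only visible field the formsets render (maxlength="50").
    tag = models.SlugField(max_length=50)
    # content_type + object_id + GenericForeignKey make the tag attachable
    # to any model instance (Animal, Mineral, ...), which is exactly what
    # generic_inlineformset_factory keys on.
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()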
# sage/rings/polynomial/pbori/fglm.py
from .PyPolyBoRi import (BooleSet, Polynomial, BoolePolynomialVector,
                         FGLMStrategy)


def _fglm(I, from_ring, to_ring):
    r"""
    Unchecked variant of fglm.
    """
    vec = BoolePolynomialVector(I)
    return FGLMStrategy(from_ring, to_ring, vec).main()


def fglm(I, from_ring, to_ring):
    r"""
    Convert a *reduced* Groebner basis in from_ring to a Groebner basis in
    to_ring.

    It acts independently of the global ring, which is restored at the end
    of the computation.

    TESTS::

        sage: from sage.rings.polynomial.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: r = declare_ring(['x', 'y', 'z'], dict())
        sage: old_ring = r
        sage: new_ring = old_ring.clone(ordering=dp_asc)
        sage: (x, y, z) = [old_ring.variable(i) for i in range(3)]
        sage: ideal = [x + z, y + z]  # lp Groebner basis
        sage: from sage.rings.polynomial.pbori.fglm import fglm
        sage: list(fglm(ideal, old_ring, new_ring))
        [y + x, z + x]
    """
    for poly in I:
        if poly.ring().id() != from_ring.id():
            raise ValueError("Ideal I must be from the first ring argument")
    return _fglm(I, from_ring, to_ring)


def vars_real_divisors(monomial, monomial_set):
    r"""
    Return all elements of monomial_set which, multiplied by a variable in
    monomial, yield monomial.

    TESTS::

        sage: from sage.rings.polynomial.pbori.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring
        sage: r = Ring(1000)
        sage: x = r.variable
        sage: b = BooleSet([x(1)*x(2), x(2)])
        sage: from sage.rings.polynomial.pbori.fglm import vars_real_divisors
        sage: vars_real_divisors(x(1)*x(2)*x(3), b)
        {{x(1),x(2)}}
    """
    return BooleSet(Polynomial(monomial_set.divisors_of(monomial)).
                    graded_part(monomial.deg() - 1))


def m_k_plus_one(completed_elements, variables):
    r"""
    Calculate $m_{k+1}$ from the FGLM algorithm as described in Wichmann's
    diploma thesis. It would be nice to be able to efficiently extract the
    smallest term of a polynomial.

    TESTS::

        sage: from sage.rings.polynomial.pbori.pbori import *
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
        sage: dp_asc = OrderCode.dp_asc
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring
        sage: r = Ring(1000)
        sage: x = r.variable
        sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Monomial
        sage: s = BooleSet([x(1)*x(2), x(1), x(2), Monomial(r), x(3)])
        sage: from sage.rings.polynomial.pbori.fglm import m_k_plus_one
        sage: variables = BooleSet([x(1), x(2), x(3)])
        sage: m_k_plus_one(s, variables)
        x(2)*x(3)
        sage: r2 = r.clone(ordering=dp_asc)
        sage: m_k_plus_one(r2(s).set(), r2(variables).set())
        x(1)*x(3)
    """
    return sorted(completed_elements.cartesian_product(variables).diff(
        completed_elements))[0]
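# -- Added usage sketch (not part of the original module) ----------------------
# A condensed version of the fglm() doctest above, runnable inside Sage where
# the pbori bindings resolve; it converts a lex Groebner basis to a
# degree-ordered one without touching the global ring.
from sage.rings.polynomial.pbori import declare_ring
from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
from sage.rings.polynomial.pbori.fglm import fglm

r = declare_ring(['x', 'y', 'z'], dict())      # lex-ordered Boolean ring
r_dp = r.clone(ordering=OrderCode.dp_asc)      # same ring, dp_asc ordering
x, y, z = (r.variable(i) for i in range(3))
print(list(fglm([x + z, y + z], r, r_dp)))     # expected: [y + x, z + x]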
# ferry: course_embeddings UMAP reduction script
"""
Uses UMAP (https://umap-learn.readthedocs.io/en/latest/index.html) to reduce
course embeddings to two dimensions for visualization.
"""
import pandas as pd
import umap
from sklearn.preprocessing import StandardScaler

from ferry import config

courses = pd.read_csv(
    config.DATA_DIR / "course_embeddings/courses_deduplicated.csv",
    index_col=0,
)

# mypy: ignore-errors
embeddings = pd.read_hdf(
    config.DATA_DIR / "course_embeddings/fasttext_embeddings.h5",
    key="embeddings",
)

embeddings = StandardScaler().fit_transform(embeddings)

reducer = umap.UMAP()
umap_embeddings = reducer.fit_transform(embeddings)

courses["umap1"] = umap_embeddings[:, 0]
courses["umap2"] = umap_embeddings[:, 1]

courses.to_csv(config.DATA_DIR / "course_embeddings/courses_deduplicated_umap.csv")
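# -- Added follow-up sketch (not part of the original script) ------------------
# The script only persists the 2-D coordinates; a minimal way to actually look
# at them (matplotlib assumed; adjust the CSV path to wherever config.DATA_DIR
# points):
import matplotlib.pyplot as plt
import pandas as pd

courses_2d = pd.read_csv("courses_deduplicated_umap.csv", index_col=0)
plt.scatter(courses_2d["umap1"], courses_2d["umap2"], s=2, alpha=0.5)
plt.xlabel("umap1")
plt.ylabel("umap2")
plt.title("Course embeddings, UMAP projection")
plt.savefig("courses_umap.png", dpi=150)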
# zhumakova/ClassProject: flora_fauna.py
import inheritance


class Flora:
    def __init__(self, name, lifespan, habitat, plant_type):
        self.name = name
        self.lifespan = lifespan
        self.habitat = habitat
        self.plant_type = plant_type
        self.plant_size = 0


class Fauna:
    def __init__(self, name):
        self.name = name


class Predator(Fauna):
    def __init__(self, name: str, predator_type: str, what_eats: str, lifespan: int):
        super().__init__(name)
        self.predator_type = predator_type
        self.what_eats = what_eats
        self.lifespan = lifespan

    # def check_planet(self, planet: tsk4.Planet):
    #     if planet.fauna and not planet.humanity:
    #         print('YES')
    #     else:
    #         print('NO')


class Mammal(Fauna):
    def __init__(self, name, mammal_type, lifespan):
        super().__init__(name)
        self.mammal_type = mammal_type
        self.lifespan = lifespan

    def check_planet(self, planet: inheritance.Planet):
        if planet.flora and planet.fauna and not planet.humanity:
            planet.add_fauna(self)


shark = Predator('baby shark', 'sea', 'all', 20)
giraffe = Mammal('malwan', 'earth', 20)
giraffe.check_planet(inheritance.friendly)
marti = Mammal('marti', 'earth', 20)
marti.check_planet(inheritance.friendly)
print(inheritance.friendly.__dict__)
print(inheritance.Planet.__dict__)
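# -- Added stub (not part of the original project) -----------------------------
# flora_fauna.py imports an `inheritance` module that is not included here.
# Inferred purely from the attribute accesses above, a compatible stand-in
# would need at least this shape (all details assumed):
class Planet:
    def __init__(self, flora=True, fauna=True, humanity=False):
        self.flora = flora          # read by Mammal.check_planet()
        self.fauna = fauna
        self.humanity = humanity
        self.animals = []

    def add_fauna(self, animal):
        # called by check_planet() when the planet is habitable
        self.animals.append(animal.name)


friendly = Planet()  # module-level instance used by flora_fauna.py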
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

from . import SubCommand

__all__ = ['DemoCommand']


class DemoCommand(SubCommand):
    '''Create demo directory.
    '''
    name = "demo"

    def run(self, *args, **kwargs):
        import os
        from os import path
        print('''
Jug will create a directory called 'jug-demo/' with a file called
'primes.py' inside.

You can test jug by switching to that directory and running the commands:

    jug status primes.py

followed by

    jug execute primes.py

Upon termination of the process, results will be in a file called 'output.txt'.

PARALLEL USAGE

You can speed up the process by running several 'jug execute' in parallel:

    jug execute primes.py &
    jug execute primes.py &
    jug execute primes.py &
    jug execute primes.py &

TROUBLE SHOOTING:

Should you run into issues, you can run the internal tests for jug with

    jug test-jug

FURTHER READING

The online documentation contains further reading. You can read the next
tutorial here:

    http://jug.readthedocs.io/en/latest/decrypt-example.html
''')
        if path.exists('jug-demo'):
            print("Jug-demo previously created")
            return
        os.mkdir('jug-demo')
        output = open('jug-demo/primes.py', 'wt')
        output.write(r'''
from time import sleep

from jug import TaskGenerator


@TaskGenerator
def is_prime(n):
    sleep(1.)
    for j in range(2, n - 1):
        if (n % j) == 0:
            return False
    return True


@TaskGenerator
def count_primes(ps):
    return sum(ps)


@TaskGenerator
def write_output(n):
    output = open('output.txt', 'wt')
    output.write("Found {0} primes <= 100.\n".format(n))
    output.close()


primes100 = []
for n in range(2, 101):
    primes100.append(is_prime(n))

n_primes = count_primes(primes100)
write_output(n_primes)
''')
        output.close()


demo = DemoCommand()
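# For contrast with the jugfile written out above: each call to a
# @TaskGenerator-decorated function there returns a lazy Task object rather
# than a value, and 'jug execute' resolves the resulting task graph (possibly
# across several parallel processes). The sketch below is the same prime
# count written as plain, eager Python with no jug involved, so you can see
# exactly what the task graph computes.

def is_prime(n):
    # Trial division, mirroring the task above (without the sleep).
    for j in range(2, n - 1):
        if (n % j) == 0:
            return False
    return True


print("Found {0} primes <= 100.".format(
    sum(is_prime(n) for n in range(2, 101))))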
[ "the request\"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\")", "the template.\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_all_fields_search_multiple_classic_syntax(self, mock_index): \"\"\"User has entered", "\"The query should not be rewritten.\", ) self.assertFalse( data[\"has_classic_format\"], \"Flag", "MultiDict( { \"searchtype\": \"all\", \"query\": \"j franklin_r hawking_s\", \"size\": 50,", "should be None\") def test_querystring_has_wildcard_at_start(self): \"\"\"Querystring starts with a wildcard.\"\"\"", "\"order\": \"\", # Valid } ) with self.assertRaises(BadRequest): simple.search(request_data) class", "test_index_raises_connection_exception(self, mock_index): \"\"\"Index service raises a IndexConnectionError.\"\"\" mock_index.get_document.side_effect = IndexConnectionError", "\"None\"} # ) form = SimpleSearchForm(data) query = simple._query_from_form(form) self.assertIsInstance(", "\"foo\", # Invalid } ) with self.assertRaises(BadRequest): simple.search(request_data) @mock.patch(\"search.controllers.simple.url_for\") def", "call_args, call_kwargs = mock_index.get_document.call_args self.assertIsInstance(call_args[0], str, \"arXiv ID is passed\")", "mock_index): \"\"\"User has entered a `surname_f` query in a title", "be attempted\" ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_single_field_term(self, mock_index): \"\"\"Form data are", "should be attempted\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_connection_exception(self, mock_index): \"\"\"Index service", ") self.assertEqual( mock_index.get_document.call_count, 1, \"A search should be attempted\", )", "the chain. To do this, we return a 400 Bad", "\"franklin_r\", \"size\": 50, \"order\": \"\", } ) mock_index.search.return_value = {\"metadata\":", "an instance of SimpleQuery\" ) self.assertEqual(query.order, \"submitted_date\") def test_form_data_has_no_order(self): \"\"\"Form", "form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") data[\"query\"] =", "sort parameters. Since these are limited to specific values, there", "response context, so that a message may be\" \" rendered", "400 Bad Request, with a clean link back to the", "\"foo title\"}) with self.assertRaises(InternalServerError): _, _, _ = simple.search(request_data) self.assertEqual(", "= mock_index.get_document.call_args self.assertIsInstance(call_args[0], str, \"arXiv ID is passed\") # self.assertEqual(code,", "syntax change. 
\"\"\" @mock.patch(\"search.controllers.simple.SearchSession\") def test_all_fields_search_contains_classic_syntax(self, mock_index): \"\"\"User has entered", "MultiDict( { \"searchtype\": \"title\", \"query\": \"foo title\", \"size\": 50, #", "ex ) self.assertEqual( mock_index.get_document.call_count, 1, \"A search should be attempted\",", "removed.\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": \" foo title \"})", "MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"}) with self.assertRaises(InternalServerError): try: response_data, code,", "Invalid \"order\": \"\", # Valid } ) with self.assertRaises(BadRequest): simple.search(request_data)", "code, headers = simple.retrieve_document(1) except DocumentNotFound as ex: self.fail( \"DocumentNotFound", ") @mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_query_error(self, mock_index): \"\"\"Index service raises a QueryError.\"\"\"", ") mock_index.search.return_value = {\"metadata\": {}, \"results\": []} data, code, headers", "an instance of SimpleQuery\" ) def test_form_data_has_order(self): \"\"\"Form data includes", "a QueryError.\"\"\" def _raiseQueryError(*args, **kwargs): raise QueryError(\"What now\") mock_index.search.side_effect =", "request_data = MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"}) with self.assertRaises(InternalServerError): _,", "simple.\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"}) form =", "\"A search should be attempted\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_connection_exception(self, mock_index):", "NotFound, BadRequest from search.domain import SimpleQuery from search.controllers import simple", "this, we return a 400 Bad Request, with a clean", ") form = SimpleSearchForm(data) query = simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery,", "self.assertEqual( mock_index.search.call_count, 0, \"No search should be attempted\" ) @mock.patch(\"search.controllers.simple.SearchSession\")", "\"\"\"Tests for :func:`.simple._query_from_form`.\"\"\" def test_multiple_simple(self): \"\"\"Form data has three simple.\"\"\"", "\"j franklin, r; hawking, s\", \"The query should be rewritten.\",", "it should be removed.\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": \"", "rewritten.\", ) self.assertTrue( data[\"has_classic_format\"], \"A flag denoting the syntax interception", "mock_index): \"\"\"Index service raises a IndexConnectionError.\"\"\" def _raiseIndexConnectionError(*args, **kwargs): raise", "as ex: self.fail( \"DocumentNotFound should be handled (caught %s)\" %", "(caught %s)\" % ex) self.assertEqual( mock_index.get_document.call_count, 1, \"A search should", "limited to specific values, there is no other reason for", "self.assertEqual(code, status.HTTP_500_INTERNAL_SERVER_ERROR) @mock.patch(\"search.controllers.simple.SearchSession\") def test_document_not_found(self, mock_index): \"\"\"The document is not", "they are passed around among views (to persist users' selection),", "MultiDict() response_data, code, headers = simple.search(request_data) self.assertEqual(code, HTTPStatus.OK, \"Response should", "\"\"\" The user may have entered an author query using", "} ) mock_index.search.return_value = {\"metadata\": {}, \"results\": []} data, code,", "title\"}) form = SimpleSearchForm(data) query = simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery,", "\"Form should be valid\") class TestQueryFromForm(TestCase): 
\"\"\"Tests for :func:`.simple._query_from_form`.\"\"\" def", "we return a 400 Bad Request, with a clean link", "should be handled (caught %s)\" % ex ) self.assertEqual( mock_index.get_document.call_count,", "class TestQueryFromForm(TestCase): \"\"\"Tests for :func:`.simple._query_from_form`.\"\"\" def test_multiple_simple(self): \"\"\"Form data has", "\"A flag denoting the syntax interception should be set\" \"", "search should be attempted\", ) call_args, call_kwargs = mock_index.get_document.call_args self.assertIsInstance(call_args[0],", "title\", \"order\": \"None\"} # ) form = SimpleSearchForm(data) query =", "are limited to specific values, there is no other reason", "the template.\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_title_search_contains_classic_syntax(self, mock_index): \"\"\"User has entered", "\"title\", \"query\": \"*foo title\"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should", "\"foo title\", \"size\": 50, # Valid. \"order\": \"foo\", # Invalid", "(caught %s)\" % ex) self.assertEqual( mock_index.search.call_count, 1, \"A search should", "an all-fields search.\"\"\" request_data = MultiDict( { \"searchtype\": \"all\", \"query\":", "template.\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_author_search_contains_classic_syntax(self, mock_index): \"\"\"User has entered a", "[]} data, code, headers = simple.search(request_data) self.assertEqual( data[\"query\"].value, \"franklin_r\", \"The", "call_args, call_kwargs = mock_index.search.call_args self.assertIsInstance( call_args[0], SimpleQuery, \"An SimpleQuery is", ") class TestRetrieveDocument(TestCase): \"\"\"Tests for :func:`.simple.retrieve_document`.\"\"\" @mock.patch(\"search.controllers.simple.SearchSession\") def test_encounters_queryerror(self, mock_index):", "\"\"\"Tests for :func:`.simple.retrieve_document`.\"\"\" @mock.patch(\"search.controllers.simple.SearchSession\") def test_encounters_queryerror(self, mock_index): \"\"\"There is a", "[]} request_data = MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"}) response_data, code,", ") call_args, call_kwargs = mock_index.get_document.call_args self.assertIsInstance(call_args[0], str, \"arXiv ID is", "= SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") def test_input_whitespace_is_stripped(self): \"\"\"If", "'\"rhubarb\" \"pie' form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\")", "\"\"\"User has entered a searchtype (field) and query (value).\"\"\" data", "the classic search system, and not intended to be supported.", "in a title search.\"\"\" request_data = MultiDict( { \"searchtype\": \"title\",", "interception should be set\" \" in the response context, so", "Invalid } ) with self.assertRaises(BadRequest): simple.search(request_data) @mock.patch(\"search.controllers.simple.url_for\") def test_size_is_invalid(self, mock_url_for):", "change. \"\"\" @mock.patch(\"search.controllers.simple.SearchSession\") def test_all_fields_search_contains_classic_syntax(self, mock_index): \"\"\"User has entered a", "Valid. 
\"order\": \"foo\", # Invalid } ) with self.assertRaises(BadRequest): simple.search(request_data)", "import SimpleQuery from search.controllers import simple from search.controllers.simple.forms import SimpleSearchForm", "\"Response should be OK.\") self.assertIn(\"form\", response_data, \"Response should include form.\")", "1, \"A search should be attempted\", ) class TestSearchController(TestCase): \"\"\"Tests", "1, \"A search should be attempted\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_connection_exception(self,", "be attempted\" ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_connection_exception(self, mock_index): \"\"\"Index service raises", "test_query_and_searchtype(self): \"\"\"User has entered a searchtype (field) and query (value).\"\"\"", "header should be set\") self.assertEqual( mock_index.search.call_count, 0, \"No search should", "lambda *a, **k: f'https://arxiv.org/{k[\"paper_id\"]}', ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_arxiv_id(self, mock_index): \"\"\"Query", "} ) with self.assertRaises(BadRequest): simple.search(request_data) class TestClassicAuthorSyntaxIsIntercepted(TestCase): \"\"\" The user", "mock_index.search.call_count, 1, \"A search should be attempted\" ) call_args, call_kwargs", "= MultiDict({\"searchtype\": \"title\", \"query\": '\"rhubarb'}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form", "should be attempted\" ) class TestSimpleSearchForm(TestCase): \"\"\"Tests for :class:`.SimpleSearchForm`.\"\"\" def", "has entered a `surname_f` query in an author search.\"\"\" request_data", ") class TestSearchController(TestCase): \"\"\"Tests for :func:`.simple.search`.\"\"\" @mock.patch( \"search.controllers.simple.url_for\", lambda *a,", "request_data = MultiDict() response_data, code, headers = simple.search(request_data) self.assertEqual(code, HTTPStatus.OK,", "\"\"\"Index service raises a QueryError.\"\"\" def _raiseQueryError(*args, **kwargs): raise QueryError(\"What", "passed\") # self.assertEqual(code, status.HTTP_500_INTERNAL_SERVER_ERROR) @mock.patch(\"search.controllers.simple.SearchSession\") def test_document_not_found(self, mock_index): \"\"\"The document", "None\") def test_querystring_has_wildcard_at_start(self): \"\"\"Querystring starts with a wildcard.\"\"\" data =", "\"title\", \"query\": '\"rhubarb'}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be", "the order or sort parameters. Since these are limited to", "query using `surname_f` syntax. 
This is an artefact of the", "= simple.search(request_data) self.assertEqual( data[\"query\"].value, \"franklin_r\", \"The query should not be", "of SimpleQuery\" ) def test_form_data_has_order(self): \"\"\"Form data includes sort order.\"\"\"", "be attempted\" ) class TestSimpleSearchForm(TestCase): \"\"\"Tests for :class:`.SimpleSearchForm`.\"\"\" def test_searchtype_only(self):", "= SimpleSearchForm(data) query = simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery, \"Should return", ") self.assertIsNone(query.order, \"Order should be None\") def test_querystring_has_wildcard_at_start(self): \"\"\"Querystring starts", "characters.\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": '\"rhubarb'}) form = SimpleSearchForm(data)", "search.domain import SimpleQuery from search.controllers import simple from search.controllers.simple.forms import", "has entered a classic query with multiple authors.\"\"\" request_data =", "searchtype (field) and query (value).\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\":", "have monkeyed with the order or sort parameters. Since these", "return an instance of SimpleQuery\" ) self.assertEqual(query.order, \"submitted_date\") def test_form_data_has_no_order(self):", "\"\"\"User has entered a classic query with multiple authors.\"\"\" request_data", "request_data = MultiDict( { \"searchtype\": \"all\", \"query\": \"franklin_r\", \"size\": 50,", "query, SimpleQuery, \"Should return an instance of SimpleQuery\" ) self.assertEqual(query.order,", "should be attempted\", ) call_args, call_kwargs = mock_index.get_document.call_args self.assertIsInstance(call_args[0], str,", "has been submitted.\"\"\" request_data = MultiDict() response_data, code, headers =", "service raises a QueryError.\"\"\" def _raiseQueryError(*args, **kwargs): raise QueryError(\"What now\")", "TestPaginationParametersAreFunky(TestCase): \"\"\" The user may have monkeyed with the order", "foo title \"}) form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be", "should be attempted\" ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_single_field_term(self, mock_index): \"\"\"Form data", "request\"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") def", "from search.services.index import ( IndexConnectionError, QueryError, DocumentNotFound, ) class TestRetrieveDocument(TestCase):", "not intended to be supported. Nevertheless, users have become accustomed", "Nevertheless, users have become accustomed to this syntax. 
We therefore", "be handled (caught %s)\" % ex) self.assertEqual( mock_index.get_document.call_count, 1, \"A", "search index\", ) self.assertEqual(code, HTTPStatus.OK, \"Response should be OK.\") @mock.patch(\"search.controllers.simple.SearchSession\")", "to specific values, there is no other reason for them", "\" foo title \"}) form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should", "response_data, code, headers = simple.search(request_data) self.assertEqual( code, HTTPStatus.MOVED_PERMANENTLY, \"Response should", "def test_form_data_has_order(self): \"\"\"Form data includes sort order.\"\"\" data = MultiDict(", "invalid.\"\"\" request_data = MultiDict({\"searchtype\": \"title\"}) response_data, code, headers = simple.search(request_data)", "MultiDict( { \"searchtype\": \"title\", \"query\": \"foo title\", \"size\": 51, #", "This is an artefact of the classic search system, and", "raise DocumentNotFound(\"What now\") mock_index.get_document.side_effect = _raiseDocumentNotFound with self.assertRaises(NotFound): try: response_data,", "# Invalid } ) with self.assertRaises(BadRequest): simple.search(request_data) @mock.patch(\"search.controllers.simple.url_for\") def test_size_is_invalid(self,", "data = MultiDict({\"searchtype\": \"title\"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should", "\"Form should be valid\") data[\"query\"] = '\"rhubarb\" \"pie' form =", "test_searchtype_only(self): \"\"\"User has entered only a searchtype (field).\"\"\" data =", "to the search form. \"\"\" @mock.patch(\"search.controllers.simple.url_for\") def test_order_is_invalid(self, mock_url_for): \"\"\"The", "search should be attempted\" ) call_args, call_kwargs = mock_index.search.call_args self.assertIsInstance(", "search.\"\"\" request_data = MultiDict( { \"searchtype\": \"title\", \"query\": \"franklin_r\", \"size\":", "\"\"\"Form data includes sort order parameter, but it is 'None'.\"\"\"", "_raiseQueryError(*args, **kwargs): raise QueryError(\"What now\") mock_index.get_document.side_effect = _raiseQueryError with self.assertRaises(InternalServerError):", "on the request is invalid.\"\"\" request_data = MultiDict( { \"searchtype\":", "simple search controller, :mod:`search.controllers.simple`.\"\"\" from http import HTTPStatus from unittest", "code, headers = simple.search(request_data) self.assertEqual( data[\"query\"].value, \"franklin, r\", \"The query", "form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") def test_query_and_searchtype(self):", "authors.\"\"\" request_data = MultiDict( { \"searchtype\": \"all\", \"query\": \"j franklin_r", "= MultiDict({\"searchtype\": \"title\", \"query\": \"*foo title\"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(),", "QueryError(\"What now\") mock_index.search.side_effect = _raiseQueryError request_data = MultiDict({\"searchtype\": \"title\", \"query\":", ") with self.assertRaises(BadRequest): simple.search(request_data) @mock.patch(\"search.controllers.simple.url_for\") def test_size_is_invalid(self, mock_url_for): \"\"\"The order", "set\" \" in the response context, so that a message", ":func:`.simple.retrieve_document`.\"\"\" @mock.patch(\"search.controllers.simple.SearchSession\") def test_encounters_queryerror(self, mock_index): \"\"\"There is a bug in", "mock_index.get_document.call_count, 1, \"A search should be attempted\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def", "\"foo title\"}) form = SimpleSearchForm(data) 
self.assertTrue(form.validate(), \"Form should be valid\")", "QueryError.\"\"\" def _raiseQueryError(*args, **kwargs): raise QueryError(\"What now\") mock_index.search.side_effect = _raiseQueryError", "= MultiDict( { \"searchtype\": \"all\", \"query\": \"franklin_r\", \"size\": 50, \"order\":", "simple.search(request_data) self.assertEqual(code, HTTPStatus.OK, \"Response should be OK.\") self.assertIn(\"form\", response_data, \"Response", "def test_querystring_has_wildcard_at_start(self): \"\"\"Querystring starts with a wildcard.\"\"\" data = MultiDict({\"searchtype\":", "'\"rhubarb'}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") data[\"query\"]", "import simple from search.controllers.simple.forms import SimpleSearchForm from search.services.index import (", "query, SimpleQuery, \"Should return an instance of SimpleQuery\" ) self.assertIsNone(query.order,", "request_data = MultiDict( { \"searchtype\": \"all\", \"query\": \"j franklin_r hawking_s\",", "SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be valid\") data[\"query\"] = '\"rhubarb\" \"pie'", "\"Form should be invalid\") def test_query_and_searchtype(self): \"\"\"User has entered a", "should be removed.\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": \" foo", "= simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery, \"Should return an instance of", "mock_index.get_document.side_effect = IndexConnectionError with self.assertRaises(InternalServerError): response_data, code, headers = simple.retrieve_document(\"124.5678\")", "1, \"A search should be attempted\" ) class TestSimpleSearchForm(TestCase): \"\"\"Tests", "views (to persist users' selection), it's important to break the", "r\", \"The query should be rewritten.\", ) self.assertTrue( data[\"has_classic_format\"], \"A", "def test_all_fields_search_multiple_classic_syntax(self, mock_index): \"\"\"User has entered a classic query with", "def test_title_search_contains_classic_syntax(self, mock_index): \"\"\"User has entered a `surname_f` query in", "return an instance of SimpleQuery\" ) self.assertIsNone(query.order, \"Order should be", "entered an author query using `surname_f` syntax. This is an", "= simple.search(request_data) self.assertEqual( data[\"query\"].value, \"franklin, r\", \"The query should be", "data[\"has_classic_format\"], \"Flag should not be set, as no rewrite has", "controller, :mod:`search.controllers.simple`.\"\"\" from http import HTTPStatus from unittest import TestCase,", "call_args[0], SimpleQuery, \"An SimpleQuery is passed to the search index\",", "search.controllers import simple from search.controllers.simple.forms import SimpleSearchForm from search.services.index import", "\"\"\"The document is not found.\"\"\" def _raiseDocumentNotFound(*args, **kwargs): raise DocumentNotFound(\"What", "mock from werkzeug.datastructures import MultiDict from werkzeug.exceptions import InternalServerError, NotFound,", "an arXiv ID.\"\"\" request_data = MultiDict({\"query\": \"1702.00123\"}) response_data, code, headers", "in an author search.\"\"\" request_data = MultiDict( { \"searchtype\": \"author\",", "\"query\": \"foo title\", \"order\": \"submitted_date\", } ) form = SimpleSearchForm(data)", "syntax. We therefore rewrite the query using a comma, and", "\"searchtype\": \"title\", \"query\": \"franklin_r\", \"size\": 50, \"order\": \"\", } )", "\"title\", \"query\": \"foo title\", \"size\": 50, # Valid. 
\"order\": \"foo\",", "self.fail( \"DocumentNotFound should be handled (caught %s)\" % ex )", "been submitted.\"\"\" request_data = MultiDict() response_data, code, headers = simple.search(request_data)", "SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be valid\") class TestPaginationParametersAreFunky(TestCase): \"\"\" The", "be invalid\") data[\"query\"] = '\"rhubarb\"' form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form", "the user a warning about the syntax change. \"\"\" @mock.patch(\"search.controllers.simple.SearchSession\")", "code, headers = simple.search(request_data) self.assertEqual(code, HTTPStatus.OK, \"Response should be OK.\")", "handled (caught %s)\" % ex) self.assertEqual( mock_index.search.call_count, 1, \"A search", "invalid\") def test_query_and_searchtype(self): \"\"\"User has entered a searchtype (field) and", "index or query.\"\"\" def _raiseQueryError(*args, **kwargs): raise QueryError(\"What now\") mock_index.get_document.side_effect", "raise QueryError(\"What now\") mock_index.get_document.side_effect = _raiseQueryError with self.assertRaises(InternalServerError): try: response_data,", "headers = simple.search(request_data) self.assertEqual( mock_index.search.call_count, 1, \"A search should be", "{ \"searchtype\": \"all\", \"query\": \"franklin_r\", \"size\": 50, \"order\": \"\", }", "\"title\"}) response_data, code, headers = simple.search(request_data) self.assertEqual(code, HTTPStatus.OK, \"Response should", "SimpleQuery\" ) def test_form_data_has_order(self): \"\"\"Form data includes sort order.\"\"\" data", "'None'.\"\"\" data = MultiDict( {\"searchtype\": \"title\", \"query\": \"foo title\", \"order\":", "@mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_query_error(self, mock_index): \"\"\"Index service raises a QueryError.\"\"\" def", "flag denoting the syntax interception should be set\" \" in", "BadRequest from search.domain import SimpleQuery from search.controllers import simple from", "\"Order should be None\") def test_querystring_has_wildcard_at_start(self): \"\"\"Querystring starts with a", "the index or query.\"\"\" def _raiseQueryError(*args, **kwargs): raise QueryError(\"What now\")", "includes sort order.\"\"\" data = MultiDict( { \"searchtype\": \"title\", \"query\":", "reason for them to be invalid. Given that they are", "test_all_fields_search_contains_classic_syntax(self, mock_index): \"\"\"User has entered a `surname_f` query in an", "code, headers = simple.retrieve_document(\"124.5678\") self.assertEqual( mock_index.get_document.call_count, 1, \"A search should", "the search index\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_query_error(self, mock_index): \"\"\"Index service", "\"search.controllers.simple.url_for\", lambda *a, **k: f'https://arxiv.org/{k[\"paper_id\"]}', ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_arxiv_id(self, mock_index):", "search form. \"\"\" @mock.patch(\"search.controllers.simple.url_for\") def test_order_is_invalid(self, mock_url_for): \"\"\"The order parameter", "= '\"rhubarb\" \"pie\"' form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be", "invalid. 
Given that they are passed around among views (to", "entered a `surname_f` query in a title search.\"\"\" request_data =", "not found.\"\"\" def _raiseDocumentNotFound(*args, **kwargs): raise DocumentNotFound(\"What now\") mock_index.get_document.side_effect =", "title search.\"\"\" request_data = MultiDict( { \"searchtype\": \"title\", \"query\": \"franklin_r\",", "the syntax interception should be set\" \" in the response", "the search index\", ) self.assertEqual(code, HTTPStatus.OK, \"Response should be OK.\")", "data has been submitted.\"\"\" request_data = MultiDict() response_data, code, headers", "data[\"query\"] = '\"rhubarb\" \"pie\"' form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should", "self.assertEqual( data[\"query\"].value, \"franklin, r\", \"The query should be rewritten.\", )", "= MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"}) form = SimpleSearchForm(data) self.assertTrue(form.validate(),", "\"\"\"User has entered only a query (value); this should never", "def _raiseIndexConnectionError(*args, **kwargs): raise IndexConnectionError(\"What now\") mock_index.search.side_effect = _raiseIndexConnectionError request_data", "should include form.\") self.assertEqual( mock_index.search.call_count, 0, \"No search should be", "mock_index.get_document.side_effect = _raiseQueryError with self.assertRaises(InternalServerError): try: response_data, code, headers =", "hawking, s\", \"The query should be rewritten.\", ) self.assertTrue( data[\"has_classic_format\"],", "= _raiseIndexConnectionError request_data = MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"}) with", "= simple.retrieve_document(1) except DocumentNotFound as ex: self.fail( \"DocumentNotFound should be", "values, there is no other reason for them to be", "*a, **k: f'https://arxiv.org/{k[\"paper_id\"]}', ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_arxiv_id(self, mock_index): \"\"\"Query parameter", "self.assertEqual(query.order, \"submitted_date\") def test_form_data_has_no_order(self): \"\"\"Form data includes sort order parameter,", "\"title\"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") def", "a IndexConnectionError.\"\"\" mock_index.get_document.side_effect = IndexConnectionError with self.assertRaises(InternalServerError): response_data, code, headers", "do this, we return a 400 Bad Request, with a", "[]} data, code, headers = simple.search(request_data) self.assertEqual( data[\"query\"].value, \"franklin, r\",", "The user may have monkeyed with the order or sort", "r; hawking, s\", \"The query should be rewritten.\", ) self.assertTrue(", "test_order_is_invalid(self, mock_url_for): \"\"\"The order parameter on the request is invalid.\"\"\"", "should never happen.\"\"\" data = MultiDict({\"query\": \"someone monkeyed with the", "(field) and query (value).\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": \"foo", "response_data, code, headers = simple.search(request_data) except QueryError as ex: self.fail(\"QueryError", "self.assertTrue(form.validate(), \"Form should be valid\") class TestPaginationParametersAreFunky(TestCase): \"\"\" The user", "test_index_raises_query_error(self, mock_index): \"\"\"Index service raises a QueryError.\"\"\" def _raiseQueryError(*args, **kwargs):", "TestSimpleSearchForm(TestCase): \"\"\"Tests for :class:`.SimpleSearchForm`.\"\"\" def test_searchtype_only(self): \"\"\"User has entered only", "@mock.patch( \"search.controllers.simple.url_for\", lambda 
*a, **k: f'https://arxiv.org/{k[\"paper_id\"]}', ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_arxiv_id(self,", "contains an arXiv ID.\"\"\" request_data = MultiDict({\"query\": \"1702.00123\"}) response_data, code,", "\"title\", \"query\": \"foo title\", \"size\": 51, # Invalid \"order\": \"\",", "search should be attempted\" ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_connection_exception(self, mock_index): \"\"\"Index", "request is invalid.\"\"\" request_data = MultiDict( { \"searchtype\": \"title\", \"query\":", "be attempted\", ) call_args, call_kwargs = mock_index.get_document.call_args self.assertIsInstance(call_args[0], str, \"arXiv", "a IndexConnectionError.\"\"\" def _raiseIndexConnectionError(*args, **kwargs): raise IndexConnectionError(\"What now\") mock_index.search.side_effect =", "bug in the index or query.\"\"\" def _raiseQueryError(*args, **kwargs): raise", "str, \"arXiv ID is passed\") # self.assertEqual(code, status.HTTP_500_INTERNAL_SERVER_ERROR) @mock.patch(\"search.controllers.simple.SearchSession\") def", "= SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be valid\") class TestQueryFromForm(TestCase): \"\"\"Tests", "query = simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery, \"Should return an instance", "The user may have entered an author query using `surname_f`", "submitted.\"\"\" request_data = MultiDict() response_data, code, headers = simple.search(request_data) self.assertEqual(code,", "\"results\": []} data, code, headers = simple.search(request_data) self.assertEqual( data[\"query\"].value, \"franklin_r\",", "monkeyed with the request\"}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should", "{ \"searchtype\": \"title\", \"query\": \"foo title\", \"order\": \"submitted_date\", } )", "sort order parameter, but it is 'None'.\"\"\" data = MultiDict(", "ex) self.assertEqual( mock_index.get_document.call_count, 1, \"A search should be attempted\", )", ") self.assertFalse( data[\"has_classic_format\"], \"Flag should not be set, as no", "def _raiseDocumentNotFound(*args, **kwargs): raise DocumentNotFound(\"What now\") mock_index.get_document.side_effect = _raiseDocumentNotFound with", "search should be attempted\", ) class TestSearchController(TestCase): \"\"\"Tests for :func:`.simple.search`.\"\"\"", "= SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") def test_query_only(self): \"\"\"User", "simple.search(request_data) @mock.patch(\"search.controllers.simple.url_for\") def test_size_is_invalid(self, mock_url_for): \"\"\"The order parameter on the", "simple.search(request_data) self.assertEqual( mock_index.search.call_count, 1, \"A search should be attempted\" )", "show the user a warning about the syntax change. 
\"\"\"", "now\") mock_index.search.side_effect = _raiseIndexConnectionError request_data = MultiDict({\"searchtype\": \"title\", \"query\": \"foo", "should be rewritten.\", ) self.assertTrue( data[\"has_classic_format\"], \"A flag denoting the", "mock_index.search.call_count, 1, \"A search should be attempted\" ) class TestSimpleSearchForm(TestCase):", "self.assertEqual( mock_index.get_document.call_count, 1, \"A search should be attempted\", ) class", "\"j franklin_r hawking_s\", \"size\": 50, \"order\": \"\", } ) mock_index.search.return_value", "simple._query_from_form(form) self.assertIsInstance( query, SimpleQuery, \"Should return an instance of SimpleQuery\"", "{ \"searchtype\": \"title\", \"query\": \"franklin_r\", \"size\": 50, \"order\": \"\", }", "to the search index\", ) self.assertEqual(code, HTTPStatus.OK, \"Response should be", "a classic query with multiple authors.\"\"\" request_data = MultiDict( {", "be None\") def test_querystring_has_wildcard_at_start(self): \"\"\"Querystring starts with a wildcard.\"\"\" data", "simple from search.controllers.simple.forms import SimpleSearchForm from search.services.index import ( IndexConnectionError,", "mock_index): \"\"\"Query parameter contains an arXiv ID.\"\"\" request_data = MultiDict({\"query\":", "MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"}) with self.assertRaises(InternalServerError): _, _, _", "response_data, code, headers = simple.search(request_data) self.assertEqual(code, HTTPStatus.OK, \"Response should be", "passed to the search index\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_index_raises_query_error(self, mock_index):", "MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"}) form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form", "query in a title search.\"\"\" request_data = MultiDict( { \"searchtype\":", "headers = simple.search(request_data) self.assertEqual( data[\"query\"].value, \"franklin, r\", \"The query should", "no other reason for them to be invalid. 
Given that", "data are present.\"\"\" mock_index.search.return_value = {\"metadata\": {}, \"results\": []} request_data", "entered only a searchtype (field).\"\"\" data = MultiDict({\"searchtype\": \"title\"}) form", "= MultiDict( { \"searchtype\": \"title\", \"query\": \"foo title\", \"size\": 50,", "class TestRetrieveDocument(TestCase): \"\"\"Tests for :func:`.simple.retrieve_document`.\"\"\" @mock.patch(\"search.controllers.simple.SearchSession\") def test_encounters_queryerror(self, mock_index): \"\"\"There", "with self.assertRaises(BadRequest): simple.search(request_data) @mock.patch(\"search.controllers.simple.url_for\") def test_size_is_invalid(self, mock_url_for): \"\"\"The order parameter", "instance of SimpleQuery\" ) def test_form_data_has_order(self): \"\"\"Form data includes sort", "except DocumentNotFound as ex: self.fail( \"DocumentNotFound should be handled (caught", "\"query\": \"foo title\"}) form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be", "service raises a IndexConnectionError.\"\"\" mock_index.get_document.side_effect = IndexConnectionError with self.assertRaises(InternalServerError): response_data,", "query should be rewritten.\", ) self.assertTrue( data[\"has_classic_format\"], \"A flag denoting", ") self.assertEqual(code, HTTPStatus.OK, \"Response should be OK.\") @mock.patch(\"search.controllers.simple.SearchSession\") def test_invalid_data(self,", "be a 301 redirect.\", ) self.assertIn(\"Location\", headers, \"Location header should", "import ( IndexConnectionError, QueryError, DocumentNotFound, ) class TestRetrieveDocument(TestCase): \"\"\"Tests for", "response_data, code, headers = simple.search(request_data) self.assertEqual( mock_index.search.call_count, 1, \"A search", "\"\"\"Tests for :func:`.simple.search`.\"\"\" @mock.patch( \"search.controllers.simple.url_for\", lambda *a, **k: f'https://arxiv.org/{k[\"paper_id\"]}', )", "parameter on the request is invalid.\"\"\" request_data = MultiDict( {", "specific values, there is no other reason for them to", "} ) form = SimpleSearchForm(data) query = simple._query_from_form(form) self.assertIsInstance( query,", "document is not found.\"\"\" def _raiseDocumentNotFound(*args, **kwargs): raise DocumentNotFound(\"What now\")", "invalid.\"\"\" request_data = MultiDict( { \"searchtype\": \"title\", \"query\": \"foo title\",", "is not found.\"\"\" def _raiseDocumentNotFound(*args, **kwargs): raise DocumentNotFound(\"What now\") mock_index.get_document.side_effect", "are passed around among views (to persist users' selection), it's", ") @mock.patch(\"search.controllers.simple.SearchSession\") def test_title_search_contains_classic_syntax(self, mock_index): \"\"\"User has entered a `surname_f`", "form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") def test_query_only(self):", "entered a classic query with multiple authors.\"\"\" request_data = MultiDict(", "\"franklin_r\", \"The query should not be rewritten.\", ) self.assertFalse( data[\"has_classic_format\"],", "be OK.\") @mock.patch(\"search.controllers.simple.SearchSession\") def test_invalid_data(self, mock_index): \"\"\"Form data are invalid.\"\"\"", "\"query\": \"franklin_r\", \"size\": 50, \"order\": \"\", } ) mock_index.search.return_value =", "should be OK.\") @mock.patch(\"search.controllers.simple.SearchSession\") def test_invalid_data(self, mock_index): \"\"\"Form data are", "\"Form should be valid.\") self.assertEqual(form.query.data, \"foo title\") def 
test_querystring_has_unbalanced_quotes(self): \"\"\"Querystring", "class TestPaginationParametersAreFunky(TestCase): \"\"\" The user may have monkeyed with the", "= '\"rhubarb\" \"pie' form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be", "\"title\", \"query\": \"foo title\"}) with self.assertRaises(InternalServerError): _, _, _ =", "return a 400 Bad Request, with a clean link back", "syntax. This is an artefact of the classic search system,", "parameter contains an arXiv ID.\"\"\" request_data = MultiDict({\"query\": \"1702.00123\"}) response_data,", "\"An SimpleQuery is passed to the search index\", ) self.assertEqual(code,", "data[\"query\"].value, \"franklin, r\", \"The query should be rewritten.\", ) self.assertTrue(", "has padding whitespace, it should be removed.\"\"\" data = MultiDict({\"searchtype\":", "index\", ) self.assertEqual(code, HTTPStatus.OK, \"Response should be OK.\") @mock.patch(\"search.controllers.simple.SearchSession\") def", "def test_author_search_contains_classic_syntax(self, mock_index): \"\"\"User has entered a `surname_f` query in", "= MultiDict( { \"searchtype\": \"title\", \"query\": \"foo title\", \"order\": \"submitted_date\",", "and not intended to be supported. Nevertheless, users have become", "mock_index.search.return_value = {\"metadata\": {}, \"results\": []} data, code, headers =", "unittest import TestCase, mock from werkzeug.datastructures import MultiDict from werkzeug.exceptions", "should be valid\") data[\"query\"] = '\"rhubarb\" \"pie' form = SimpleSearchForm(data)", "be supported. Nevertheless, users have become accustomed to this syntax.", "\"An SimpleQuery is passed to the search index\", ) @mock.patch(\"search.controllers.simple.SearchSession\")", "response_data, code, headers = simple.retrieve_document(1) except QueryError as ex: self.fail(\"QueryError", "has an odd number of quote characters.\"\"\" data = MultiDict({\"searchtype\":", "a message may be\" \" rendered in the template.\", )", "\"query\": '\"rhubarb'}) form = SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\")", "**kwargs): raise QueryError(\"What now\") mock_index.get_document.side_effect = _raiseQueryError with self.assertRaises(InternalServerError): try:", "mock_index): \"\"\"Form data are invalid.\"\"\" request_data = MultiDict({\"searchtype\": \"title\"}) response_data,", "\"\"\"Form data are invalid.\"\"\" request_data = MultiDict({\"searchtype\": \"title\"}) response_data, code,", "be valid\") class TestPaginationParametersAreFunky(TestCase): \"\"\" The user may have monkeyed", "ID.\"\"\" request_data = MultiDict({\"query\": \"1702.00123\"}) response_data, code, headers = simple.search(request_data)", "% ex) self.assertEqual( mock_index.get_document.call_count, 1, \"A search should be attempted\",", "HTTPStatus.OK, \"Response should be OK.\") self.assertIn(\"form\", response_data, \"Response should include", "simple.retrieve_document(1) except DocumentNotFound as ex: self.fail( \"DocumentNotFound should be handled", "a 400 Bad Request, with a clean link back to", "= simple.search(request_data) self.assertEqual( code, HTTPStatus.MOVED_PERMANENTLY, \"Response should be a 301", "the syntax change. 
\"\"\" @mock.patch(\"search.controllers.simple.SearchSession\") def test_all_fields_search_contains_classic_syntax(self, mock_index): \"\"\"User has", "the response context, so that a message may be\" \"", "message may be\" \" rendered in the template.\", ) @mock.patch(\"search.controllers.simple.SearchSession\")", "# Valid. \"order\": \"foo\", # Invalid } ) with self.assertRaises(BadRequest):", "\"\"\"No form data has been submitted.\"\"\" request_data = MultiDict() response_data,", "raises a IndexConnectionError.\"\"\" mock_index.get_document.side_effect = IndexConnectionError with self.assertRaises(InternalServerError): response_data, code,", "a comma, and show the user a warning about the", "import HTTPStatus from unittest import TestCase, mock from werkzeug.datastructures import", "is an artefact of the classic search system, and not", "MultiDict({\"query\": \"1702.00123\"}) response_data, code, headers = simple.search(request_data) self.assertEqual( code, HTTPStatus.MOVED_PERMANENTLY,", "with self.assertRaises(NotFound): try: response_data, code, headers = simple.retrieve_document(1) except DocumentNotFound", "rendered in the template.\", ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_all_fields_search_multiple_classic_syntax(self, mock_index): \"\"\"User", "SimpleQuery, \"Should return an instance of SimpleQuery\" ) self.assertIsNone(query.order, \"Order", "present.\"\"\" mock_index.search.return_value = {\"metadata\": {}, \"results\": []} request_data = MultiDict({\"searchtype\":", "\"query\": \"foo title\"}) with self.assertRaises(InternalServerError): try: response_data, code, headers =", "therefore rewrite the query using a comma, and show the", "request_data = MultiDict({\"searchtype\": \"title\"}) response_data, code, headers = simple.search(request_data) self.assertEqual(code,", "**k: f'https://arxiv.org/{k[\"paper_id\"]}', ) @mock.patch(\"search.controllers.simple.SearchSession\") def test_arxiv_id(self, mock_index): \"\"\"Query parameter contains", "\"pie\"' form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be valid\") class", "werkzeug.exceptions import InternalServerError, NotFound, BadRequest from search.domain import SimpleQuery from", "def test_size_is_invalid(self, mock_url_for): \"\"\"The order parameter on the request is", "warning about the syntax change. \"\"\" @mock.patch(\"search.controllers.simple.SearchSession\") def test_all_fields_search_contains_classic_syntax(self, mock_index):", "= MultiDict( { \"searchtype\": \"author\", \"query\": \"franklin_r\", \"size\": 50, \"order\":", "data, code, headers = simple.search(request_data) self.assertEqual( data[\"query\"].value, \"j franklin, r;", "a title search.\"\"\" request_data = MultiDict( { \"searchtype\": \"title\", \"query\":", "author search.\"\"\" request_data = MultiDict( { \"searchtype\": \"author\", \"query\": \"franklin_r\",", "order or sort parameters. 
class TestSimpleSearchForm(TestCase):
    """Tests for :class:`.SimpleSearchForm`."""

    def test_searchtype_only(self):
        """User has entered only a searchtype (field)."""
        data = MultiDict({"searchtype": "title"})
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

    def test_query_only(self):
        """User has entered only a query (value); this should never happen."""
        data = MultiDict({"query": "someone monkeyed with the request"})
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

    def test_query_and_searchtype(self):
        """User has entered a searchtype (field) and query (value)."""
        data = MultiDict({"searchtype": "title", "query": "foo title"})
        form = SimpleSearchForm(data)
        self.assertTrue(form.validate(), "Form should be valid")

    def test_input_whitespace_is_stripped(self):
        """If query has padding whitespace, it should be removed."""
        data = MultiDict({"searchtype": "title", "query": " foo title "})
        form = SimpleSearchForm(data)
        self.assertTrue(form.validate(), "Form should be valid.")
        self.assertEqual(form.query.data, "foo title")

    def test_querystring_has_unbalanced_quotes(self):
        """Querystring has an odd number of quote characters."""
        data = MultiDict({"searchtype": "title", "query": '"rhubarb'})
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

        data["query"] = '"rhubarb"'
        form = SimpleSearchForm(data)
        self.assertTrue(form.validate(), "Form should be valid")

        data["query"] = '"rhubarb" "pie'
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

        data["query"] = '"rhubarb" "pie"'
        form = SimpleSearchForm(data)
        self.assertTrue(form.validate(), "Form should be valid")

    def test_querystring_has_wildcard_at_start(self):
        """Querystring starts with a wildcard."""
        data = MultiDict({"searchtype": "title", "query": "*foo title"})
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")


class TestQueryFromForm(TestCase):
    """Tests for :func:`.simple._query_from_form`."""

    def test_multiple_simple(self):
        """Form data has a simple query."""
        data = MultiDict({"searchtype": "title", "query": "foo title"})
        form = SimpleSearchForm(data)
        query = simple._query_from_form(form)
        self.assertIsInstance(
            query, SimpleQuery, "Should return an instance of SimpleQuery"
        )

    def test_form_data_has_order(self):
        """Form data includes sort order."""
        data = MultiDict(
            {
                "searchtype": "title",
                "query": "foo title",
                "order": "submitted_date",
            }
        )
        form = SimpleSearchForm(data)
        query = simple._query_from_form(form)
        self.assertIsInstance(
            query, SimpleQuery, "Should return an instance of SimpleQuery"
        )
        self.assertEqual(query.order, "submitted_date")

    def test_form_data_has_no_order(self):
        """Form data includes sort order parameter, but it is 'None'."""
        data = MultiDict(
            {"searchtype": "title", "query": "foo title", "order": "None"}
        )
        form = SimpleSearchForm(data)
        query = simple._query_from_form(form)
        self.assertIsInstance(
            query, SimpleQuery, "Should return an instance of SimpleQuery"
        )
        self.assertIsNone(query.order, "Order should be None")
class TestPaginationParametersAreFunky(TestCase):
    """
    The user may have monkeyed with the order or sort parameters.

    Since these are limited to specific values, there is no other reason for
    them to be invalid. Given that they are passed around among views (to
    persist users' selection), it's important to break the chain. To do
    this, we return a 400 Bad Request, with a clean link back to the search
    form.
    """

    @mock.patch("search.controllers.simple.url_for")
    def test_order_is_invalid(self, mock_url_for):
        """The order parameter on the request is invalid."""
        request_data = MultiDict(
            {
                "searchtype": "title",
                "query": "foo title",
                "size": 50,  # Valid.
                "order": "foo",  # Invalid
            }
        )
        with self.assertRaises(BadRequest):
            simple.search(request_data)

    @mock.patch("search.controllers.simple.url_for")
    def test_size_is_invalid(self, mock_url_for):
        """The size parameter on the request is invalid."""
        request_data = MultiDict(
            {
                "searchtype": "title",
                "query": "foo title",
                "size": 51,  # Invalid
                "order": "",  # Valid
            }
        )
        with self.assertRaises(BadRequest):
            simple.search(request_data)
class TestClassicAuthorSyntaxIsIntercepted(TestCase):
    """
    The user may have entered an author query using `surname_f` syntax.

    This is an artefact of the classic search system, and not intended to be
    supported. Nevertheless, users have become accustomed to this syntax. We
    therefore rewrite the query using a comma, and show the user a warning
    about the syntax change.
    """

    @mock.patch("search.controllers.simple.SearchSession")
    def test_all_fields_search_contains_classic_syntax(self, mock_index):
        """User has entered a `surname_f` query in an all-fields search."""
        request_data = MultiDict(
            {
                "searchtype": "all",
                "query": "franklin_r",
                "size": 50,
                "order": "",
            }
        )
        mock_index.search.return_value = {"metadata": {}, "results": []}

        data, code, headers = simple.search(request_data)
        self.assertEqual(
            data["query"].value,
            "franklin, r",
            "The query should be rewritten.",
        )
        self.assertTrue(
            data["has_classic_format"],
            "A flag denoting the syntax interception should be set"
            " in the response context, so that a message may be"
            " rendered in the template.",
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_author_search_contains_classic_syntax(self, mock_index):
        """User has entered a `surname_f` query in an author search."""
        request_data = MultiDict(
            {
                "searchtype": "author",
                "query": "franklin_r",
                "size": 50,
                "order": "",
            }
        )
        mock_index.search.return_value = {"metadata": {}, "results": []}

        data, code, headers = simple.search(request_data)
        self.assertEqual(
            data["query"].value,
            "franklin, r",
            "The query should be rewritten.",
        )
        self.assertTrue(
            data["has_classic_format"],
            "A flag denoting the syntax interception should be set"
            " in the response context, so that a message may be"
            " rendered in the template.",
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_title_search_contains_classic_syntax(self, mock_index):
        """User has entered a `surname_f` query in a title search."""
        request_data = MultiDict(
            {
                "searchtype": "title",
                "query": "franklin_r",
                "size": 50,
                "order": "",
            }
        )
        mock_index.search.return_value = {"metadata": {}, "results": []}

        data, code, headers = simple.search(request_data)
        self.assertEqual(
            data["query"].value,
            "franklin_r",
            "The query should not be rewritten.",
        )
        self.assertFalse(
            data["has_classic_format"],
            "Flag should not be set, as no rewrite has occurred.",
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_all_fields_search_multiple_classic_syntax(self, mock_index):
        """User has entered a classic query with multiple authors."""
        request_data = MultiDict(
            {
                "searchtype": "all",
                "query": "j franklin_r hawking_s",
                "size": 50,
                "order": "",
            }
        )
        mock_index.search.return_value = {"metadata": {}, "results": []}

        data, code, headers = simple.search(request_data)
        self.assertEqual(
            data["query"].value,
            "j franklin, r; hawking, s",
            "The query should be rewritten.",
        )
        self.assertTrue(
            data["has_classic_format"],
            "A flag denoting the syntax interception should be set"
            " in the response context, so that a message may be"
            " rendered in the template.",
        )
self.assertRaises(BadRequest): simple.search(request_data) @mock.patch(\"search.controllers.simple.url_for\") def test_size_is_invalid(self, mock_url_for): \"\"\"The", "\"searchtype\": \"title\", \"query\": \"foo title\", \"size\": 51, # Invalid \"order\":", "_raiseDocumentNotFound(*args, **kwargs): raise DocumentNotFound(\"What now\") mock_index.get_document.side_effect = _raiseDocumentNotFound with self.assertRaises(NotFound):", "\"\"\"There is a bug in the index or query.\"\"\" def", "headers = simple.retrieve_document(\"124.5678\") self.assertEqual( mock_index.get_document.call_count, 1, \"A search should be", "form = SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be valid.\") self.assertEqual(form.query.data, \"foo", "search.\"\"\" request_data = MultiDict( { \"searchtype\": \"all\", \"query\": \"franklin_r\", \"size\":", "( IndexConnectionError, QueryError, DocumentNotFound, ) class TestRetrieveDocument(TestCase): \"\"\"Tests for :func:`.simple.retrieve_document`.\"\"\"", "def test_order_is_invalid(self, mock_url_for): \"\"\"The order parameter on the request is", "quote characters.\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": '\"rhubarb'}) form =", "SimpleSearchForm(data) self.assertTrue(form.validate(), \"Form should be valid\") class TestQueryFromForm(TestCase): \"\"\"Tests for", "of quote characters.\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": '\"rhubarb'}) form", "self.assertTrue(form.validate(), \"Form should be valid\") class TestQueryFromForm(TestCase): \"\"\"Tests for :func:`.simple._query_from_form`.\"\"\"", "test_document_not_found(self, mock_index): \"\"\"The document is not found.\"\"\" def _raiseDocumentNotFound(*args, **kwargs):", "should be invalid\") def test_input_whitespace_is_stripped(self): \"\"\"If query has padding whitespace,", "def test_input_whitespace_is_stripped(self): \"\"\"If query has padding whitespace, it should be", "class TestClassicAuthorSyntaxIsIntercepted(TestCase): \"\"\" The user may have entered an author", "that a message may be\" \" rendered in the template.\",", "\"Should return an instance of SimpleQuery\" ) self.assertIsNone(query.order, \"Order should", "and query (value).\"\"\" data = MultiDict({\"searchtype\": \"title\", \"query\": \"foo title\"})", "50, \"order\": \"\", } ) mock_index.search.return_value = {\"metadata\": {}, \"results\":", "@mock.patch(\"search.controllers.simple.SearchSession\") def test_single_field_term(self, mock_index): \"\"\"Form data are present.\"\"\" mock_index.search.return_value =", "search should be attempted\" ) class TestSimpleSearchForm(TestCase): \"\"\"Tests for :class:`.SimpleSearchForm`.\"\"\"", "code, headers = simple.search(request_data) self.assertEqual( mock_index.search.call_count, 1, \"A search should", "SimpleSearchForm(data) self.assertFalse(form.validate(), \"Form should be invalid\") def test_query_only(self): \"\"\"User has", "with the order or sort parameters. Since these are limited", "= MultiDict() response_data, code, headers = simple.search(request_data) self.assertEqual(code, HTTPStatus.OK, \"Response", "MultiDict( { \"searchtype\": \"author\", \"query\": \"franklin_r\", \"size\": 50, \"order\": \"\",", "= MultiDict( { \"searchtype\": \"all\", \"query\": \"j franklin_r hawking_s\", \"size\":", "data, code, headers = simple.search(request_data) self.assertEqual( data[\"query\"].value, \"franklin_r\", \"The query", "system, and not intended to be supported. 
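# A note on running this module: SearchSession is patched throughout, so no
# live Elasticsearch index is needed. Assuming a runner such as pytest is
# installed (an assumption; the project may use a different runner), the
# suite can be run with, e.g.:
#
#   pytest search/controllers/simple/tests.py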
class TestSearchController(TestCase):
    """Tests for :func:`.simple.search`."""

    @mock.patch(
        "search.controllers.simple.url_for",
        lambda *a, **k: f'https://arxiv.org/{k["paper_id"]}',
    )
    @mock.patch("search.controllers.simple.SearchSession")
    def test_arxiv_id(self, mock_index):
        """Query parameter contains an arXiv ID."""
        request_data = MultiDict({"query": "1702.00123"})
        response_data, code, headers = simple.search(request_data)
        self.assertEqual(
            code,
            HTTPStatus.MOVED_PERMANENTLY,
            "Response should be a 301 redirect.",
        )
        self.assertIn("Location", headers, "Location header should be set")
        self.assertEqual(
            mock_index.search.call_count, 0, "No search should be attempted"
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_no_form_data(self, mock_index):
        """No form data has been submitted."""
        request_data = MultiDict()
        response_data, code, headers = simple.search(request_data)
        self.assertEqual(code, HTTPStatus.OK, "Response should be OK.")
        self.assertIn("form", response_data, "Response should include form.")
        self.assertEqual(
            mock_index.search.call_count, 0, "No search should be attempted"
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_single_field_term(self, mock_index):
        """Form data are present."""
        mock_index.search.return_value = {"metadata": {}, "results": []}
        request_data = MultiDict({"searchtype": "title", "query": "foo title"})
        response_data, code, headers = simple.search(request_data)
        self.assertEqual(
            mock_index.search.call_count, 1, "A search should be attempted"
        )
        call_args, call_kwargs = mock_index.search.call_args
        self.assertIsInstance(
            call_args[0],
            SimpleQuery,
            "A SimpleQuery is passed to the search index",
        )
        self.assertEqual(code, HTTPStatus.OK, "Response should be OK.")

    @mock.patch("search.controllers.simple.SearchSession")
    def test_invalid_data(self, mock_index):
        """Form data are invalid."""
        request_data = MultiDict({"searchtype": "title"})
        response_data, code, headers = simple.search(request_data)
        self.assertEqual(code, HTTPStatus.OK, "Response should be OK.")
        self.assertIn("form", response_data, "Response should include form.")
        self.assertEqual(
            mock_index.search.call_count, 0, "No search should be attempted"
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_index_raises_connection_exception(self, mock_index):
        """Index service raises an IndexConnectionError."""

        def _raiseIndexConnectionError(*args, **kwargs):
            raise IndexConnectionError("What now")

        mock_index.search.side_effect = _raiseIndexConnectionError
        request_data = MultiDict({"searchtype": "title", "query": "foo title"})
        with self.assertRaises(InternalServerError):
            _, _, _ = simple.search(request_data)

        self.assertEqual(
            mock_index.search.call_count, 1, "A search should be attempted"
        )
        call_args, call_kwargs = mock_index.search.call_args
        self.assertIsInstance(
            call_args[0],
            SimpleQuery,
            "A SimpleQuery is passed to the search index",
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_index_raises_query_error(self, mock_index):
        """Index service raises a QueryError."""

        def _raiseQueryError(*args, **kwargs):
            raise QueryError("What now")

        mock_index.search.side_effect = _raiseQueryError
        request_data = MultiDict({"searchtype": "title", "query": "foo title"})
        with self.assertRaises(InternalServerError):
            try:
                response_data, code, headers = simple.search(request_data)
            except QueryError as ex:
                self.fail("QueryError should be handled (caught %s)" % ex)

        self.assertEqual(
            mock_index.search.call_count, 1, "A search should be attempted"
        )
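# Contract exercised above: simple.search takes a MultiDict of request
# parameters and returns a (response_data, status_code, headers) 3-tuple.
# A minimal passing call under the same mock (illustrative only; the query
# value here is arbitrary):
#
#   mock_index.search.return_value = {"metadata": {}, "results": []}
#   data, code, headers = simple.search(
#       MultiDict({"searchtype": "all", "query": "quantum"})
#   )
#   assert code == HTTPStatus.OK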
class TestSimpleSearchForm(TestCase):
    """Tests for :class:`.SimpleSearchForm`."""

    def test_searchtype_only(self):
        """User has entered only a searchtype (field)."""
        data = MultiDict({"searchtype": "title"})
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

    def test_query_only(self):
        """User has entered only a query (value); this should never happen."""
        data = MultiDict({"query": "someone monkeyed with the request"})
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

    def test_query_and_searchtype(self):
        """User has entered a searchtype (field) and query (value)."""
        data = MultiDict({"searchtype": "title", "query": "foo title"})
        form = SimpleSearchForm(data)
        self.assertTrue(form.validate(), "Form should be valid")

    def test_query_starts_with_wildcard(self):
        """Query starts with a wildcard."""
        data = MultiDict({"searchtype": "title", "query": "*foo title"})
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

    def test_input_whitespace_is_stripped(self):
        """If query has padding whitespace, it should be removed."""
        data = MultiDict({"searchtype": "title", "query": " foo title "})
        form = SimpleSearchForm(data)
        self.assertTrue(form.validate(), "Form should be valid.")
        self.assertEqual(form.query.data, "foo title")

    def test_querystring_has_unbalanced_quotes(self):
        """Querystring has an odd number of quote characters."""
        data = MultiDict({"searchtype": "title", "query": '"rhubarb'})
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

        data["query"] = '"rhubarb"'
        form = SimpleSearchForm(data)
        self.assertTrue(form.validate(), "Form should be valid")

        data["query"] = '"rhubarb" "pie'
        form = SimpleSearchForm(data)
        self.assertFalse(form.validate(), "Form should be invalid")

        data["query"] = '"rhubarb" "pie"'
        form = SimpleSearchForm(data)
        self.assertTrue(form.validate(), "Form should be valid")


class TestQueryFromForm(TestCase):
    """Tests for :func:`.simple._query_from_form`."""

    def test_multiple_simple(self):
        """Form data has three simple."""
        data = MultiDict({"searchtype": "title", "query": "foo title"})
        form = SimpleSearchForm(data)
        query = simple._query_from_form(form)
        self.assertIsInstance(
            query, SimpleQuery, "Should return an instance of SimpleQuery"
        )

    def test_form_data_has_order(self):
        """Form data includes sort order."""
        data = MultiDict(
            {
                "searchtype": "title",
                "query": "foo title",
                "order": "submitted_date",
            }
        )
        form = SimpleSearchForm(data)
        query = simple._query_from_form(form)
        self.assertIsInstance(
            query, SimpleQuery, "Should return an instance of SimpleQuery"
        )
        self.assertEqual(query.order, "submitted_date")

    def test_form_data_has_no_order(self):
        """Form data includes sort order parameter, but it is 'None'."""
        data = MultiDict(
            {"searchtype": "title", "query": "foo title", "order": "None"}
        )
        form = SimpleSearchForm(data)
        query = simple._query_from_form(form)
        self.assertIsInstance(
            query, SimpleQuery, "Should return an instance of SimpleQuery"
        )
        self.assertIsNone(query.order, "Order should be None")
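# The unbalanced-quote cases above imply a check of roughly this shape. This
# is an illustrative sketch only; `has_balanced_quotes` is hypothetical and
# is not the actual validator in search.controllers.simple.forms:
#
#   def has_balanced_quotes(querystring: str) -> bool:
#       """Return True iff the querystring has an even number of '"' chars."""
#       return querystring.count('"') % 2 == 0
#
#   assert not has_balanced_quotes('"rhubarb')
#   assert has_balanced_quotes('"rhubarb" "pie"')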
class TestPaginationParametersAreFunky(TestCase):
    """
    The user may have monkeyed with the order or sort parameters.

    Since these are limited to specific values, there is no other reason for
    them to be invalid. Given that they are passed around among views (to
    persist users' selection), it's important to break the chain. To do this,
    we return a 400 Bad Request, with a clean link back to the search form.
    """

    @mock.patch("search.controllers.simple.url_for")
    def test_order_is_invalid(self, mock_url_for):
        """The order parameter on the request is invalid."""
        request_data = MultiDict(
            {
                "searchtype": "title",
                "query": "foo title",
                "size": 50,  # Valid.
                "order": "foo",  # Invalid
            }
        )
        with self.assertRaises(BadRequest):
            simple.search(request_data)

    @mock.patch("search.controllers.simple.url_for")
    def test_size_is_invalid(self, mock_url_for):
        """The size parameter on the request is invalid."""
        request_data = MultiDict(
            {
                "searchtype": "title",
                "query": "foo title",
                "size": 51,  # Invalid
                "order": "",  # Valid
            }
        )
        with self.assertRaises(BadRequest):
            simple.search(request_data)


class TestClassicAuthorSyntaxIsIntercepted(TestCase):
    """
    The user may have entered an author query using `surname_f` syntax.

    This is an artefact of the classic search system, and not intended to be
    supported. Nevertheless, users have become accustomed to this syntax. We
    therefore rewrite the query using a comma, and show the user a warning
    about the syntax change.
    """

    @mock.patch("search.controllers.simple.SearchSession")
    def test_all_fields_search_contains_classic_syntax(self, mock_index):
        """User has entered a `surname_f` query in an all-fields search."""
        request_data = MultiDict(
            {"searchtype": "all", "query": "franklin_r", "size": 50, "order": ""}
        )
        mock_index.search.return_value = {"metadata": {}, "results": []}
        data, code, headers = simple.search(request_data)
        self.assertEqual(
            data["query"].value,
            "franklin, r",
            "The query should be rewritten.",
        )
        self.assertTrue(
            data["has_classic_format"],
            "A flag denoting the syntax interception should be set"
            " in the response context, so that a message may be"
            " rendered in the template.",
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_author_search_contains_classic_syntax(self, mock_index):
        """User has entered a `surname_f` query in an author search."""
        request_data = MultiDict(
            {"searchtype": "author", "query": "franklin_r", "size": 50, "order": ""}
        )
        mock_index.search.return_value = {"metadata": {}, "results": []}
        data, code, headers = simple.search(request_data)
        self.assertEqual(
            data["query"].value,
            "franklin, r",
            "The query should be rewritten.",
        )
        self.assertTrue(
            data["has_classic_format"],
            "A flag denoting the syntax interception should be set"
            " in the response context, so that a message may be"
            " rendered in the template.",
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_title_search_contains_classic_syntax(self, mock_index):
        """User has entered a `surname_f` query in a title search."""
        request_data = MultiDict(
            {"searchtype": "title", "query": "franklin_r", "size": 50, "order": ""}
        )
        mock_index.search.return_value = {"metadata": {}, "results": []}
        data, code, headers = simple.search(request_data)
        self.assertEqual(
            data["query"].value,
            "franklin_r",
            "The query should not be rewritten.",
        )
        self.assertFalse(
            data["has_classic_format"],
            "Flag should not be set, as no rewrite has occurred.",
        )

    @mock.patch("search.controllers.simple.SearchSession")
    def test_all_fields_search_multiple_classic_syntax(self, mock_index):
        """User has entered a classic query with multiple authors."""
        request_data = MultiDict(
            {
                "searchtype": "all",
                "query": "j franklin_r hawking_s",
                "size": 50,
                "order": "",
            }
        )
        mock_index.search.return_value = {"metadata": {}, "results": []}
        data, code, headers = simple.search(request_data)
        self.assertEqual(
            data["query"].value,
            "j franklin, r; hawking, s",
            "The query should be rewritten.",
        )
        self.assertTrue(
            data["has_classic_format"],
            "A flag denoting the syntax interception should be set"
            " in the response context, so that a message may be"
            " rendered in the template.",
        )
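# The rewrite behavior asserted above could be realized with a regex of
# roughly this shape. Illustrative sketch only: `rewrite_classic_author` is
# hypothetical, not the implementation in search.controllers.simple (which,
# per the title-search test above, also restricts the rewrite to author and
# all-fields searches):
#
#   import re
#
#   def rewrite_classic_author(query: str) -> str:
#       """Rewrite classic `surname_f` tokens as `surname, f`, joined by ';'."""
#       return re.sub(
#           r"([A-Za-z]+)_([A-Za-z])\b", r"\1, \2;", query
#       ).rstrip(";")
#
#   rewrite_classic_author("j franklin_r hawking_s")
#   # -> "j franklin, r; hawking, s"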
[ "= 0.41 self.eye_open_position = 0.0 # Initialize the camera self.img_sub", "sample `where_am_i` help message, and actually using that would require", "self.head_controller_action.send_goal(goal) self.head_controller_action.wait_for_result(duration) self.has_centered_head = True def state_machine_control_loop(self, rate_hz=10): \"\"\" The", "self.where_am_i_help_callback, queue_size=1) # Initialize storing images and message IDs self.sent_messages_database_filepath", "(response, action_ts)) self.sent_messages_database.add_user_response(message_id, action_ts, response) num_updates += 1 self.database_updated(num_updates) except", "Kuri's current camera image self.low_battery_message_include_image = rospy.get_param('~low_battery_message_include_image', True) # Initialize", "JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal from kuri_wandering_robot.msg import Power from wandering_behavior.msg import", "\"\"\" Open the robot's eyes \"\"\" rospy.logdebug(\"Open Eyes\") duration =", "% e) update_previous_battery = False break if (update_previous_battery and (self.previous_battery", "to NORMAL if it's battery is above a threshold or", "self.slackbot_responses_thread = threading.Thread( target=self.get_slackbot_updates, ) self.slackbot_responses_thread.start() # Initialize the state", "control loop for the state machine. All of the state", "message_id_to_responses = res_json[\"message_id_to_responses\"] if len(message_id_to_responses) > 0: num_updates = 0", "or it is off the charger. \"\"\" rate = rospy.Rate(rate_hz)", "(e.g., those that have not received responses yet) \"\"\" r", "= requests.post( os.path.join(self.slackbot_url, 'low_battery'), json=dict_to_send, ) res_json = res.json() if", "This is merely intended to showcase some of the Slackbot's", "pan_interval = (pan_endpoint-current_pan)/(n_waypoints-1) tilt_interval = (tilt_endpoint-current_tilt)/(n_waypoints-1) time_interval = duration/n_waypoints for", "from NORMAL to CHARGING if its battery is below a", "low-battery helper notifications when the battery # crosses the thresholds", "= False self.has_loaded = True def database_updated(self, num_updates=1): \"\"\" Called", "actionlib_msgs.msg import GoalStatus from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal from", "with self.latest_image_lock: image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') res = requests.post( os.path.join(self.slackbot_url, 'where_am_i'),", "'get_updates'), json={'message_ids_and_action_ts':message_ids_and_action_ts}, ) res_json = res.json() rospy.logdebug(\"Got updates from Slackbot", "requests.post( os.path.join(self.slackbot_url, 'low_battery'), json=dict_to_send, ) res_json = res.json() if not", "Kuri is docked, charge self.to_charge_threshold = rospy.get_param('~to_charge_threshold', 50) # if", "seconds, request updates (e.g., human responses) from the Slackbot. 
Note", "if not self.has_centered_head: self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1]) def center_head(self, current_pan, current_tilt): \"\"\"", "should implement their own anomaly detection system for triggering this", "\"\"\" rate = rospy.Rate(rate_hz) while not rospy.is_shutdown(): rate.sleep() with self.state_lock:", "rospy.Subscriber( \"/where_am_i_help\", Empty, self.where_am_i_help_callback, queue_size=1) # Initialize storing images and", "and Kuri is charging, switch back to NORMAL self.charging_done_threshold =", "human to tell it where it is should implement their", "# Send the goal self.eyelid_controller_action.wait_for_server() self.eyelid_controller_action.send_goal(goal) self.eyelid_controller_action.wait_for_result(duration) def close_eyes(self, duration_secs=0.2):", "and Kuri is docked, charge self.to_charge_threshold = rospy.get_param('~to_charge_threshold', 50) #", "self.close_eyes() self.state = KuriWanderingRobotState.CHARGING self.wandering_module_action.cancel_all_goals() rospy.loginfo(\"State: NORMAL ==> CHARGING\") elif", "== KuriWanderingRobotState.CHARGING: self.previous_battery = msg.battery.pct else: update_previous_battery = True if", "it where it is should implement their own anomaly detection", "additional anomaly detection and help requests, as needed. This node", "latest image. \"\"\" if not self.has_loaded: return with self.latest_image_lock: self.latest_image", "KuriWanderingRobotState.CHARGING: self.previous_battery = msg.battery.pct else: update_previous_battery = True if msg.battery.pct", "Get the Slackbot URL self.slackbot_url = rospy.get_param('~slackbot_url') # Initialize the", "self.state_lock: state_at_start_of_loop = self.state if (self.state == KuriWanderingRobotState.NORMAL): goal_state =", "is docked, charge self.to_charge_threshold = rospy.get_param('~to_charge_threshold', 50) # if the", "that triggers sending a where_am_i help message to the Slackbot.", "range(len(self.battery_notification_thresholds)): if (self.previous_battery is None or (self.previous_battery > self.battery_notification_thresholds[i]) and", "i in range(n_waypoints): point = JointTrajectoryPoint() point.positions = [current_pan +", "= threading.Lock() self.previous_battery = None self.previous_dock_present = None self.battery_notification_thresholds =", "threading.Lock() self.previous_battery = None self.previous_dock_present = None self.battery_notification_thresholds = rospy.get_param('~battery_notification_thresholds',", "tell it where it is should implement their own anomaly", "anomaly detector self.battery_sub = rospy.Subscriber( \"/mobile_base/power\", Power, self.power_callback, queue_size=1) self.previous_battery_lock", "every self.database_save_interval updates \"\"\" self.database_updates_since_last_save += num_updates if self.database_updates_since_last_save %", "low battery message should include Kuri's current camera image self.low_battery_message_include_image", "The control loop for the state machine. 
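    # For reference, the SentMessagesDatabase interface relied on below could
    # look roughly like the following. This is a hypothetical sketch inferred
    # from the call sites in this file, not the actual sent_messages_database
    # module:
    #
    #   class SentMessagesDatabase:
    #       @staticmethod
    #       def load(filepath): ...          # return saved database, or a new one
    #       def save(self, filepath): ...    # persist to disk
    #       def add_respondable_message(self, message_id): ...
    #       def get_message_ids_and_latest_action_ts(self): ...
    #       def add_user_response(self, message_id, action_ts, response): ...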
    def database_updated(self, num_updates=1):
        """
        Called every time the database is updated. Saves the database every
        self.database_save_interval updates.
        """
        self.database_updates_since_last_save += num_updates
        if self.database_updates_since_last_save % self.database_save_interval == 0:
            self.sent_messages_database.save(self.sent_messages_database_filepath)
            rospy.logdebug("Saved sent_messages_database!")

    def open_eyes(self, duration_secs=0.2):
        """Open the robot's eyes."""
        rospy.logdebug("Open Eyes")
        duration = rospy.Duration.from_sec(duration_secs)
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.header.stamp = rospy.Time.now()
        goal.trajectory.joint_names = ["eyelids_joint"]
        point = JointTrajectoryPoint()
        point.positions = [self.eye_open_position]
        point.velocities = []
        point.accelerations = []
        point.effort = []
        point.time_from_start = duration
        goal.trajectory.points = [point]
        # Send the goal
        self.eyelid_controller_action.wait_for_server()
        self.eyelid_controller_action.send_goal(goal)
        self.eyelid_controller_action.wait_for_result(duration)

    def close_eyes(self, duration_secs=0.2):
        """Close the robot's eyes."""
        rospy.logdebug("Close Eyes")
        duration = rospy.Duration.from_sec(duration_secs)
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.header.stamp = rospy.Time.now()
        goal.trajectory.joint_names = ["eyelids_joint"]
        point = JointTrajectoryPoint()
        point.positions = [self.eye_closed_position]
        point.velocities = []
        point.accelerations = []
        point.effort = []
        point.time_from_start = duration
        goal.trajectory.points = [point]
        # Send the goal
        self.eyelid_controller_action.wait_for_server()
        self.eyelid_controller_action.send_goal(goal)
        self.eyelid_controller_action.wait_for_result(duration)
    def head_state_callback(self, head_state_msg):
        """
        Get the head's current position, and center the head if it has not
        yet been centered.
        """
        if not self.has_loaded:
            return
        if not self.has_centered_head:
            self.center_head(
                head_state_msg.actual.positions[0],
                head_state_msg.actual.positions[1])

    def center_head(self, current_pan, current_tilt):
        """
        Center Kuri's head. This involves moving from the current pan and
        tilt to the centered values of (0.0, -0.3)
        """
        pan_endpoint = 0.0
        tilt_endpoint = -0.3
        n_waypoints = 10
        # Compute the actual endpoint and duration_secs
        duration_secs = max(
            abs(pan_endpoint - current_pan) / self.head_pan_speed,
            abs(tilt_endpoint - current_tilt) / self.head_tilt_speed)
        duration = rospy.Duration.from_sec(duration_secs)
        # Create the goal
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.header.stamp = rospy.Time.now()
        goal.trajectory.joint_names = ["head_1_joint", "head_2_joint"]
        goal.trajectory.points = []
        pan_interval = (pan_endpoint - current_pan) / (n_waypoints - 1)
        tilt_interval = (tilt_endpoint - current_tilt) / (n_waypoints - 1)
        time_interval = duration / n_waypoints
        for i in range(n_waypoints):
            point = JointTrajectoryPoint()
            point.positions = [
                current_pan + i * pan_interval,
                current_tilt + i * tilt_interval,
            ]
            point.velocities = []
            point.accelerations = []
            point.effort = []
            point.time_from_start = (i + 1) * time_interval
            goal.trajectory.points.append(point)
        # Send the goal
        self.head_controller_action.wait_for_server()
        self.head_controller_action.send_goal(goal)
        self.head_controller_action.wait_for_result(duration)
        self.has_centered_head = True
    def state_machine_control_loop(self, rate_hz=10):
        """
        The control loop for the state machine. All of the state machine
        logic is handled in this function and the functions it calls.

        During NORMAL, the base moves according to wandering_behavior.
        During CHARGING, the robot's eyes are closed and it is charging. The
        robot transitions from NORMAL to CHARGING if its battery is below a
        threshold and it is on the charger. It transitions from CHARGING to
        NORMAL if its battery is above a threshold or it is off the charger.
        """
        rate = rospy.Rate(rate_hz)
        while not rospy.is_shutdown():
            rate.sleep()
            with self.state_lock:
                state_at_start_of_loop = self.state
                if self.state == KuriWanderingRobotState.NORMAL:
                    goal_state = self.wandering_module_action.get_state()
                    if (self.state_changed or
                            goal_state == GoalStatus.SUCCEEDED or
                            goal_state == GoalStatus.ABORTED):
                        # An effort of -1 tells the wandering module to wander
                        # indefinitely; it will not "stop unless preempted"
                        self.wandering_module_action.send_goal(
                            WanderGoal(effort=-1))
                        self.open_eyes()
                    with self.previous_battery_lock:
                        if (self.previous_battery is not None and
                                self.previous_battery < self.to_charge_threshold and
                                self.previous_dock_present):
                            self.close_eyes()
                            self.state = KuriWanderingRobotState.CHARGING
                            self.wandering_module_action.cancel_all_goals()
                            rospy.loginfo("State: NORMAL ==> CHARGING")
                elif self.state == KuriWanderingRobotState.CHARGING:
                    with self.previous_battery_lock:
                        if (self.previous_battery is None or
                                not self.previous_dock_present or
                                self.previous_battery >= self.charging_done_threshold):
                            self.state = KuriWanderingRobotState.NORMAL
                            rospy.loginfo("State: CHARGING ==> NORMAL")
                state_at_end_of_loop = self.state
                self.state_changed = (
                    state_at_start_of_loop != state_at_end_of_loop)

    def image_callback(self, img_msg):
        """Store the latest image."""
        if not self.has_loaded:
            return
        with self.latest_image_lock:
            self.latest_image = img_msg

    def power_callback(self, msg):
        """
        Callback function for Kuri's power update. If Kuri's battery has
        crossed a battery_notification_threshold, notify the Slackbot.
        """
        if not self.has_loaded:
            return
        with self.state_lock:
            with self.previous_battery_lock:
                self.previous_dock_present = msg.dock_present
                if self.state == KuriWanderingRobotState.CHARGING:
                    self.previous_battery = msg.battery.pct
                else:
                    update_previous_battery = True
                    if msg.battery.pct <= self.battery_notification_thresholds[0]:
                        # Send the low-battery helper notifications when the
                        # battery crosses the thresholds defined in
                        # self.battery_notification_thresholds
                        for i in range(len(self.battery_notification_thresholds)):
                            if (self.previous_battery is None or
                                    (self.previous_battery > self.battery_notification_thresholds[i]) and
                                    msg.battery.pct <= self.battery_notification_thresholds[i]):
                                try:
                                    # Send a low_battery_alert
                                    dict_to_send = {'battery_pct': msg.battery.pct}
                                    if self.low_battery_message_include_image:
                                        with self.latest_image_lock:
                                            if self.latest_image is not None:
                                                image_contents = base64.b64encode(
                                                    bytearray(self.latest_image.data)).decode('ascii')
                                                dict_to_send['image'] = image_contents
                                    rospy.loginfo("Sending battery request for pct %s" % msg.battery.pct)
                                    res = requests.post(
                                        os.path.join(self.slackbot_url, 'low_battery'),
                                        json=dict_to_send,
                                    )
                                    res_json = res.json()
                                    if not res_json['success']:
                                        update_previous_battery = False
                                except Exception as e:
                                    rospy.logwarn(
                                        "Error communicating with Slackbot /low_battery at URL %s." % self.slackbot_url)
                                    if "res" in locals():
                                        rospy.logwarn("Response text %s." % res.text)
                                    rospy.logwarn(traceback.format_exc())
                                    rospy.logwarn("Error %s." % e)
                                    update_previous_battery = False
                                break
                    if (update_previous_battery and
                            (self.previous_battery is None or
                             msg.battery.pct < self.previous_battery)):
                        self.previous_battery = msg.battery.pct
This node also subscribes to a dummy `where_am_i_help`", "self.has_loaded: return with self.latest_image_lock: self.latest_image = img_msg def power_callback(self, msg):", "= rospy.get_param('~to_charge_threshold', 50) # if the batter is greater than", "wandering module in NORMAL, turning off wandering in CHARGING, and", "message_id = res_json['message_id'] self.sent_messages_database.add_respondable_message(message_id) self.database_updated() except Exception as e: rospy.logwarn(\"Error", "charger. It transitions from CHARGING to NORMAL if it's battery", "0.2 # head tilt is in [-0.8, 0.3] self.head_pan_speed =", "self.database_updated(num_updates) except Exception as e: rospy.logwarn(\"Error communicating with Slackbot /get_updates", "state machine. All of the state machine logic is handled", "self.head_controller_action.wait_for_server() self.head_controller_action.send_goal(goal) self.head_controller_action.wait_for_result(duration) self.has_centered_head = True def state_machine_control_loop(self, rate_hz=10): \"\"\"", "SentMessagesDatabase class KuriWanderingRobotState(Enum): \"\"\" During NORMAL, the base moves according", "[current_pan + i*pan_interval, current_tilt + i*tilt_interval] point.velocities = [] point.accelerations", "if self.latest_image is not None: image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') dict_to_send['image'] =", "or it is off the charger. \"\"\" NORMAL = 1", "% (response, action_ts)) self.sent_messages_database.add_user_response(message_id, action_ts, response) num_updates += 1 self.database_updated(num_updates)", "point.accelerations = [] point.effort = [] point.time_from_start = (i+1)*time_interval goal.trajectory.points.append(point)", "KuriWanderingRobotState.NORMAL # Initialize the wandering module self.wandering_module_action = actionlib.SimpleActionClient('/wandering_behavior/navigate', WanderAction)", "rospy.logdebug(\"Got updates from Slackbot %s\" % res_json) message_id_to_responses = res_json[\"message_id_to_responses\"]", "wandering module self.wandering_module_action = actionlib.SimpleActionClient('/wandering_behavior/navigate', WanderAction) # Initialize the eye", "off the charger. 
\"\"\" NORMAL = 1 CHARGING = 2", "point = JointTrajectoryPoint() point.positions = [self.eye_closed_position] point.velocities = [] point.accelerations", "\"/mobile_base/power\", Power, self.power_callback, queue_size=1) self.previous_battery_lock = threading.Lock() self.previous_battery = None", "10, 5, 4, 3, 2, 1]) # if the battery", "update_previous_battery = True if msg.battery.pct <= self.battery_notification_thresholds[0]: # Send the", "rospy.Subscriber( \"/head_controller/state\", JointTrajectoryControllerState, self.head_state_callback, queue_size=1) self.head_controller_action = actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.head_tilt_speed", "dict_to_send['image'] = image_contents rospy.loginfo(\"Sending battery request for pct %s\" %", "send where_am_i help request but have no image.\") return try:", "= threading.Lock() self.state_changed = True self.state = KuriWanderingRobotState.NORMAL # Initialize", "stop unless preempted\" self.wandering_module_action.send_goal(WanderGoal(effort=-1)) self.open_eyes() with self.previous_battery_lock: if (self.previous_battery is", "for action_ts, response in message_id_to_responses[message_id]: rospy.loginfo(\"Got reaction %s from at", "rate.sleep() with self.state_lock: state_at_start_of_loop = self.state if (self.state == KuriWanderingRobotState.NORMAL):", "def open_eyes(self, duration_secs=0.2): \"\"\" Open the robot's eyes \"\"\" rospy.logdebug(\"Open", "tilt_interval = (tilt_endpoint-current_tilt)/(n_waypoints-1) time_interval = duration/n_waypoints for i in range(n_waypoints):", "image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') dict_to_send['image'] = image_contents rospy.loginfo(\"Sending battery request for", "head controller self.head_state_sub = rospy.Subscriber( \"/head_controller/state\", JointTrajectoryControllerState, self.head_state_callback, queue_size=1) self.head_controller_action", "request for pct %s\" % msg.battery.pct) res = requests.post( os.path.join(self.slackbot_url,", "rospy.logwarn(\"Error %s.\" % e) update_previous_battery = False break if (update_previous_battery", "is None or msg.battery.pct < self.previous_battery)): self.previous_battery = msg.battery.pct def", "+= num_updates if self.database_updates_since_last_save % self.database_save_interval == 0: self.sent_messages_database.save(self.sent_messages_database_filepath) rospy.logdebug(\"Saved", "text %s.\" % res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" % e) def", "is merely intended to showcase some of the Slackbot's capabilities.", "sends helpers the sample `where_am_i` help message. Note that that", "that that is only in place to illsutrate the sample", "message should include Kuri's current camera image self.low_battery_message_include_image = rospy.get_param('~low_battery_message_include_image',", "self.state self.state_changed = (state_at_start_of_loop != state_at_end_of_loop) def image_callback(self, img_msg): \"\"\"", "robot is sufficiently charged. This node also runs anomaly detection", "Users who want a robot that autonomously asks the human", "e) update_previous_battery = False break if (update_previous_battery and (self.previous_battery is", "in this function and the functions it calls. 
During NORMAL,", "(i+1)*time_interval goal.trajectory.points.append(point) # Send the goal self.head_controller_action.wait_for_server() self.head_controller_action.send_goal(goal) self.head_controller_action.wait_for_result(duration) self.has_centered_head", "system to trigger the robot asking for that type of", "can be extended with additional anomaly detection and help requests,", "def close_eyes(self, duration_secs=0.2): \"\"\" Close the robot's eyes \"\"\" rospy.logdebug(\"Close", "self.has_loaded: return if not self.has_centered_head: self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1]) def center_head(self, current_pan,", "node. This node runs a control loop that manages the", "`where_am_i` help message. Note that that is only in place", "(self.previous_battery is not None and self.previous_battery < self.to_charge_threshold and self.previous_dock_present):", "= rospy.Subscriber( \"/mobile_base/power\", Power, self.power_callback, queue_size=1) self.previous_battery_lock = threading.Lock() self.previous_battery", "database_updated(self, num_updates=1): \"\"\" Called everytime the database is updated. Saves", "not rospy.is_shutdown(): rate.sleep() with self.state_lock: state_at_start_of_loop = self.state if (self.state", "the Slackbot. \"\"\" if not self.has_loaded: return with self.state_lock: with", "self.state_lock: with self.previous_battery_lock: self.previous_dock_present = msg.dock_present if self.state == KuriWanderingRobotState.CHARGING:", "duration/n_waypoints for i in range(n_waypoints): point = JointTrajectoryPoint() point.positions =", "[self.eye_closed_position] point.velocities = [] point.accelerations = [] point.effort = []", "= [\"eyelids_joint\"] point = JointTrajectoryPoint() point.positions = [self.eye_closed_position] point.velocities =", "from wandering_behavior.msg import WanderAction, WanderGoal import rospy from sensor_msgs.msg import", "The central executive node. 
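    # A minimal launch sketch for the parameters read above. The pkg/type
    # names and the values shown are hypothetical; only the parameter names
    # come from this file:
    #
    #   <node pkg="kuri_wandering_robot" type="kuri_wandering_robot.py"
    #         name="kuri_wandering_robot" output="screen">
    #     <param name="slackbot_url" value="http://10.0.0.5:8194"/>
    #     <param name="send_messages_database_filepath"
    #            value="/data/sent_messages.pkl"/>
    #   </node>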
    def database_updated(self, num_updates=1):
        """
        Called every time the database is updated. Saves the database every
        self.database_save_interval updates
        """
        self.database_updates_since_last_save += num_updates
        if self.database_updates_since_last_save % self.database_save_interval == 0:
            self.sent_messages_database.save(self.sent_messages_database_filepath)
            rospy.logdebug("Saved sent_messages_database!")
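    # Note: with database_save_interval = 1 (set in __init__), the modulo
    # check above is always true, so every update triggers a save; raising
    # the interval batches saves at the cost of losing recent responses on
    # a crash.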
\"\"\" def __init__(self): \"\"\" Initialize an instance", "% res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" % e) r.sleep() if __name__", "sent_messages_database import SentMessagesDatabase class KuriWanderingRobotState(Enum): \"\"\" During NORMAL, the base", "FollowJointTrajectoryAction) self.eye_closed_position = 0.41 self.eye_open_position = 0.0 # Initialize the", "its battery is below a threshold and it is on", "turning off wandering in CHARGING, and switching back to NORMAL", "in locals(): rospy.logwarn(\"Response text %s.\" % res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\"", "= actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.head_tilt_speed = 0.2 # head tilt is", "= (state_at_start_of_loop != state_at_end_of_loop) def image_callback(self, img_msg): \"\"\" Store the", "rospy.Duration.from_sec(duration_secs) # Create the goal goal = FollowJointTrajectoryGoal() goal.trajectory.header.stamp =", "from actionlib_msgs.msg import GoalStatus from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal", "extended with additional anomaly detection and help requests, as needed.", "Initialize the wandering module self.wandering_module_action = actionlib.SimpleActionClient('/wandering_behavior/navigate', WanderAction) # Initialize", "if the battery is less than this and Kuri is", "# if the batter is greater than this and Kuri", "the goal goal = FollowJointTrajectoryGoal() goal.trajectory.header.stamp = rospy.Time.now() goal.trajectory.joint_names =", "this and Kuri is docked, charge self.to_charge_threshold = rospy.get_param('~to_charge_threshold', 50)", "robot's eyes \"\"\" rospy.logdebug(\"Open Eyes\") duration = rospy.Duration.from_sec(duration_secs) goal =", "+ i*tilt_interval] point.velocities = [] point.accelerations = [] point.effort =", "control loop that manages the robot's state: turning on and", "= [] point.effort = [] point.time_from_start = duration goal.trajectory.points =", "Python Default Libraries import base64 import csv from enum import", "topic, which sends helpers the sample `where_am_i` help message. 
    def close_eyes(self, duration_secs=0.2):
        """
        Close the robot's eyes
        """
        rospy.logdebug("Close Eyes")
        duration = rospy.Duration.from_sec(duration_secs)
        goal = FollowJointTrajectoryGoal()
        goal.trajectory.header.stamp = rospy.Time.now()
        goal.trajectory.joint_names = ["eyelids_joint"]
        point = JointTrajectoryPoint()
        point.positions = [self.eye_closed_position]
        point.velocities = []
        point.accelerations = []
        point.effort = []
        point.time_from_start = duration
        goal.trajectory.points = [point]
        # Send the goal
        self.eyelid_controller_action.wait_for_server()
        self.eyelid_controller_action.send_goal(goal)
        self.eyelid_controller_action.wait_for_result(duration)
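    # Both eye motions drive the single `eyelids_joint`; per the constants
    # set in __init__, 0.0 is the open position and 0.41 the closed one.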
\"\"\" with", "if self.state == KuriWanderingRobotState.CHARGING: self.previous_battery = msg.battery.pct else: update_previous_battery =", "message_id in message_id_to_responses: for action_ts, response in message_id_to_responses[message_id]: rospy.loginfo(\"Got reaction", "also subscribes to a dummy `where_am_i_help` topic, which sends helpers", "return if not self.has_centered_head: self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1]) def center_head(self, current_pan, current_tilt):", "head tilt is in [-0.8, 0.3] self.head_pan_speed = 0.2 #", "URL %s.\" % self.slackbot_url) if \"res\" in locals(): rospy.logwarn(\"Response text", "storing images and message IDs self.sent_messages_database_filepath = rospy.get_param('~send_messages_database_filepath') self.sent_messages_database =", "action_ts)) self.sent_messages_database.add_user_response(message_id, action_ts, response) num_updates += 1 self.database_updated(num_updates) except Exception", "import rospy from sensor_msgs.msg import CompressedImage from std_msgs.msg import Empty", "self.latest_image is None: rospy.loginfo(\"Attempted to send where_am_i help request but", "res_json = res.json() rospy.logdebug(\"Got updates from Slackbot %s\" % res_json)", "with Slackbot /get_updates at URL %s.\" % self.slackbot_url) if \"res\"", "Slackbot /get_updates at URL %s.\" % self.slackbot_url) if \"res\" in", "%s.\" % e) r.sleep() if __name__ == \"__main__\": rospy.init_node(\"kuri_wandering_robot\") kuri_wandering_robot", "goal_state = self.wandering_module_action.get_state() if (self.state_changed or goal_state == GoalStatus.ABORTED or", "%s.\" % res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" % e) update_previous_battery =", "= KuriWanderingRobotState.NORMAL # Initialize the wandering module self.wandering_module_action = actionlib.SimpleActionClient('/wandering_behavior/navigate',", "JointTrajectoryPoint() point.positions = [self.eye_closed_position] point.velocities = [] point.accelerations = []", "of (0.0, -0.3) \"\"\" pan_endpoint = 0.0 tilt_endpoint = -0.3", "msg.battery.pct <= self.battery_notification_thresholds[i]): try: # Send a low_battery_alert dict_to_send =", "'options':['Lounge', \"Office#252\", \"200 Corridoor\", \"Atrium\"]}, ) res_json = res.json() message_id", "the thresholds defined in self.battery_notification_thresholds for i in range(len(self.battery_notification_thresholds)): if", "while not rospy.is_shutdown(): rate.sleep() with self.state_lock: state_at_start_of_loop = self.state if", "action_ts, response in message_id_to_responses[message_id]: rospy.loginfo(\"Got reaction %s from at ts", "Create the goal goal = FollowJointTrajectoryGoal() goal.trajectory.header.stamp = rospy.Time.now() goal.trajectory.joint_names", "Slackbot for responses to its help requests. \"\"\" def __init__(self):", "loop for the state machine. All of the state machine", "update_previous_battery = False except Exception as e: rospy.logwarn(\"Error communicating with", "notify the Slackbot. 
\"\"\" if not self.has_loaded: return with self.state_lock:", "Whether the low battery message should include Kuri's current camera", "low battery anomaly detector self.battery_sub = rospy.Subscriber( \"/mobile_base/power\", Power, self.power_callback,", "with Slackbot /where_am_i at URL %s.\" % self.slackbot_url) if \"res\"", "charging, switch back to NORMAL self.charging_done_threshold = rospy.get_param('~charging_done_threshold', 90) #", "# Effort -1 means \"don't stop unless preempted\" self.wandering_module_action.send_goal(WanderGoal(effort=-1)) self.open_eyes()", "message to the Slackbot. This is merely intended to showcase", "queue_size=1) # Initialize storing images and message IDs self.sent_messages_database_filepath =", "the low battery message should include Kuri's current camera image", "detection system to trigger the robot asking for that type", "where it is should implement their own anomaly detection system", "Center Kuri's head. This involves moving from the current pan", "is charging, switch back to NORMAL self.charging_done_threshold = rospy.get_param('~charging_done_threshold', 90)", "request but have no image.\") return try: # Send a", "thresholds defined in self.battery_notification_thresholds for i in range(len(self.battery_notification_thresholds)): if (self.previous_battery", "current_pan, current_tilt): \"\"\" Center Kuri's head. This involves moving from", "rospy.logwarn(\"Error communicating with Slackbot /where_am_i at URL %s.\" % self.slackbot_url)", "return with self.latest_image_lock: self.latest_image = img_msg def power_callback(self, msg): \"\"\"", "the centered values of (0.0, -0.3) \"\"\" pan_endpoint = 0.0", "turning on and monitoring progress of the wandering module in", "detector self.battery_sub = rospy.Subscriber( \"/mobile_base/power\", Power, self.power_callback, queue_size=1) self.previous_battery_lock =", "= 0.2 # head pan is in [-0.75, 0.75] #", "rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" % e) r.sleep() if __name__ == \"__main__\":", "central executive node. This node runs a control loop that", "<= self.battery_notification_thresholds[i]): try: # Send a low_battery_alert dict_to_send = {'battery_pct':msg.battery.pct}", "Initialize low battery anomaly detector self.battery_sub = rospy.Subscriber( \"/mobile_base/power\", Power,", "thread that continually queries the Slackbot for responses to its", "Send the goal self.eyelid_controller_action.wait_for_server() self.eyelid_controller_action.send_goal(goal) self.eyelid_controller_action.wait_for_result(duration) def head_state_callback(self, head_state_msg): \"\"\"", "Get the head's current position \"\"\" if not self.has_loaded: return", "1 self.database_updated(num_updates) except Exception as e: rospy.logwarn(\"Error communicating with Slackbot", "#!/usr/bin/env python # ROS Libraries import actionlib from actionlib_msgs.msg import", "state_at_start_of_loop = self.state if (self.state == KuriWanderingRobotState.NORMAL): goal_state = self.wandering_module_action.get_state()", "# Send a low_battery_alert rospy.loginfo(\"Sending where_am_i help request\") with self.latest_image_lock:", "database for message_id in message_id_to_responses: for action_ts, response in message_id_to_responses[message_id]:", "help message, and actually using that would require developing a", "that type of help. 
    def state_machine_control_loop(self, rate_hz=10):
        """
        The control loop for the state machine. All of the state machine logic
        is handled in this function and the functions it calls.

        During NORMAL, the base moves according to wandering_behavior. During
        CHARGING, the robot's eyes are closed and it is charging. The robot
        transitions from NORMAL to CHARGING if its battery is below a threshold
        and it is on the charger. It transitions from CHARGING to NORMAL if
        its battery is above a threshold or it is off the charger.
        """
        rate = rospy.Rate(rate_hz)
        while not rospy.is_shutdown():
            rate.sleep()
            with self.state_lock:
                state_at_start_of_loop = self.state
                if (self.state == KuriWanderingRobotState.NORMAL):
                    goal_state = self.wandering_module_action.get_state()
                    if (self.state_changed or goal_state == GoalStatus.ABORTED or goal_state == GoalStatus.SUCCEEDED):
                        rospy.logdebug("Waiting for wandering_module_action server")
                        self.wandering_module_action.wait_for_server()
                        rospy.logdebug("Sending goal to wandering_module_action")
                        # Effort -1 means "don't stop unless preempted"
                        self.wandering_module_action.send_goal(WanderGoal(effort=-1))
                        self.open_eyes()
                    with self.previous_battery_lock:
                        if (self.previous_battery is not None and self.previous_battery < self.to_charge_threshold and self.previous_dock_present):
                            self.close_eyes()
                            self.state = KuriWanderingRobotState.CHARGING
                            self.wandering_module_action.cancel_all_goals()
                            rospy.loginfo("State: NORMAL ==> CHARGING")
                elif self.state == KuriWanderingRobotState.CHARGING:
                    with self.previous_battery_lock:
                        if (self.previous_battery is None or not self.previous_dock_present or self.previous_battery >= self.charging_done_threshold):
                            self.state = KuriWanderingRobotState.NORMAL
                            rospy.loginfo("State: CHARGING ==> NORMAL")
                state_at_end_of_loop = self.state
                self.state_changed = (state_at_start_of_loop != state_at_end_of_loop)
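    # Transition summary, as implemented above:
    #   NORMAL   --> CHARGING: battery < to_charge_threshold and dock present
    #   CHARGING --> NORMAL  : battery unknown, dock absent, or
    #                          battery >= charging_done_threshold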
    def image_callback(self, img_msg):
        """
        Store the latest image.
        """
        if not self.has_loaded:
            return
        with self.latest_image_lock:
            self.latest_image = img_msg
\"\"\"", "requests.post( os.path.join(self.slackbot_url, 'where_am_i'), json={'image':image_contents, 'options':['Lounge', \"Office#252\", \"200 Corridoor\", \"Atrium\"]}, )", "# Initialize storing images and message IDs self.sent_messages_database_filepath = rospy.get_param('~send_messages_database_filepath')", "Initialize the Slackbot updates thread self.slackbot_responses_thread = threading.Thread( target=self.get_slackbot_updates, )", "def head_state_callback(self, head_state_msg): \"\"\" Get the head's current position \"\"\"", "detects low battery, it sends a low battery request to", "not self.has_loaded: r.sleep() try: message_ids_and_action_ts = self.sent_messages_database.get_message_ids_and_latest_action_ts() # Request responses", "# head pan is in [-0.75, 0.75] # Initialize the", "notifications when the battery # crosses the thresholds defined in", "= True if msg.battery.pct <= self.battery_notification_thresholds[0]: # Send the low-battery", "to the centered values of (0.0, -0.3) \"\"\" pan_endpoint =", ">= self.charging_done_threshold): self.state = KuriWanderingRobotState.NORMAL rospy.loginfo(\"State: CHARGING ==> NORMAL\") state_at_end_of_loop", "self.sent_messages_database.get_message_ids_and_latest_action_ts() # Request responses for those message_ids res = requests.post(", "helper notifications when the battery # crosses the thresholds defined", "to wandering_behavior. During CHARGING, the robot's eyes are closed and", "rospy.Time.now() goal.trajectory.joint_names = [\"eyelids_joint\"] point = JointTrajectoryPoint() point.positions = [self.eye_open_position]", "for message_id in message_id_to_responses: for action_ts, response in message_id_to_responses[message_id]: rospy.loginfo(\"Got", "charger. \"\"\" rate = rospy.Rate(rate_hz) while not rospy.is_shutdown(): rate.sleep() with", "a where_am_i help message to the Slackbot. This is merely", "threading.Lock() self.state_changed = True self.state = KuriWanderingRobotState.NORMAL # Initialize the", "the dummy `where_am_i` anomaly detector self.where_am_i_help_sub = rospy.Subscriber( \"/where_am_i_help\", Empty,", "docked, charge self.to_charge_threshold = rospy.get_param('~to_charge_threshold', 50) # if the batter", "= requests.post( os.path.join(self.slackbot_url, 'where_am_i'), json={'image':image_contents, 'options':['Lounge', \"Office#252\", \"200 Corridoor\", \"Atrium\"]},", "rospy.get_param('~slackbot_url') # Initialize the state. self.state_lock = threading.Lock() self.state_changed =", "python # ROS Libraries import actionlib from actionlib_msgs.msg import GoalStatus", "state. 
    def where_am_i_help_callback(self, msg):
        """
        A dummy callback that triggers sending a where_am_i help message to the
        Slackbot. This is merely intended to showcase some of the Slackbot's
        capabilities. Users who want a robot that autonomously asks the human
        to tell it where it is should implement their own anomaly detection
        system for triggering this help request.
        """
        with self.latest_image_lock:
            if self.latest_image is None:
                rospy.loginfo("Attempted to send where_am_i help request but have no image.")
                return
        try:
            # Send a where_am_i help request
            rospy.loginfo("Sending where_am_i help request")
            with self.latest_image_lock:
                image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii')
            res = requests.post(
                os.path.join(self.slackbot_url, 'where_am_i'),
                json={'image': image_contents, 'options': ['Lounge', "Office#252", "200 Corridoor", "Atrium"]},
            )
            res_json = res.json()
            message_id = res_json['message_id']
            self.sent_messages_database.add_respondable_message(message_id)
            self.database_updated()
        except Exception as e:
            rospy.logwarn("Error communicating with Slackbot /where_am_i at URL %s." % self.slackbot_url)
            if "res" in locals():
                rospy.logwarn("Response text %s." % res.text)
            rospy.logwarn(traceback.format_exc())
            rospy.logwarn("Error %s." % e)
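    # The /where_am_i response is expected to carry the posted Slack
    # message's ID, which /get_updates polls on later. Shape inferred from
    # the parsing above, with an illustrative value:
    #   {'message_id': 'abc123'}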
    def get_slackbot_updates(self, refresh_secs=5.0):
        """
        Once every refresh_secs seconds, request updates (e.g., human
        responses) from the Slackbot. Note that you can optionally request
        updates for particular message_ids (e.g., those that have not
        received responses yet).
        """
        r = rospy.Rate(1.0/refresh_secs)
        while not rospy.is_shutdown():
            if not self.has_loaded:
                r.sleep()
            try:
                message_ids_and_action_ts = self.sent_messages_database.get_message_ids_and_latest_action_ts()
                # Request responses for those message_ids
                res = requests.post(
                    os.path.join(self.slackbot_url, 'get_updates'),
                    json={'message_ids_and_action_ts': message_ids_and_action_ts},
                )
                res_json = res.json()
                rospy.logdebug("Got updates from Slackbot %s" % res_json)
                message_id_to_responses = res_json["message_id_to_responses"]
                if len(message_id_to_responses) > 0:
                    num_updates = 0
                    # Insert reactions into the database
                    for message_id in message_id_to_responses:
                        for action_ts, response in message_id_to_responses[message_id]:
                            rospy.loginfo("Got reaction %s at ts %s" % (response, action_ts))
                            self.sent_messages_database.add_user_response(message_id, action_ts, response)
                            num_updates += 1
                    self.database_updated(num_updates)
            except Exception as e:
                rospy.logwarn("Error communicating with Slackbot /get_updates at URL %s." % self.slackbot_url)
                if "res" in locals():
                    rospy.logwarn("Response text %s." % res.text)
                rospy.logwarn(traceback.format_exc())
                rospy.logwarn("Error %s." % e)
            r.sleep()
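    # Example /get_updates exchange. The shapes are inferred from the code
    # above (the request mapping mirrors whatever
    # get_message_ids_and_latest_action_ts returns; the exact schema is the
    # Slackbot's):
    #   request : {'message_ids_and_action_ts': {'<message_id>': <latest_action_ts>}}
    #   response: {'message_id_to_responses':
    #                 {'<message_id>': [[<action_ts>, <response>], ...]}}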
if __name__ == "__main__":
    rospy.init_node("kuri_wandering_robot")

    kuri_wandering_robot = KuriWanderingRobot()

    rospy.spin()
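# Example invocation with private params (the script filename is
# hypothetical; the parameter names are the ones read in __init__):
#   rosrun kuri_wandering_robot kuri_wandering_robot.py \
#       _slackbot_url:=http://localhost:8194 \
#       _send_messages_database_filepath:=/tmp/sent_messages.pkl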
During NORMAL, the base moves", "moving from the current pan and tilt to the centered", "[] point.accelerations = [] point.effort = [] point.time_from_start = duration", "actionlib.SimpleActionClient('/wandering_behavior/navigate', WanderAction) # Initialize the eye controller self.eyelid_controller_action = actionlib.SimpleActionClient('/eyelids_controller/follow_joint_trajectory',", "WanderGoal import rospy from sensor_msgs.msg import CompressedImage from std_msgs.msg import", "\"\"\" The control loop for the state machine. All of", "point.time_from_start = (i+1)*time_interval goal.trajectory.points.append(point) # Send the goal self.head_controller_action.wait_for_server() self.head_controller_action.send_goal(goal)", "class \"\"\" self.has_loaded = False # Get the Slackbot URL", "\"\"\" r = rospy.Rate(1.0/refresh_secs) while not rospy.is_shutdown(): if not self.has_loaded:", "the wandering module self.wandering_module_action = actionlib.SimpleActionClient('/wandering_behavior/navigate', WanderAction) # Initialize the", "NORMAL, the base moves according to wandering_behavior. During CHARGING, the", "base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') dict_to_send['image'] = image_contents rospy.loginfo(\"Sending battery request for pct %s\"", "KuriWanderingRobotState.CHARGING: with self.previous_battery_lock: if (self.previous_battery is None or not self.previous_dock_present", "= max( abs(pan_endpoint-current_pan)/self.head_pan_speed, abs(tilt_endpoint-current_tilt)/self.head_tilt_speed) duration = rospy.Duration.from_sec(duration_secs) # Create the", "rospy.loginfo(\"Sending battery request for pct %s\" % msg.battery.pct) res =", "CHARGING to NORMAL if it's battery is above a threshold", "rospy.get_param('~charging_done_threshold', 90) # Whether the low battery message should include", "if not self.has_loaded: return if not self.has_centered_head: self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1]) def", "self.previous_battery = msg.battery.pct def where_am_i_help_callback(self, msg): \"\"\" A dummy callback", "= duration/n_waypoints for i in range(n_waypoints): point = JointTrajectoryPoint() point.positions", "self.charging_done_threshold): self.state = KuriWanderingRobotState.NORMAL rospy.loginfo(\"State: CHARGING ==> NORMAL\") state_at_end_of_loop =", "sufficiently charged. This node also runs anomaly detection to detect", "a control loop that manages the robot's state: turning on", "\"\"\" NORMAL = 1 CHARGING = 2 class KuriWanderingRobot(object): \"\"\"", "the actual endpoint and duration_secs duration_secs = max( abs(pan_endpoint-current_pan)/self.head_pan_speed, abs(tilt_endpoint-current_tilt)/self.head_tilt_speed)", "self.latest_image_lock: if self.latest_image is None: rospy.loginfo(\"Attempted to send where_am_i help", "is charging. The robot transitions from NORMAL to CHARGING if", "res.json() message_id = res_json['message_id'] self.sent_messages_database.add_respondable_message(message_id) self.database_updated() except Exception as e:", "detection and help requests, as needed. 
This node also subscribes", "manages the robot's state: turning on and monitoring progress of", "image.\") return try: # Send a low_battery_alert rospy.loginfo(\"Sending where_am_i help", "goal.trajectory.points.append(point) # Send the goal self.head_controller_action.wait_for_server() self.head_controller_action.send_goal(goal) self.head_controller_action.wait_for_result(duration) self.has_centered_head =", "to NORMAL when the robot is sufficiently charged. This node", "= JointTrajectoryPoint() point.positions = [self.eye_open_position] point.velocities = [] point.accelerations =", ") self.state_machine_thread.start() self.has_centered_head = False self.has_loaded = True def database_updated(self,", "defined in self.battery_notification_thresholds for i in range(len(self.battery_notification_thresholds)): if (self.previous_battery is", "self.wandering_module_action.wait_for_server() rospy.logdebug(\"Sending goal to wandering_module_action\") # Effort -1 means \"don't", "Kuri is charging, switch back to NORMAL self.charging_done_threshold = rospy.get_param('~charging_done_threshold',", "True if msg.battery.pct <= self.battery_notification_thresholds[0]: # Send the low-battery helper", "goal.trajectory.points = [point] # Send the goal self.eyelid_controller_action.wait_for_server() self.eyelid_controller_action.send_goal(goal) self.eyelid_controller_action.wait_for_result(duration)", "or msg.battery.pct < self.previous_battery)): self.previous_battery = msg.battery.pct def where_am_i_help_callback(self, msg):", "self.state = KuriWanderingRobotState.NORMAL # Initialize the wandering module self.wandering_module_action =", "of help. Finally, this node has a separate thread that", "the Slackbot updates thread self.slackbot_responses_thread = threading.Thread( target=self.get_slackbot_updates, ) self.slackbot_responses_thread.start()", "This node can be extended with additional anomaly detection and", "own anomaly detection system for triggering this help request. \"\"\"", "where_am_i help message to the Slackbot. This is merely intended", "The robot transitions from NORMAL to CHARGING if its battery", "to CHARGING if its battery is below a threshold and", "message_id_to_responses: for action_ts, response in message_id_to_responses[message_id]: rospy.loginfo(\"Got reaction %s from", "threshold or it is off the charger. \"\"\" NORMAL =", "return try: # Send a low_battery_alert rospy.loginfo(\"Sending where_am_i help request\")", "Corridoor\", \"Atrium\"]}, ) res_json = res.json() message_id = res_json['message_id'] self.sent_messages_database.add_respondable_message(message_id)", "1]) # if the battery is less than this and", "sample `where_am_i` help message. Note that that is only in", "(pan_endpoint-current_pan)/(n_waypoints-1) tilt_interval = (tilt_endpoint-current_tilt)/(n_waypoints-1) time_interval = duration/n_waypoints for i in", "= rospy.Time.now() goal.trajectory.joint_names = [\"eyelids_joint\"] point = JointTrajectoryPoint() point.positions =", "the low-battery helper notifications when the battery # crosses the", "(self.previous_battery > self.battery_notification_thresholds[i]) and msg.battery.pct <= self.battery_notification_thresholds[i]): try: # Send", "csv from enum import Enum import os import requests import", "Callback function for Kuri's power update. It Kuri's battery has", "the Slackbot's capabilities. 
Users who want a robot that autonomously", "actionlib from actionlib_msgs.msg import GoalStatus from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction,", "FollowJointTrajectoryAction, FollowJointTrajectoryGoal from kuri_wandering_robot.msg import Power from wandering_behavior.msg import WanderAction,", "while not rospy.is_shutdown(): if not self.has_loaded: r.sleep() try: message_ids_and_action_ts =", "self.has_centered_head: self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1]) def center_head(self, current_pan, current_tilt): \"\"\" Center Kuri's", "0.3] self.head_pan_speed = 0.2 # head pan is in [-0.75,", "not None: image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') dict_to_send['image'] = image_contents rospy.loginfo(\"Sending battery", "False break if (update_previous_battery and (self.previous_battery is None or msg.battery.pct", "not rospy.is_shutdown(): if not self.has_loaded: r.sleep() try: message_ids_and_action_ts = self.sent_messages_database.get_message_ids_and_latest_action_ts()", "\"\"\" self.has_loaded = False # Get the Slackbot URL self.slackbot_url", "self.sent_messages_database.save(self.sent_messages_database_filepath) rospy.logdebug(\"Saved sent_messages_database!\") def open_eyes(self, duration_secs=0.2): \"\"\" Open the robot's", "self.database_updates_since_last_save % self.database_save_interval == 0: self.sent_messages_database.save(self.sent_messages_database_filepath) rospy.logdebug(\"Saved sent_messages_database!\") def open_eyes(self,", "= msg.dock_present if self.state == KuriWanderingRobotState.CHARGING: self.previous_battery = msg.battery.pct else:", "point.effort = [] point.time_from_start = (i+1)*time_interval goal.trajectory.points.append(point) # Send the", "or goal_state == GoalStatus.SUCCEEDED): rospy.logdebug(\"Waiting for wandering_module_action server\") self.wandering_module_action.wait_for_server() rospy.logdebug(\"Sending", "-1 means \"don't stop unless preempted\" self.wandering_module_action.send_goal(WanderGoal(effort=-1)) self.open_eyes() with self.previous_battery_lock:", "node runs a control loop that manages the robot's state:", "with self.latest_image_lock: if self.latest_image is None: rospy.loginfo(\"Attempted to send where_am_i", "in self.battery_notification_thresholds for i in range(len(self.battery_notification_thresholds)): if (self.previous_battery is None", "try: # Send a low_battery_alert rospy.loginfo(\"Sending where_am_i help request\") with", "# Get the Slackbot URL self.slackbot_url = rospy.get_param('~slackbot_url') # Initialize", "head_state_msg): \"\"\" Get the head's current position \"\"\" if not", "== KuriWanderingRobotState.CHARGING: with self.previous_battery_lock: if (self.previous_battery is None or not", "NORMAL ==> CHARGING\") elif self.state == KuriWanderingRobotState.CHARGING: with self.previous_battery_lock: if", "= SentMessagesDatabase.load( self.sent_messages_database_filepath) self.database_save_interval = 1 self.database_updates_since_last_save = 0 #", "not self.has_centered_head: self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1]) def center_head(self, current_pan, current_tilt): \"\"\" Center", "in range(len(self.battery_notification_thresholds)): if (self.previous_battery is None or (self.previous_battery > self.battery_notification_thresholds[i])", "2 class KuriWanderingRobot(object): \"\"\" The central executive node. 
This node", "else: update_previous_battery = True if msg.battery.pct <= self.battery_notification_thresholds[0]: # Send", "True def database_updated(self, num_updates=1): \"\"\" Called everytime the database is", "FollowJointTrajectoryGoal() goal.trajectory.header.stamp = rospy.Time.now() goal.trajectory.joint_names = [\"head_1_joint\", \"head_2_joint\"] goal.trajectory.points =", "res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" % e) r.sleep() if __name__ ==", "when the battery # crosses the thresholds defined in self.battery_notification_thresholds", "to a dummy `where_am_i_help` topic, which sends helpers the sample", "or self.previous_battery >= self.charging_done_threshold): self.state = KuriWanderingRobotState.NORMAL rospy.loginfo(\"State: CHARGING ==>", "During NORMAL, the base moves according to wandering_behavior. During CHARGING,", "request to the Slackbot, which then sends it to the", "import threading import time import traceback # Custom Libraries from", "e: rospy.logwarn(\"Error communicating with Slackbot /where_am_i at URL %s.\" %", "# Initialize the state. self.state_lock = threading.Lock() self.state_changed = True", "helpers. This node can be extended with additional anomaly detection", "is None: rospy.loginfo(\"Attempted to send where_am_i help request but have", "[-0.8, 0.3] self.head_pan_speed = 0.2 # head pan is in", "not self.has_loaded: return with self.latest_image_lock: self.latest_image = img_msg def power_callback(self,", "node also subscribes to a dummy `where_am_i_help` topic, which sends", "queries the Slackbot for responses to its help requests. \"\"\"", "rospy.get_param('~battery_notification_thresholds', [40, 20, 10, 5, 4, 3, 2, 1]) #", "help requests, as needed. This node also subscribes to a", "# Initialize the Slackbot updates thread self.slackbot_responses_thread = threading.Thread( target=self.get_slackbot_updates,", "off the charger. \"\"\" rate = rospy.Rate(rate_hz) while not rospy.is_shutdown():", "anomaly detection and help requests, as needed. This node also", "self.state_machine_thread.start() self.has_centered_head = False self.has_loaded = True def database_updated(self, num_updates=1):", "All of the state machine logic is handled in this", "2, 1]) # if the battery is less than this", "wandering_behavior.msg import WanderAction, WanderGoal import rospy from sensor_msgs.msg import CompressedImage", "battery request to the Slackbot, which then sends it to", "0.0 tilt_endpoint = -0.3 n_waypoints = 10 # Compute the", "close_eyes(self, duration_secs=0.2): \"\"\" Close the robot's eyes \"\"\" rospy.logdebug(\"Close Eyes\")", "(0.0, -0.3) \"\"\" pan_endpoint = 0.0 tilt_endpoint = -0.3 n_waypoints", "This node also runs anomaly detection to detect low battery;", "node has a separate thread that continually queries the Slackbot", "`where_am_i` anomaly detector self.where_am_i_help_sub = rospy.Subscriber( \"/where_am_i_help\", Empty, self.where_am_i_help_callback, queue_size=1)", "self.open_eyes() with self.previous_battery_lock: if (self.previous_battery is not None and self.previous_battery", "the sample `where_am_i` help message. Note that that is only", "is not None and self.previous_battery < self.to_charge_threshold and self.previous_dock_present): self.close_eyes()", "Kuri's power update. 
It Kuri's battery has crossed a battery_notification_threshold,", "`where_am_i` help message, and actually using that would require developing", "self.state = KuriWanderingRobotState.NORMAL rospy.loginfo(\"State: CHARGING ==> NORMAL\") state_at_end_of_loop = self.state", "an instance of the KuriWanderingRobot class \"\"\" self.has_loaded = False", "duration = rospy.Duration.from_sec(duration_secs) # Create the goal goal = FollowJointTrajectoryGoal()", "to send where_am_i help request but have no image.\") return", "the helpers. This node can be extended with additional anomaly", "request\") with self.latest_image_lock: image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') res = requests.post( os.path.join(self.slackbot_url,", "self.slackbot_url = rospy.get_param('~slackbot_url') # Initialize the state. self.state_lock = threading.Lock()", "[] point.accelerations = [] point.effort = [] point.time_from_start = (i+1)*time_interval", "robot's state: turning on and monitoring progress of the wandering", "'/upward_looking_camera/compressed', CompressedImage, self.image_callback, queue_size=1) self.latest_image = None self.latest_image_lock = threading.Lock()", "response in message_id_to_responses[message_id]: rospy.loginfo(\"Got reaction %s from at ts %s\"", "request. \"\"\" with self.latest_image_lock: if self.latest_image is None: rospy.loginfo(\"Attempted to", "import CompressedImage from std_msgs.msg import Empty from trajectory_msgs.msg import JointTrajectoryPoint", "charger. \"\"\" NORMAL = 1 CHARGING = 2 class KuriWanderingRobot(object):", "and help requests, as needed. This node also subscribes to", "self.state == KuriWanderingRobotState.CHARGING: with self.previous_battery_lock: if (self.previous_battery is None or", "module self.wandering_module_action = actionlib.SimpleActionClient('/wandering_behavior/navigate', WanderAction) # Initialize the eye controller", "from std_msgs.msg import Empty from trajectory_msgs.msg import JointTrajectoryPoint # Python", "self.latest_image_lock: self.latest_image = img_msg def power_callback(self, msg): \"\"\" Callback function", "message_ids (e.g., those that have not received responses yet) \"\"\"", "res.json() rospy.logdebug(\"Got updates from Slackbot %s\" % res_json) message_id_to_responses =", "that is only in place to illsutrate the sample `where_am_i`", "help message. 
Note that this is only in place to illustrate the sample `where_am_i` help message; actually using it would require developing a custom anomaly detection system to trigger the robot asking for that type of help.
self.wandering_module_action.cancel_all_goals()
rospy.loginfo("State: NORMAL ==> CHARGING")
self.state_lock = threading.Lock()
self.state_changed = True
self.state = KuriWanderingRobotState.NORMAL
\"\"\" rate = rospy.Rate(rate_hz) while not", "self.previous_dock_present = None self.battery_notification_thresholds = rospy.get_param('~battery_notification_thresholds', [40, 20, 10, 5,", "from the current pan and tilt to the centered values", "==> CHARGING\") elif self.state == KuriWanderingRobotState.CHARGING: with self.previous_battery_lock: if (self.previous_battery", "the sample `where_am_i` help message, and actually using that would", "the robot's eyes \"\"\" rospy.logdebug(\"Close Eyes\") duration = rospy.Duration.from_sec(duration_secs) goal", "request updates for partular message_ids (e.g., those that have not", "if not self.has_loaded: r.sleep() try: message_ids_and_action_ts = self.sent_messages_database.get_message_ids_and_latest_action_ts() # Request", "KuriWanderingRobotState.NORMAL): goal_state = self.wandering_module_action.get_state() if (self.state_changed or goal_state == GoalStatus.ABORTED", "to the Slackbot. This is merely intended to showcase some", "self.sent_messages_database.add_respondable_message(message_id) self.database_updated() except Exception as e: rospy.logwarn(\"Error communicating with Slackbot", "responses to its help requests. \"\"\" def __init__(self): \"\"\" Initialize", "def state_machine_control_loop(self, rate_hz=10): \"\"\" The control loop for the state", "Slackbot %s\" % res_json) message_id_to_responses = res_json[\"message_id_to_responses\"] if len(message_id_to_responses) >", "None and self.previous_battery < self.to_charge_threshold and self.previous_dock_present): self.close_eyes() self.state =", "max( abs(pan_endpoint-current_pan)/self.head_pan_speed, abs(tilt_endpoint-current_tilt)/self.head_tilt_speed) duration = rospy.Duration.from_sec(duration_secs) # Create the goal", "Power from wandering_behavior.msg import WanderAction, WanderGoal import rospy from sensor_msgs.msg", "\"\"\" if not self.has_loaded: return with self.latest_image_lock: self.latest_image = img_msg", "% self.database_save_interval == 0: self.sent_messages_database.save(self.sent_messages_database_filepath) rospy.logdebug(\"Saved sent_messages_database!\") def open_eyes(self, duration_secs=0.2):", "that you can optionally request updates for partular message_ids (e.g.,", "0: num_updates = 0 # Insert reactions into the database", "communicating with Slackbot /get_updates at URL %s.\" % self.slackbot_url) if", "rospy.loginfo(\"State: CHARGING ==> NORMAL\") state_at_end_of_loop = self.state self.state_changed = (state_at_start_of_loop", "queue_size=1) self.head_controller_action = actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.head_tilt_speed = 0.2 # head", "= (tilt_endpoint-current_tilt)/(n_waypoints-1) time_interval = duration/n_waypoints for i in range(n_waypoints): point", "the Slackbot. 
Note that you can optionally request updates for particular message_ids (e.g., those that have not received responses yet).
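# Hedged sketch of a single polling iteration against the Slackbot's
# /get_updates endpoint, as get_slackbot_updates does below. The payload key
# 'message_ids_and_action_ts' and response key 'message_id_to_responses'
# follow this file; the function name is illustrative.
import os
import requests

def poll_slackbot_once(slackbot_url, message_ids_and_action_ts):
    res = requests.post(
        os.path.join(slackbot_url, 'get_updates'),
        json={'message_ids_and_action_ts': message_ids_and_action_ts},
    )
    # Maps message_id -> list of (action_ts, response) pairs.
    return res.json().get('message_id_to_responses', {})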
\"\"\" with self.latest_image_lock: if self.latest_image is None: rospy.loginfo(\"Attempted", "rospy.get_param('~send_messages_database_filepath') self.sent_messages_database = SentMessagesDatabase.load( self.sent_messages_database_filepath) self.database_save_interval = 1 self.database_updates_since_last_save =", "dummy `where_am_i` anomaly detector self.where_am_i_help_sub = rospy.Subscriber( \"/where_am_i_help\", Empty, self.where_am_i_help_callback,", "self.eye_open_position = 0.0 # Initialize the camera self.img_sub = rospy.Subscriber(", "0.0 # Initialize the camera self.img_sub = rospy.Subscriber( '/upward_looking_camera/compressed', CompressedImage,", "= rospy.Rate(1.0/refresh_secs) while not rospy.is_shutdown(): if not self.has_loaded: r.sleep() try:", "self.previous_dock_present = msg.dock_present if self.state == KuriWanderingRobotState.CHARGING: self.previous_battery = msg.battery.pct", "values of (0.0, -0.3) \"\"\" pan_endpoint = 0.0 tilt_endpoint =", "= self.state self.state_changed = (state_at_start_of_loop != state_at_end_of_loop) def image_callback(self, img_msg):", "detection to detect low battery; when it detects low battery,", "rospy.logwarn(\"Error %s.\" % e) def get_slackbot_updates(self, refresh_secs=5.0): \"\"\" Once every", "robot's eyes \"\"\" rospy.logdebug(\"Close Eyes\") duration = rospy.Duration.from_sec(duration_secs) goal =", "Finally, this node has a separate thread that continually queries", "a low battery request to the Slackbot, which then sends", "CHARGING ==> NORMAL\") state_at_end_of_loop = self.state self.state_changed = (state_at_start_of_loop !=", "self.wandering_module_action.send_goal(WanderGoal(effort=-1)) self.open_eyes() with self.previous_battery_lock: if (self.previous_battery is not None and", "self.power_callback, queue_size=1) self.previous_battery_lock = threading.Lock() self.previous_battery = None self.previous_dock_present =", "%s from at ts %s\" % (response, action_ts)) self.sent_messages_database.add_user_response(message_id, action_ts,", "self.previous_battery_lock: if (self.previous_battery is None or not self.previous_dock_present or self.previous_battery", "if (self.state_changed or goal_state == GoalStatus.ABORTED or goal_state == GoalStatus.SUCCEEDED):", "FollowJointTrajectoryGoal from kuri_wandering_robot.msg import Power from wandering_behavior.msg import WanderAction, WanderGoal", "actionlib.SimpleActionClient('/eyelids_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.eye_closed_position = 0.41 self.eye_open_position = 0.0 # Initialize", "threading.Lock() # Initialize low battery anomaly detector self.battery_sub = rospy.Subscriber(", "JointTrajectoryControllerState, self.head_state_callback, queue_size=1) self.head_controller_action = actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.head_tilt_speed = 0.2", "%s\" % res_json) message_id_to_responses = res_json[\"message_id_to_responses\"] if len(message_id_to_responses) > 0:", "should include Kuri's current camera image self.low_battery_message_include_image = rospy.get_param('~low_battery_message_include_image', True)", "current camera image self.low_battery_message_include_image = rospy.get_param('~low_battery_message_include_image', True) # Initialize the", "Compute the actual endpoint and duration_secs duration_secs = max( abs(pan_endpoint-current_pan)/self.head_pan_speed,", "% res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" % e) update_previous_battery = False", 
"battery anomaly detector self.battery_sub = rospy.Subscriber( \"/mobile_base/power\", Power, self.power_callback, queue_size=1)", "is on the charger. It transitions from CHARGING to NORMAL", "goal to wandering_module_action\") # Effort -1 means \"don't stop unless", "base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') res = requests.post( os.path.join(self.slackbot_url, 'where_am_i'), json={'image':image_contents, 'options':['Lounge', \"Office#252\", \"200", "wandering_module_action\") # Effort -1 means \"don't stop unless preempted\" self.wandering_module_action.send_goal(WanderGoal(effort=-1))", "battery has crossed a battery_notification_threshold, notify the Slackbot. \"\"\" if", "human responses) from the Slackbot. Note that you can optionally", "range(n_waypoints): point = JointTrajectoryPoint() point.positions = [current_pan + i*pan_interval, current_tilt", "Default Libraries import base64 import csv from enum import Enum", "state_machine_control_loop(self, rate_hz=10): \"\"\" The control loop for the state machine.", "Exception as e: rospy.logwarn(\"Error communicating with Slackbot /get_updates at URL", "# crosses the thresholds defined in self.battery_notification_thresholds for i in", "== GoalStatus.SUCCEEDED): rospy.logdebug(\"Waiting for wandering_module_action server\") self.wandering_module_action.wait_for_server() rospy.logdebug(\"Sending goal to", "from kuri_wandering_robot.msg import Power from wandering_behavior.msg import WanderAction, WanderGoal import", "the functions it calls. During NORMAL, the base moves according", "KuriWanderingRobotState(Enum): \"\"\" During NORMAL, the base moves according to wandering_behavior.", "# Initialize the head controller self.head_state_sub = rospy.Subscriber( \"/head_controller/state\", JointTrajectoryControllerState,", "= rospy.Subscriber( '/upward_looking_camera/compressed', CompressedImage, self.image_callback, queue_size=1) self.latest_image = None self.latest_image_lock", "Enum import os import requests import threading import time import", "unless preempted\" self.wandering_module_action.send_goal(WanderGoal(effort=-1)) self.open_eyes() with self.previous_battery_lock: if (self.previous_battery is not", "in place to illsutrate the sample `where_am_i` help message, and", "battery_notification_threshold, notify the Slackbot. \"\"\" if not self.has_loaded: return with", "0 # Insert reactions into the database for message_id in", "who want a robot that autonomously asks the human to", "using that would require developing a custom anomaly detection system", "\"don't stop unless preempted\" self.wandering_module_action.send_goal(WanderGoal(effort=-1)) self.open_eyes() with self.previous_battery_lock: if (self.previous_battery", "self.slackbot_url) if \"res\" in locals(): rospy.logwarn(\"Response text %s.\" % res.text)", "+= 1 self.database_updated(num_updates) except Exception as e: rospy.logwarn(\"Error communicating with", "then sends it to the helpers. This node can be", "battery message should include Kuri's current camera image self.low_battery_message_include_image =", "import GoalStatus from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal from kuri_wandering_robot.msg", "Send a low_battery_alert dict_to_send = {'battery_pct':msg.battery.pct} if self.low_battery_message_include_image: with self.latest_image_lock:", "everytime the database is updated. 
Saves the database every self.database_save_interval updates.
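# Sketch of the save-every-N-updates pattern database_updated implements.
# With database_save_interval == 1 (this file's default), every update
# triggers a save; the class here is illustrative, not the node's API.
class SaveCounter(object):
    def __init__(self, interval=1):
        self.interval = interval
        self.count = 0

    def updated(self, num_updates=1):
        self.count += num_updates
        # True whenever the running count lands on a multiple of interval
        return self.count % self.interval == 0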
This is merely intended to showcase some of the Slackbot's capabilities.
All of the state machine logic is handled in this function and the functions it calls.
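# Illustrative reduction of the NORMAL/CHARGING transition rules described
# above, using the KuriWanderingRobotState enum defined earlier in this file.
# Threshold defaults follow this file's parameters; this standalone helper
# is a sketch, not the node's actual control loop.
def next_state(state, battery_pct, dock_present,
               to_charge_threshold=50, charging_done_threshold=90):
    if state == KuriWanderingRobotState.NORMAL:
        if (battery_pct is not None and battery_pct < to_charge_threshold
                and dock_present):
            return KuriWanderingRobotState.CHARGING
    elif state == KuriWanderingRobotState.CHARGING:
        if (battery_pct is None or not dock_present
                or battery_pct >= charging_done_threshold):
            return KuriWanderingRobotState.NORMAL
    return state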
\"\"\" with self.latest_image_lock: if self.latest_image is", "low_battery_alert dict_to_send = {'battery_pct':msg.battery.pct} if self.low_battery_message_include_image: with self.latest_image_lock: if self.latest_image", "is in [-0.8, 0.3] self.head_pan_speed = 0.2 # head pan", "# Send a low_battery_alert dict_to_send = {'battery_pct':msg.battery.pct} if self.low_battery_message_include_image: with", "False self.has_loaded = True def database_updated(self, num_updates=1): \"\"\" Called everytime", "not self.has_loaded: return if not self.has_centered_head: self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1]) def center_head(self,", "with self.latest_image_lock: self.latest_image = img_msg def power_callback(self, msg): \"\"\" Callback", "try: message_ids_and_action_ts = self.sent_messages_database.get_message_ids_and_latest_action_ts() # Request responses for those message_ids", "point.accelerations = [] point.effort = [] point.time_from_start = duration goal.trajectory.points", "# Initialize low battery anomaly detector self.battery_sub = rospy.Subscriber( \"/mobile_base/power\",", "duration_secs duration_secs = max( abs(pan_endpoint-current_pan)/self.head_pan_speed, abs(tilt_endpoint-current_tilt)/self.head_tilt_speed) duration = rospy.Duration.from_sec(duration_secs) #", "server\") self.wandering_module_action.wait_for_server() rospy.logdebug(\"Sending goal to wandering_module_action\") # Effort -1 means", "res = requests.post( os.path.join(self.slackbot_url, 'low_battery'), json=dict_to_send, ) res_json = res.json()", "is below a threshold and it is on the charger.", "action_ts, response) num_updates += 1 self.database_updated(num_updates) except Exception as e:", "= self.state if (self.state == KuriWanderingRobotState.NORMAL): goal_state = self.wandering_module_action.get_state() if", "self.head_state_callback, queue_size=1) self.head_controller_action = actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.head_tilt_speed = 0.2 #", "self.previous_battery = None self.previous_dock_present = None self.battery_notification_thresholds = rospy.get_param('~battery_notification_thresholds', [40,", "self.database_updated() except Exception as e: rospy.logwarn(\"Error communicating with Slackbot /where_am_i", "rospy.logdebug(\"Sending goal to wandering_module_action\") # Effort -1 means \"don't stop", "than this and Kuri is docked, charge self.to_charge_threshold = rospy.get_param('~to_charge_threshold',", "their own anomaly detection system for triggering this help request.", "= duration goal.trajectory.points = [point] # Send the goal self.eyelid_controller_action.wait_for_server()", "which sends helpers the sample `where_am_i` help message. Note that", "current_tilt): \"\"\" Center Kuri's head. 
This involves moving from the current pan and tilt to the centered values of (0.0, -0.3), as sketched below.
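# Sketch of the linear interpolation center_head uses: n evenly spaced
# (pan, tilt) waypoints from the current pose to the (0.0, -0.3) target;
# the last waypoint lands exactly on the endpoint. The helper name is
# illustrative.
def interpolate_head_waypoints(current_pan, current_tilt,
                               pan_endpoint=0.0, tilt_endpoint=-0.3,
                               n_waypoints=10):
    pan_interval = (pan_endpoint - current_pan) / (n_waypoints - 1)
    tilt_interval = (tilt_endpoint - current_tilt) / (n_waypoints - 1)
    return [(current_pan + i * pan_interval, current_tilt + i * tilt_interval)
            for i in range(n_waypoints)]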
\"\"\" if not self.has_loaded:", "= [] point.accelerations = [] point.effort = [] point.time_from_start =", "the head's current position \"\"\" if not self.has_loaded: return if", "(update_previous_battery and (self.previous_battery is None or msg.battery.pct < self.previous_battery)): self.previous_battery", "back to NORMAL self.charging_done_threshold = rospy.get_param('~charging_done_threshold', 90) # Whether the", "requests. \"\"\" def __init__(self): \"\"\" Initialize an instance of the", "Send the goal self.head_controller_action.wait_for_server() self.head_controller_action.send_goal(goal) self.head_controller_action.wait_for_result(duration) self.has_centered_head = True def", "ROS Libraries import actionlib from actionlib_msgs.msg import GoalStatus from control_msgs.msg", "camera self.img_sub = rospy.Subscriber( '/upward_looking_camera/compressed', CompressedImage, self.image_callback, queue_size=1) self.latest_image =", "involves moving from the current pan and tilt to the", "tilt to the centered values of (0.0, -0.3) \"\"\" pan_endpoint", "machine. All of the state machine logic is handled in", "= KuriWanderingRobotState.NORMAL rospy.loginfo(\"State: CHARGING ==> NORMAL\") state_at_end_of_loop = self.state self.state_changed", "1 self.database_updates_since_last_save = 0 # Initialize the head controller self.head_state_sub", "with self.state_lock: with self.previous_battery_lock: self.previous_dock_present = msg.dock_present if self.state ==", "= rospy.Subscriber( \"/where_am_i_help\", Empty, self.where_am_i_help_callback, queue_size=1) # Initialize storing images", "Kuri's battery has crossed a battery_notification_threshold, notify the Slackbot. \"\"\"", "target=self.state_machine_control_loop, ) self.state_machine_thread.start() self.has_centered_head = False self.has_loaded = True def", "state_at_end_of_loop = self.state self.state_changed = (state_at_start_of_loop != state_at_end_of_loop) def image_callback(self,", "with self.state_lock: state_at_start_of_loop = self.state if (self.state == KuriWanderingRobotState.NORMAL): goal_state", "a battery_notification_threshold, notify the Slackbot. 
\"\"\" if not self.has_loaded: return", "would require developing a custom anomaly detection system to trigger", "\"\"\" def __init__(self): \"\"\" Initialize an instance of the KuriWanderingRobot", "GoalStatus.ABORTED or goal_state == GoalStatus.SUCCEEDED): rospy.logdebug(\"Waiting for wandering_module_action server\") self.wandering_module_action.wait_for_server()", "camera image self.low_battery_message_include_image = rospy.get_param('~low_battery_message_include_image', True) # Initialize the dummy", "state machine self.state_machine_thread = threading.Thread( target=self.state_machine_control_loop, ) self.state_machine_thread.start() self.has_centered_head =", "res_json[\"message_id_to_responses\"] if len(message_id_to_responses) > 0: num_updates = 0 # Insert", "also runs anomaly detection to detect low battery; when it", "self.latest_image = img_msg def power_callback(self, msg): \"\"\" Callback function for", "want a robot that autonomously asks the human to tell", "[] point.effort = [] point.time_from_start = duration goal.trajectory.points = [point]", "json={'image':image_contents, 'options':['Lounge', \"Office#252\", \"200 Corridoor\", \"Atrium\"]}, ) res_json = res.json()", "trajectory_msgs.msg import JointTrajectoryPoint # Python Default Libraries import base64 import", "/low_battery at URL %s.\" % self.slackbot_url) if \"res\" in locals():", "pct %s\" % msg.battery.pct) res = requests.post( os.path.join(self.slackbot_url, 'low_battery'), json=dict_to_send,", "point.velocities = [] point.accelerations = [] point.effort = [] point.time_from_start", "5, 4, 3, 2, 1]) # if the battery is", "GoalStatus.SUCCEEDED): rospy.logdebug(\"Waiting for wandering_module_action server\") self.wandering_module_action.wait_for_server() rospy.logdebug(\"Sending goal to wandering_module_action\")", "point = JointTrajectoryPoint() point.positions = [current_pan + i*pan_interval, current_tilt +", "the battery is less than this and Kuri is docked,", "center_head(self, current_pan, current_tilt): \"\"\" Center Kuri's head. This involves moving", "if self.low_battery_message_include_image: with self.latest_image_lock: if self.latest_image is not None: image_contents", "= rospy.get_param('~battery_notification_thresholds', [40, 20, 10, 5, 4, 3, 2, 1])", "WanderAction) # Initialize the eye controller self.eyelid_controller_action = actionlib.SimpleActionClient('/eyelids_controller/follow_joint_trajectory', FollowJointTrajectoryAction)", "= res_json[\"message_id_to_responses\"] if len(message_id_to_responses) > 0: num_updates = 0 #", "sensor_msgs.msg import CompressedImage from std_msgs.msg import Empty from trajectory_msgs.msg import", "point = JointTrajectoryPoint() point.positions = [self.eye_open_position] point.velocities = [] point.accelerations", "= rospy.Duration.from_sec(duration_secs) # Create the goal goal = FollowJointTrajectoryGoal() goal.trajectory.header.stamp", "detection system for triggering this help request. 
\"\"\" with self.latest_image_lock:", "robot that autonomously asks the human to tell it where", "IDs self.sent_messages_database_filepath = rospy.get_param('~send_messages_database_filepath') self.sent_messages_database = SentMessagesDatabase.load( self.sent_messages_database_filepath) self.database_save_interval =", "and self.previous_dock_present): self.close_eyes() self.state = KuriWanderingRobotState.CHARGING self.wandering_module_action.cancel_all_goals() rospy.loginfo(\"State: NORMAL ==>", "self.sent_messages_database_filepath = rospy.get_param('~send_messages_database_filepath') self.sent_messages_database = SentMessagesDatabase.load( self.sent_messages_database_filepath) self.database_save_interval = 1", "import csv from enum import Enum import os import requests", "= -0.3 n_waypoints = 10 # Compute the actual endpoint", "= base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') res = requests.post( os.path.join(self.slackbot_url, 'where_am_i'), json={'image':image_contents, 'options':['Lounge', \"Office#252\",", "updated. Saves the database every self.database_save_interval updates \"\"\" self.database_updates_since_last_save +=", "it is off the charger. \"\"\" NORMAL = 1 CHARGING", "it calls. During NORMAL, the base moves according to wandering_behavior.", "it detects low battery, it sends a low battery request", "if (self.previous_battery is None or not self.previous_dock_present or self.previous_battery >=", "is None or (self.previous_battery > self.battery_notification_thresholds[i]) and msg.battery.pct <= self.battery_notification_thresholds[i]):", "FollowJointTrajectoryAction) self.head_tilt_speed = 0.2 # head tilt is in [-0.8,", "%s\" % msg.battery.pct) res = requests.post( os.path.join(self.slackbot_url, 'low_battery'), json=dict_to_send, )", "refresh_secs seconds, request updates (e.g., human responses) from the Slackbot.", "Libraries import base64 import csv from enum import Enum import", "% res_json) message_id_to_responses = res_json[\"message_id_to_responses\"] if len(message_id_to_responses) > 0: num_updates", "function for Kuri's power update. It Kuri's battery has crossed", "as e: rospy.logwarn(\"Error communicating with Slackbot /get_updates at URL %s.\"", "custom anomaly detection system to trigger the robot asking for", "have no image.\") return try: # Send a low_battery_alert rospy.loginfo(\"Sending", "for i in range(n_waypoints): point = JointTrajectoryPoint() point.positions = [current_pan", "rospy.loginfo(\"Got reaction %s from at ts %s\" % (response, action_ts))", "num_updates=1): \"\"\" Called everytime the database is updated. 
Finally, this node has a separate thread that continually queries the Slackbot for responses to its help requests.
\"\"\" with self.latest_image_lock: if", "0 # Initialize the head controller self.head_state_sub = rospy.Subscriber( \"/head_controller/state\",", "separate thread that continually queries the Slackbot for responses to", "Power, self.power_callback, queue_size=1) self.previous_battery_lock = threading.Lock() self.previous_battery = None self.previous_dock_present", "[40, 20, 10, 5, 4, 3, 2, 1]) # if", "(self.previous_battery is None or not self.previous_dock_present or self.previous_battery >= self.charging_done_threshold):", "def __init__(self): \"\"\" Initialize an instance of the KuriWanderingRobot class", "== 0: self.sent_messages_database.save(self.sent_messages_database_filepath) rospy.logdebug(\"Saved sent_messages_database!\") def open_eyes(self, duration_secs=0.2): \"\"\" Open", "if (self.previous_battery is None or (self.previous_battery > self.battery_notification_thresholds[i]) and msg.battery.pct", "trigger the robot asking for that type of help. Finally,", "the robot is sufficiently charged. This node also runs anomaly", "Store the latest image. \"\"\" if not self.has_loaded: return with", "GoalStatus from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal from kuri_wandering_robot.msg import", "True def state_machine_control_loop(self, rate_hz=10): \"\"\" The control loop for the", "of the Slackbot's capabilities. Users who want a robot that", "= {'battery_pct':msg.battery.pct} if self.low_battery_message_include_image: with self.latest_image_lock: if self.latest_image is not", "Slackbot, which then sends it to the helpers. This node", "= img_msg def power_callback(self, msg): \"\"\" Callback function for Kuri's", "JointTrajectoryPoint() point.positions = [self.eye_open_position] point.velocities = [] point.accelerations = []", "to trigger the robot asking for that type of help.", "# Initialize the dummy `where_am_i` anomaly detector self.where_am_i_help_sub = rospy.Subscriber(", "yet) \"\"\" r = rospy.Rate(1.0/refresh_secs) while not rospy.is_shutdown(): if not", "threading import time import traceback # Custom Libraries from sent_messages_database", "locals(): rospy.logwarn(\"Response text %s.\" % res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" %", "monitoring progress of the wandering module in NORMAL, turning off", "= rospy.get_param('~send_messages_database_filepath') self.sent_messages_database = SentMessagesDatabase.load( self.sent_messages_database_filepath) self.database_save_interval = 1 self.database_updates_since_last_save", "+ i*pan_interval, current_tilt + i*tilt_interval] point.velocities = [] point.accelerations =", "json=dict_to_send, ) res_json = res.json() if not res_json['success']: update_previous_battery =", "for partular message_ids (e.g., those that have not received responses", "> 0: num_updates = 0 # Insert reactions into the", "loop that manages the robot's state: turning on and monitoring", "abs(pan_endpoint-current_pan)/self.head_pan_speed, abs(tilt_endpoint-current_tilt)/self.head_tilt_speed) duration = rospy.Duration.from_sec(duration_secs) # Create the goal goal", "message_ids_and_action_ts = self.sent_messages_database.get_message_ids_and_latest_action_ts() # Request responses for those message_ids res", "switch back to NORMAL self.charging_done_threshold = rospy.get_param('~charging_done_threshold', 90) # Whether", "# Send the goal self.eyelid_controller_action.wait_for_server() self.eyelid_controller_action.send_goal(goal) 
self.eyelid_controller_action.wait_for_result(duration) def head_state_callback(self, head_state_msg):", "that manages the robot's state: turning on and monitoring progress", "% res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" % e) def get_slackbot_updates(self, refresh_secs=5.0):", "FollowJointTrajectoryGoal() goal.trajectory.header.stamp = rospy.Time.now() goal.trajectory.joint_names = [\"eyelids_joint\"] point = JointTrajectoryPoint()", "communicating with Slackbot /where_am_i at URL %s.\" % self.slackbot_url) if", "transitions from NORMAL to CHARGING if its battery is below", "KuriWanderingRobotState.CHARGING self.wandering_module_action.cancel_all_goals() rospy.loginfo(\"State: NORMAL ==> CHARGING\") elif self.state == KuriWanderingRobotState.CHARGING:", "but have no image.\") return try: # Send a low_battery_alert", "self.has_centered_head = True def state_machine_control_loop(self, rate_hz=10): \"\"\" The control loop", "# Custom Libraries from sent_messages_database import SentMessagesDatabase class KuriWanderingRobotState(Enum): \"\"\"", "head_state_callback(self, head_state_msg): \"\"\" Get the head's current position \"\"\" if", "self.has_centered_head = False self.has_loaded = True def database_updated(self, num_updates=1): \"\"\"", "Slackbot URL self.slackbot_url = rospy.get_param('~slackbot_url') # Initialize the state. self.state_lock", "[] pan_interval = (pan_endpoint-current_pan)/(n_waypoints-1) tilt_interval = (tilt_endpoint-current_tilt)/(n_waypoints-1) time_interval = duration/n_waypoints", "switching back to NORMAL when the robot is sufficiently charged.", "function and the functions it calls. During NORMAL, the base", "base64 import csv from enum import Enum import os import", "images and message IDs self.sent_messages_database_filepath = rospy.get_param('~send_messages_database_filepath') self.sent_messages_database = SentMessagesDatabase.load(", "the state machine. All of the state machine logic is", "image. \"\"\" if not self.has_loaded: return with self.latest_image_lock: self.latest_image =", "\"/head_controller/state\", JointTrajectoryControllerState, self.head_state_callback, queue_size=1) self.head_controller_action = actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.head_tilt_speed =", "res.text) rospy.logwarn(traceback.format_exc()) rospy.logwarn(\"Error %s.\" % e) def get_slackbot_updates(self, refresh_secs=5.0): \"\"\"", "class KuriWanderingRobot(object): \"\"\" The central executive node. This node runs", "the charger. \"\"\" rate = rospy.Rate(rate_hz) while not rospy.is_shutdown(): rate.sleep()", "and it is on the charger. It transitions from CHARGING", "= False # Get the Slackbot URL self.slackbot_url = rospy.get_param('~slackbot_url')", "time_interval = duration/n_waypoints for i in range(n_waypoints): point = JointTrajectoryPoint()", "self.sent_messages_database_filepath) self.database_save_interval = 1 self.database_updates_since_last_save = 0 # Initialize the", "from Slackbot %s\" % res_json) message_id_to_responses = res_json[\"message_id_to_responses\"] if len(message_id_to_responses)", "\"Atrium\"]}, ) res_json = res.json() message_id = res_json['message_id'] self.sent_messages_database.add_respondable_message(message_id) self.database_updated()", "image_callback(self, img_msg): \"\"\" Store the latest image. 
\"\"\" if not", "self.head_controller_action = actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction) self.head_tilt_speed = 0.2 # head tilt", "the robot's eyes are closed and it is charging. The", "for responses to its help requests. \"\"\" def __init__(self): \"\"\"", "responses for those message_ids res = requests.post( os.path.join(self.slackbot_url, 'get_updates'), json={'message_ids_and_action_ts':message_ids_and_action_ts},", "\"\"\" Once every refresh_secs seconds, request updates (e.g., human responses)", "Slackbot. This is merely intended to showcase some of the", "\"\"\" rospy.logdebug(\"Open Eyes\") duration = rospy.Duration.from_sec(duration_secs) goal = FollowJointTrajectoryGoal() goal.trajectory.header.stamp", "\"\"\" if not self.has_loaded: return with self.state_lock: with self.previous_battery_lock: self.previous_dock_present", "a threshold or it is off the charger. \"\"\" rate", "(self.state == KuriWanderingRobotState.NORMAL): goal_state = self.wandering_module_action.get_state() if (self.state_changed or goal_state", "from sensor_msgs.msg import CompressedImage from std_msgs.msg import Empty from trajectory_msgs.msg", "self.latest_image is not None: image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') dict_to_send['image'] = image_contents", "CHARGING\") elif self.state == KuriWanderingRobotState.CHARGING: with self.previous_battery_lock: if (self.previous_battery is", "self.state_lock = threading.Lock() self.state_changed = True self.state = KuriWanderingRobotState.NORMAL #", "[\"eyelids_joint\"] point = JointTrajectoryPoint() point.positions = [self.eye_closed_position] point.velocities = []", "head_state_msg.actual.positions[1]) def center_head(self, current_pan, current_tilt): \"\"\" Center Kuri's head. This", "transitions from CHARGING to NORMAL if it's battery is above", "self.latest_image_lock: if self.latest_image is not None: image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii') dict_to_send['image']", "self.to_charge_threshold and self.previous_dock_present): self.close_eyes() self.state = KuriWanderingRobotState.CHARGING self.wandering_module_action.cancel_all_goals() rospy.loginfo(\"State: NORMAL", "self.charging_done_threshold = rospy.get_param('~charging_done_threshold', 90) # Whether the low battery message", "capabilities. Users who want a robot that autonomously asks the", "when the robot is sufficiently charged. This node also runs", "threshold or it is off the charger. \"\"\" rate =", "import Power from wandering_behavior.msg import WanderAction, WanderGoal import rospy from", "it is charging. The robot transitions from NORMAL to CHARGING", "goal.trajectory.joint_names = [\"eyelids_joint\"] point = JointTrajectoryPoint() point.positions = [self.eye_open_position] point.velocities", "# Insert reactions into the database for message_id in message_id_to_responses:", "\"\"\" Called everytime the database is updated. Saves the database", "KuriWanderingRobot(object): \"\"\" The central executive node. This node runs a", "False except Exception as e: rospy.logwarn(\"Error communicating with Slackbot /low_battery", "if \"res\" in locals(): rospy.logwarn(\"Response text %s.\" % res.text) rospy.logwarn(traceback.format_exc())" ]
[ "normalize_features='Auto', caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False, silent=True, number_of_threads=None,", "number_of_iterations: Number of iterations. (inputs). :param training_data: The data to", "caching is not None: inputs['Caching'] = try_set( obj=caching, none_acceptable=True, is_of_type=str,", "use_zero_as_missing_value is not None: inputs['UseZeroAsMissingValue'] = try_set( obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool)", "minimum_example_count_per_group is not None: inputs['MinimumExampleCountPerGroup'] = try_set( obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real,", "rate for trees, used to prevent over-fitting. Range: (0,1]. (inputs).", "Sets the random seed for LightGBM to use. (inputs). :param", "if number_of_iterations is not None: inputs['NumberOfIterations'] = try_set( obj=number_of_iterations, none_acceptable=True,", "none_acceptable=True, is_of_type=dict) if label_column_name is not None: inputs['LabelColumnName'] = try_set(", "value or not. (inputs). :param use_zero_as_missing_value: Enable usage of zero", "obj=label_column_name, none_acceptable=True, is_of_type=str, is_column=True) if example_weight_column_name is not None: inputs['ExampleWeightColumnName']", "inputs['UseZeroAsMissingValue'] = try_set( obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool) if minimum_example_count_per_group is not", "= try_set( obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if seed is", "is_of_type=bool) if handle_missing_value is not None: inputs['HandleMissingValue'] = try_set( obj=handle_missing_value,", "child. (inputs). :param feature_column_name: Column to use for features (inputs).", "is_of_type=numbers.Real) if number_of_leaves is not None: inputs['NumberOfLeaves'] = try_set( obj=number_of_leaves,", "not. (inputs). :param use_zero_as_missing_value: Enable usage of zero (0) as", "0.0}) if l2_categorical_regularization is not None: inputs['L2CategoricalRegularization'] = try_set( obj=l2_categorical_regularization,", "Maximum leaves for trees. (inputs). :param minimum_example_count_per_leaf: Minimum number of", "verbose: Verbose (inputs). :param silent: Printing running messages. (inputs). :param", "= try_set( obj=caching, none_acceptable=True, is_of_type=str, values=[ 'Auto', 'Memory', 'None']) if", "row_group_column_name is not None: inputs['RowGroupColumnName'] = try_set( obj=row_group_column_name, none_acceptable=True, is_of_type=str,", "Enable special handling of missing value or not. (inputs). :param", "inputs['L2CategoricalRegularization'] = try_set( obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if seed", "of bucket bin for features. (inputs). :param verbose: Verbose (inputs).", "number_of_threads is not None: inputs['NumberOfThreads'] = try_set( obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real)", "try_set( obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real) if early_stopping_round is not None: inputs['EarlyStoppingRound']", "needed in a child. (inputs). 
:param feature_column_name: Column to use", "is_of_type=str, values=[ 'Auto', 'Memory', 'None']) if unbalanced_sets is not None:", "0, 'Max': 2147483647}) if maximum_categorical_split_point_count is not None: inputs['MaximumCategoricalSplitPointCount'] =", "l2_categorical_regularization: L2 Regularization for categorical split. (inputs). :param seed: Sets", "is not None: inputs['Verbose'] = try_set( obj=verbose, none_acceptable=True, is_of_type=bool) if", "or not. (inputs). :param use_zero_as_missing_value: Enable usage of zero (0)", ":param categorical_smoothing: Lapalace smooth term in categorical feature spilt. Avoid", "values=[ 'Auto', 'Memory', 'None']) if unbalanced_sets is not None: inputs['UnbalancedSets']", "= try_set( obj=evaluation_metric, none_acceptable=True, is_of_type=str, values=[ 'None', 'Default', 'Logloss', 'Error',", "none_acceptable=True, is_of_type=numbers.Real) if use_categorical_split is not None: inputs['UseCategoricalSplit'] = try_set(", "obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real) if early_stopping_round is not None: inputs['EarlyStoppingRound'] =", "is_of_type=bool) if minimum_example_count_per_group is not None: inputs['MinimumExampleCountPerGroup'] = try_set( obj=minimum_example_count_per_group,", "column (inputs). :param caching: Whether trainer should cache input training", "entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs, outputs=outputs, input_variables=input_variables, output_variables=output_variables) return entrypoint", "smooth term in categorical feature spilt. Avoid the bias of", "'Max': 2147483647}) if categorical_smoothing is not None: inputs['CategoricalSmoothing'] = try_set(", "None: inputs['Verbose'] = try_set( obj=verbose, none_acceptable=True, is_of_type=bool) if silent is", "model (outputs). \"\"\" entrypoint_name = 'Trainers.LightGbmBinaryClassifier' inputs = {} outputs", "loading data. (inputs). :param use_categorical_split: Enable categorical split or not.", "trees. (inputs). :param minimum_example_count_per_leaf: Minimum number of instances needed in", "use for labels (inputs). :param example_weight_column_name: Column to use for", "obj=silent, none_acceptable=True, is_of_type=bool) if number_of_threads is not None: inputs['NumberOfThreads'] =", "obj=unbalanced_sets, none_acceptable=True, is_of_type=bool) if weight_of_positive_examples is not None: inputs['WeightOfPositiveExamples'] =", "is_of_type=str) if learning_rate is not None: inputs['LearningRate'] = try_set( obj=learning_rate,", "inputs['MaximumCategoricalSplitPointCount'] = try_set( obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max':", "None: inputs['Seed'] = try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real) if parallel_trainer is", "number_of_threads: Number of parallel threads used to run LightGBM. 
(inputs).", "categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None, parallel_trainer=None, **params): \"\"\" **Description** Train a LightGBM", "obj=batch_size, none_acceptable=True, is_of_type=numbers.Real) if use_categorical_split is not None: inputs['UseCategoricalSplit'] =", "obj=parallel_trainer, none_acceptable=True, is_of_type=dict) if predictor_model is not None: outputs['PredictorModel'] =", "'Default', 'Logloss', 'Error', 'AreaUnderCurve']) if maximum_bin_count_per_feature is not None: inputs['MaximumBinCountPerFeature']", "0, 'Max': 2147483647}) if categorical_smoothing is not None: inputs['CategoricalSmoothing'] =", "not None: inputs['WeightOfPositiveExamples'] = try_set( obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real) if sigmoid", "predictor_model=None, number_of_iterations=100, learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None, label_column_name='Label', example_weight_column_name=None, row_group_column_name=None,", "not None: inputs['TrainingData'] = try_set( obj=training_data, none_acceptable=False, is_of_type=str) if learning_rate", "LightGBM. (inputs). :param early_stopping_round: Rounds of early stopping, 0 will", "group. (inputs). :param maximum_categorical_split_point_count: Max number of categorical thresholds. (inputs).", "= { x for x in unlist(inputs.values()) if isinstance(x, str)", "prevent over-fitting. Range: (0,1]. (inputs). :param number_of_leaves: Maximum leaves for", "cache input training data (inputs). :param unbalanced_sets: Use for binary", "the balance of positive and negative weights, useful for unbalanced", "obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real) if minimum_example_count_per_leaf is not None: inputs['MinimumExampleCountPerLeaf'] =", "Number of parallel threads used to run LightGBM. (inputs). :param", "not None: inputs['UseCategoricalSplit'] = try_set( obj=use_categorical_split, none_acceptable=True, is_of_type=bool) if handle_missing_value", "of entries in a batch when loading data. (inputs). :param", "the bias of small categories. (inputs). :param l2_categorical_regularization: L2 Regularization", "not None: inputs['Verbose'] = try_set( obj=verbose, none_acceptable=True, is_of_type=bool) if silent", "(inputs). :param booster: Which booster to use, can be gbtree,", "is_of_type=numbers.Real) if verbose is not None: inputs['Verbose'] = try_set( obj=verbose,", "handling of missing value or not. (inputs). :param use_zero_as_missing_value: Enable", "for labels (inputs). :param example_weight_column_name: Column to use for example", ":param seed: Sets the random seed for LightGBM to use.", "booster to use, can be gbtree, gblinear or dart. 
gbtree", "try_set( obj=example_weight_column_name, none_acceptable=True, is_of_type=str, is_column=True) if row_group_column_name is not None:", "try_set( obj=handle_missing_value, none_acceptable=True, is_of_type=bool) if use_zero_as_missing_value is not None: inputs['UseZeroAsMissingValue']", "= try_set( obj=training_data, none_acceptable=False, is_of_type=str) if learning_rate is not None:", "is not None: inputs['NumberOfThreads'] = try_set( obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real) if", "None: inputs['UnbalancedSets'] = try_set( obj=unbalanced_sets, none_acceptable=True, is_of_type=bool) if weight_of_positive_examples is", "obj=evaluation_metric, none_acceptable=True, is_of_type=str, values=[ 'None', 'Default', 'Logloss', 'Error', 'AreaUnderCurve']) if", "inputs['NumberOfLeaves'] = try_set( obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real) if minimum_example_count_per_leaf is not", "try_set( obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if", "isinstance(x, str) and x.startswith(\"$\")} entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs, outputs=outputs,", "of iterations. (inputs). :param training_data: The data to be used", "to use, can be gbtree, gblinear or dart. gbtree and", "obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real) if number_of_leaves is not None: inputs['NumberOfLeaves'] =", "per categorical group. (inputs). :param maximum_categorical_split_point_count: Max number of categorical", "values=[ 'None', 'Default', 'Logloss', 'Error', 'AreaUnderCurve']) if maximum_bin_count_per_feature is not", "(inputs). :param l2_categorical_regularization: L2 Regularization for categorical split. (inputs). :param", "none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if seed is not None: inputs['Seed']", "booster: Which booster to use, can be gbtree, gblinear or", "for unbalanced classes. A typical value to consider: sum(negative cases)", "'None']) if unbalanced_sets is not None: inputs['UnbalancedSets'] = try_set( obj=unbalanced_sets,", "not None: inputs['Sigmoid'] = try_set( obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real) if evaluation_metric", "obj=feature_column_name, none_acceptable=True, is_of_type=str, is_column=True) if booster is not None: inputs['Booster']", "l2_categorical_regularization=10.0, seed=None, parallel_trainer=None, **params): \"\"\" **Description** Train a LightGBM binary", "= try_set( obj=batch_size, none_acceptable=True, is_of_type=numbers.Real) if use_categorical_split is not None:", "not None: inputs['LearningRate'] = try_set( obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real) if number_of_leaves", "maximum_bin_count_per_feature: Maximum number of bucket bin for features. (inputs). :param", "if caching is not None: inputs['Caching'] = try_set( obj=caching, none_acceptable=True,", "Enable categorical split or not. (inputs). :param handle_missing_value: Enable special", "inputs['TrainingData'] = try_set( obj=training_data, none_acceptable=False, is_of_type=str) if learning_rate is not", "positive and negative weights, useful for unbalanced classes. A typical", "is_of_type=dict) if predictor_model is not None: outputs['PredictorModel'] = try_set( obj=predictor_model,", "(inputs). :param evaluation_metric: Evaluation metrics. (inputs). 
:param maximum_bin_count_per_feature: Maximum number", "is_of_type=bool) if weight_of_positive_examples is not None: inputs['WeightOfPositiveExamples'] = try_set( obj=weight_of_positive_examples,", "the feature column (inputs). :param caching: Whether trainer should cache", "handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None, parallel_trainer=None, **params): \"\"\"", "None: outputs['PredictorModel'] = try_set( obj=predictor_model, none_acceptable=False, is_of_type=str) input_variables = {", "unlist(outputs.values()) if isinstance(x, str) and x.startswith(\"$\")} entrypoint = EntryPoint( name=entrypoint_name,", "(inputs). :param label_column_name: Column to use for labels (inputs). :param", "label_column_name: Column to use for labels (inputs). :param example_weight_column_name: Column", "{} outputs = {} if number_of_iterations is not None: inputs['NumberOfIterations']", "is not None: inputs['UnbalancedSets'] = try_set( obj=unbalanced_sets, none_acceptable=True, is_of_type=bool) if", "obj=example_weight_column_name, none_acceptable=True, is_of_type=str, is_column=True) if row_group_column_name is not None: inputs['RowGroupColumnName']", "if feature_column_name is not None: inputs['FeatureColumnName'] = try_set( obj=feature_column_name, none_acceptable=True,", "obj=caching, none_acceptable=True, is_of_type=str, values=[ 'Auto', 'Memory', 'None']) if unbalanced_sets is", "not None: inputs['LabelColumnName'] = try_set( obj=label_column_name, none_acceptable=True, is_of_type=str, is_column=True) if", "if example_weight_column_name is not None: inputs['ExampleWeightColumnName'] = try_set( obj=example_weight_column_name, none_acceptable=True,", ":param unbalanced_sets: Use for binary classification when training data is", "(inputs). :param row_group_column_name: Column to use for example groupId (inputs).", "data (inputs). :param unbalanced_sets: Use for binary classification when training", ":param parallel_trainer: Parallel LightGBM Learning Algorithm (inputs). :param predictor_model: The", "as missing value. (inputs). :param minimum_example_count_per_group: Minimum number of instances", "none_acceptable=True, is_of_type=str, values=[ 'Auto', 'Memory', 'None']) if unbalanced_sets is not", "number_of_leaves: Maximum leaves for trees. (inputs). :param minimum_example_count_per_leaf: Minimum number", "not None: inputs['MinimumExampleCountPerLeaf'] = try_set( obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real) if feature_column_name", "verbose is not None: inputs['Verbose'] = try_set( obj=verbose, none_acceptable=True, is_of_type=bool)", "if minimum_example_count_per_leaf is not None: inputs['MinimumExampleCountPerLeaf'] = try_set( obj=minimum_example_count_per_leaf, none_acceptable=True,", "..utils.utils import try_set, unlist def trainers_lightgbmbinaryclassifier( training_data, predictor_model=None, number_of_iterations=100, learning_rate=None,", "try_set( obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real) if verbose is not None: inputs['Verbose']", "(inputs). 
:param unbalanced_sets: Use for binary classification when training data", "{ x for x in unlist(outputs.values()) if isinstance(x, str) and", "is not None: inputs['FeatureColumnName'] = try_set( obj=feature_column_name, none_acceptable=True, is_of_type=str, is_column=True)", "minimum_example_count_per_leaf is not None: inputs['MinimumExampleCountPerLeaf'] = try_set( obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real)", ":param use_zero_as_missing_value: Enable usage of zero (0) as missing value.", "not None: outputs['PredictorModel'] = try_set( obj=predictor_model, none_acceptable=False, is_of_type=str) input_variables =", "if sigmoid is not None: inputs['Sigmoid'] = try_set( obj=sigmoid, none_acceptable=True,", "= try_set( obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real) if minimum_example_count_per_leaf is not None:", "None: inputs['MaximumBinCountPerFeature'] = try_set( obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real) if verbose is", "..utils.entrypoints import EntryPoint from ..utils.utils import try_set, unlist def trainers_lightgbmbinaryclassifier(", "if training_data is not None: inputs['TrainingData'] = try_set( obj=training_data, none_acceptable=False,", "is not None: inputs['UseCategoricalSplit'] = try_set( obj=use_categorical_split, none_acceptable=True, is_of_type=bool) if", "missing value. (inputs). :param minimum_example_count_per_group: Minimum number of instances per", "Enable usage of zero (0) as missing value. (inputs). :param", "be used for training (inputs). :param learning_rate: Shrinkage rate for", "= try_set( obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real) if feature_column_name is not None:", "is_column=True) if example_weight_column_name is not None: inputs['ExampleWeightColumnName'] = try_set( obj=example_weight_column_name,", "is_of_type=numbers.Real, valid_range={'Min': 0.0}) if seed is not None: inputs['Seed'] =", "tools/entrypoint_compiler.py: do not edit by hand \"\"\" Trainers.LightGbmBinaryClassifier \"\"\" import", "weights, useful for unbalanced classes. A typical value to consider:", "training_data, predictor_model=None, number_of_iterations=100, learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None, label_column_name='Label', example_weight_column_name=None,", "inputs['CategoricalSmoothing'] = try_set( obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if l2_categorical_regularization", "None: inputs['EvaluationMetric'] = try_set( obj=evaluation_metric, none_acceptable=True, is_of_type=str, values=[ 'None', 'Default',", "'Trainers.LightGbmBinaryClassifier' inputs = {} outputs = {} if number_of_iterations is", "is_of_type=str, is_column=True) if booster is not None: inputs['Booster'] = try_set(", "Number of iterations. (inputs). :param training_data: The data to be", "threads used to run LightGBM. (inputs). :param early_stopping_round: Rounds of", "of instances per categorical group. (inputs). :param maximum_categorical_split_point_count: Max number", "if handle_missing_value is not None: inputs['HandleMissingValue'] = try_set( obj=handle_missing_value, none_acceptable=True,", "if normalize_features is not None: inputs['NormalizeFeatures'] = try_set( obj=normalize_features, none_acceptable=True,", "Avoid the bias of small categories. (inputs). 
:param l2_categorical_regularization: L2", "obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real) if verbose is not None: inputs['Verbose'] =", ":param sigmoid: Parameter for the sigmoid function. (inputs). :param evaluation_metric:", "= try_set( obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real) if verbose is not None:", "none_acceptable=True, is_of_type=bool) if minimum_example_count_per_group is not None: inputs['MinimumExampleCountPerGroup'] = try_set(", "not None: inputs['NumberOfIterations'] = try_set( obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real) if training_data", "'None', 'Default', 'Logloss', 'Error', 'AreaUnderCurve']) if maximum_bin_count_per_feature is not None:", "example_weight_column_name: Column to use for example weight (inputs). :param row_group_column_name:", "= try_set( obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if l2_categorical_regularization is", "is not None: inputs['NumberOfLeaves'] = try_set( obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real) if", "obj=normalize_features, none_acceptable=True, is_of_type=str, values=[ 'No', 'Warn', 'Auto', 'Yes']) if caching", "try_set( obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real) if training_data is not None: inputs['TrainingData']", "model. :param number_of_iterations: Number of iterations. (inputs). :param training_data: The", "obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real) if sigmoid is not None: inputs['Sigmoid'] =", "'Memory', 'None']) if unbalanced_sets is not None: inputs['UnbalancedSets'] = try_set(", "special handling of missing value or not. (inputs). :param use_zero_as_missing_value:", "is not None: inputs['Caching'] = try_set( obj=caching, none_acceptable=True, is_of_type=str, values=[", "The data to be used for training (inputs). :param learning_rate:", "sum(positive cases). (inputs). :param sigmoid: Parameter for the sigmoid function.", "is_of_type=str, is_column=True) if example_weight_column_name is not None: inputs['ExampleWeightColumnName'] = try_set(", "is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if maximum_categorical_split_point_count is not", "to use for example groupId (inputs). :param normalize_features: Normalize option", ":param silent: Printing running messages. (inputs). :param number_of_threads: Number of", "= 'Trainers.LightGbmBinaryClassifier' inputs = {} outputs = {} if number_of_iterations", "is_column=True) if normalize_features is not None: inputs['NormalizeFeatures'] = try_set( obj=normalize_features,", "isinstance(x, str) and x.startswith(\"$\")} output_variables = { x for x", "of missing value or not. (inputs). :param use_zero_as_missing_value: Enable usage", "outputs = {} if number_of_iterations is not None: inputs['NumberOfIterations'] =", "term in categorical feature spilt. Avoid the bias of small", "try_set( obj=feature_column_name, none_acceptable=True, is_of_type=str, is_column=True) if booster is not None:", "batch_size: Number of entries in a batch when loading data.", "'AreaUnderCurve']) if maximum_bin_count_per_feature is not None: inputs['MaximumBinCountPerFeature'] = try_set( obj=maximum_bin_count_per_feature,", "it. (inputs). :param batch_size: Number of entries in a batch", "is_of_type=str, values=[ 'None', 'Default', 'Logloss', 'Error', 'AreaUnderCurve']) if maximum_bin_count_per_feature is", "for the sigmoid function. (inputs). 
:param evaluation_metric: Evaluation metrics. (inputs).", "none_acceptable=True, is_of_type=str, values=[ 'None', 'Default', 'Logloss', 'Error', 'AreaUnderCurve']) if maximum_bin_count_per_feature", "is not None: inputs['ExampleWeightColumnName'] = try_set( obj=example_weight_column_name, none_acceptable=True, is_of_type=str, is_column=True)", "number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None, label_column_name='Label', example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto', caching='Auto', unbalanced_sets=False,", "unbalanced_sets: Use for binary classification when training data is not", "categorical_smoothing: Lapalace smooth term in categorical feature spilt. Avoid the", "if row_group_column_name is not None: inputs['RowGroupColumnName'] = try_set( obj=row_group_column_name, none_acceptable=True,", "function. (inputs). :param evaluation_metric: Evaluation metrics. (inputs). :param maximum_bin_count_per_feature: Maximum", "in categorical feature spilt. Avoid the bias of small categories.", "if silent is not None: inputs['Silent'] = try_set( obj=silent, none_acceptable=True,", "not None: inputs['EarlyStoppingRound'] = try_set( obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real) if batch_size", "seed for LightGBM to use. (inputs). :param parallel_trainer: Parallel LightGBM", "None: inputs['LearningRate'] = try_set( obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real) if number_of_leaves is", "stopping, 0 will disable it. (inputs). :param batch_size: Number of", "silent is not None: inputs['Silent'] = try_set( obj=silent, none_acceptable=True, is_of_type=bool)", "'Error', 'AreaUnderCurve']) if maximum_bin_count_per_feature is not None: inputs['MaximumBinCountPerFeature'] = try_set(", ":param label_column_name: Column to use for labels (inputs). :param example_weight_column_name:", "none_acceptable=True, is_of_type=str, is_column=True) if row_group_column_name is not None: inputs['RowGroupColumnName'] =", "try_set( obj=parallel_trainer, none_acceptable=True, is_of_type=dict) if predictor_model is not None: outputs['PredictorModel']", "inputs['Caching'] = try_set( obj=caching, none_acceptable=True, is_of_type=str, values=[ 'Auto', 'Memory', 'None'])", "str) and x.startswith(\"$\")} entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs, outputs=outputs, input_variables=input_variables,", "if use_categorical_split is not None: inputs['UseCategoricalSplit'] = try_set( obj=use_categorical_split, none_acceptable=True,", "if early_stopping_round is not None: inputs['EarlyStoppingRound'] = try_set( obj=early_stopping_round, none_acceptable=True,", "= try_set( obj=verbose, none_acceptable=True, is_of_type=bool) if silent is not None:", "= {} outputs = {} if number_of_iterations is not None:", "try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real) if parallel_trainer is not None: inputs['ParallelTrainer']", "training_data is not None: inputs['TrainingData'] = try_set( obj=training_data, none_acceptable=False, is_of_type=str)", "none_acceptable=True, is_of_type=bool) if silent is not None: inputs['Silent'] = try_set(", "is not None: inputs['BatchSize'] = try_set( obj=batch_size, none_acceptable=True, is_of_type=numbers.Real) if", ":param training_data: The data to be used for training (inputs).", "evaluation_metric: Evaluation metrics. (inputs). 
:param maximum_bin_count_per_feature: Maximum number of bucket", "'Max': 2147483647}) if maximum_categorical_split_point_count is not None: inputs['MaximumCategoricalSplitPointCount'] = try_set(", "weight (inputs). :param row_group_column_name: Column to use for example groupId", "for the feature column (inputs). :param caching: Whether trainer should", "try_set( obj=batch_size, none_acceptable=True, is_of_type=numbers.Real) if use_categorical_split is not None: inputs['UseCategoricalSplit']", "minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None, label_column_name='Label', example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto', caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0,", "trees, used to prevent over-fitting. Range: (0,1]. (inputs). :param number_of_leaves:", "seed is not None: inputs['Seed'] = try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real)", "Generated by tools/entrypoint_compiler.py: do not edit by hand \"\"\" Trainers.LightGbmBinaryClassifier", "/ sum(positive cases). (inputs). :param sigmoid: Parameter for the sigmoid", "(inputs). :param weight_of_positive_examples: Control the balance of positive and negative", "small categories. (inputs). :param l2_categorical_regularization: L2 Regularization for categorical split.", "if number_of_leaves is not None: inputs['NumberOfLeaves'] = try_set( obj=number_of_leaves, none_acceptable=True,", ":param number_of_threads: Number of parallel threads used to run LightGBM.", "bin for features. (inputs). :param verbose: Verbose (inputs). :param silent:", "Printing running messages. (inputs). :param number_of_threads: Number of parallel threads", "try_set( obj=label_column_name, none_acceptable=True, is_of_type=str, is_column=True) if example_weight_column_name is not None:", "use tree based model while gblinear uses linear function. (inputs).", "\"\"\" Trainers.LightGbmBinaryClassifier \"\"\" import numbers from ..utils.entrypoints import EntryPoint from", "parallel_trainer=None, **params): \"\"\" **Description** Train a LightGBM binary classification model.", "try_set( obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real) if feature_column_name is not None: inputs['FeatureColumnName']", "obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if categorical_smoothing", "use for example weight (inputs). :param row_group_column_name: Column to use", "Which booster to use, can be gbtree, gblinear or dart.", "try_set( obj=predictor_model, none_acceptable=False, is_of_type=str) input_variables = { x for x", "obj=seed, none_acceptable=True, is_of_type=numbers.Real) if parallel_trainer is not None: inputs['ParallelTrainer'] =", "LightGBM Learning Algorithm (inputs). :param predictor_model: The trained model (outputs).", "if weight_of_positive_examples is not None: inputs['WeightOfPositiveExamples'] = try_set( obj=weight_of_positive_examples, none_acceptable=True,", "when training data is not balanced. (inputs). :param weight_of_positive_examples: Control", "= {} if number_of_iterations is not None: inputs['NumberOfIterations'] = try_set(", "inputs['MaximumBinCountPerFeature'] = try_set( obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real) if verbose is not", "spilt. Avoid the bias of small categories. (inputs). :param l2_categorical_regularization:", "features (inputs). 
:param booster: Which booster to use, can be", "try_set( obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real) if batch_size is not None: inputs['BatchSize']", "'Yes']) if caching is not None: inputs['Caching'] = try_set( obj=caching,", "None: inputs['UseZeroAsMissingValue'] = try_set( obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool) if minimum_example_count_per_group is", "Use for binary classification when training data is not balanced.", "(outputs). \"\"\" entrypoint_name = 'Trainers.LightGbmBinaryClassifier' inputs = {} outputs =", "example_weight_column_name is not None: inputs['ExampleWeightColumnName'] = try_set( obj=example_weight_column_name, none_acceptable=True, is_of_type=str,", "x in unlist(outputs.values()) if isinstance(x, str) and x.startswith(\"$\")} entrypoint =", "(inputs). :param batch_size: Number of entries in a batch when", "value. (inputs). :param minimum_example_count_per_group: Minimum number of instances per categorical", "is not None: inputs['Booster'] = try_set( obj=booster, none_acceptable=True, is_of_type=dict) if", "= try_set( obj=feature_column_name, none_acceptable=True, is_of_type=str, is_column=True) if booster is not", "use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None, parallel_trainer=None, **params): \"\"\" **Description**", "input_variables = { x for x in unlist(inputs.values()) if isinstance(x,", "dart use tree based model while gblinear uses linear function.", "is_of_type=numbers.Real) if batch_size is not None: inputs['BatchSize'] = try_set( obj=batch_size,", "iterations. (inputs). :param training_data: The data to be used for", "feature_column_name: Column to use for features (inputs). :param booster: Which", "to run LightGBM. (inputs). :param early_stopping_round: Rounds of early stopping,", "of categorical thresholds. (inputs). :param categorical_smoothing: Lapalace smooth term in", "categorical feature spilt. Avoid the bias of small categories. (inputs).", "LightGBM to use. (inputs). :param parallel_trainer: Parallel LightGBM Learning Algorithm", "Parallel LightGBM Learning Algorithm (inputs). :param predictor_model: The trained model", "training data is not balanced. (inputs). :param weight_of_positive_examples: Control the", "(inputs). :param verbose: Verbose (inputs). 
:param silent: Printing running messages.", "none_acceptable=True, is_of_type=numbers.Real) if minimum_example_count_per_leaf is not None: inputs['MinimumExampleCountPerLeaf'] = try_set(", "none_acceptable=True, is_of_type=str, values=[ 'No', 'Warn', 'Auto', 'Yes']) if caching is", "inputs['ExampleWeightColumnName'] = try_set( obj=example_weight_column_name, none_acceptable=True, is_of_type=str, is_column=True) if row_group_column_name is", "try_set( obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real) if sigmoid is not None: inputs['Sigmoid']", "try_set( obj=silent, none_acceptable=True, is_of_type=bool) if number_of_threads is not None: inputs['NumberOfThreads']", "is_of_type=numbers.Real) if sigmoid is not None: inputs['Sigmoid'] = try_set( obj=sigmoid,", "is not None: inputs['LabelColumnName'] = try_set( obj=label_column_name, none_acceptable=True, is_of_type=str, is_column=True)", "inputs['ParallelTrainer'] = try_set( obj=parallel_trainer, none_acceptable=True, is_of_type=dict) if predictor_model is not", "weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False, silent=True, number_of_threads=None, early_stopping_round=0, batch_size=1048576, use_categorical_split=None,", "not None: inputs['BatchSize'] = try_set( obj=batch_size, none_acceptable=True, is_of_type=numbers.Real) if use_categorical_split", "(inputs). :param minimum_example_count_per_leaf: Minimum number of instances needed in a", "inputs['HandleMissingValue'] = try_set( obj=handle_missing_value, none_acceptable=True, is_of_type=bool) if use_zero_as_missing_value is not", "none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if categorical_smoothing is", "none_acceptable=True, is_of_type=str, is_column=True) if booster is not None: inputs['Booster'] =", "obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real) if training_data is not None: inputs['TrainingData'] =", "= try_set( obj=handle_missing_value, none_acceptable=True, is_of_type=bool) if use_zero_as_missing_value is not None:", "caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False, silent=True, number_of_threads=None, early_stopping_round=0,", "= try_set( obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real) if training_data is not None:", "= try_set( obj=unbalanced_sets, none_acceptable=True, is_of_type=bool) if weight_of_positive_examples is not None:", "minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None, parallel_trainer=None, **params): \"\"\" **Description** Train", "sum(negative cases) / sum(positive cases). (inputs). :param sigmoid: Parameter for", "= try_set( obj=row_group_column_name, none_acceptable=True, is_of_type=str, is_column=True) if normalize_features is not", "none_acceptable=True, is_of_type=bool) if use_zero_as_missing_value is not None: inputs['UseZeroAsMissingValue'] = try_set(", ":param evaluation_metric: Evaluation metrics. (inputs). :param maximum_bin_count_per_feature: Maximum number of", "used to prevent over-fitting. Range: (0,1]. (inputs). :param number_of_leaves: Maximum", "(inputs). :param parallel_trainer: Parallel LightGBM Learning Algorithm (inputs). 
:param predictor_model:", "inputs['WeightOfPositiveExamples'] = try_set( obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real) if sigmoid is not", "when loading data. (inputs). :param use_categorical_split: Enable categorical split or", ":param predictor_model: The trained model (outputs). \"\"\" entrypoint_name = 'Trainers.LightGbmBinaryClassifier'", "is not None: inputs['EarlyStoppingRound'] = try_set( obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real) if", "is_of_type=numbers.Real) if evaluation_metric is not None: inputs['EvaluationMetric'] = try_set( obj=evaluation_metric,", "(inputs). :param maximum_categorical_split_point_count: Max number of categorical thresholds. (inputs). :param", "is not None: inputs['WeightOfPositiveExamples'] = try_set( obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real) if", "None: inputs['ExampleWeightColumnName'] = try_set( obj=example_weight_column_name, none_acceptable=True, is_of_type=str, is_column=True) if row_group_column_name", "labels (inputs). :param example_weight_column_name: Column to use for example weight", "binary classification model. :param number_of_iterations: Number of iterations. (inputs). :param", "for trees. (inputs). :param minimum_example_count_per_leaf: Minimum number of instances needed", "is not None: inputs['HandleMissingValue'] = try_set( obj=handle_missing_value, none_acceptable=True, is_of_type=bool) if", "a child. (inputs). :param feature_column_name: Column to use for features", "try_set( obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if", "linear function. (inputs). :param label_column_name: Column to use for labels", "= try_set( obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647})", "disable it. (inputs). :param batch_size: Number of entries in a", "use, can be gbtree, gblinear or dart. gbtree and dart", "Verbose (inputs). :param silent: Printing running messages. (inputs). :param number_of_threads:", "of instances needed in a child. (inputs). :param feature_column_name: Column", "LightGBM binary classification model. :param number_of_iterations: Number of iterations. (inputs).", "to be used for training (inputs). :param learning_rate: Shrinkage rate", "used to run LightGBM. (inputs). :param early_stopping_round: Rounds of early", "batch when loading data. (inputs). :param use_categorical_split: Enable categorical split", "zero (0) as missing value. (inputs). :param minimum_example_count_per_group: Minimum number", "obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if l2_categorical_regularization is not None:", "None: inputs['Booster'] = try_set( obj=booster, none_acceptable=True, is_of_type=dict) if label_column_name is", ":param weight_of_positive_examples: Control the balance of positive and negative weights,", "for features. (inputs). :param verbose: Verbose (inputs). :param silent: Printing", "is_of_type=numbers.Real) if minimum_example_count_per_leaf is not None: inputs['MinimumExampleCountPerLeaf'] = try_set( obj=minimum_example_count_per_leaf,", "= try_set( obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real) if batch_size is not None:", "uses linear function. (inputs). :param label_column_name: Column to use for", "categorical group. (inputs). 
:param maximum_categorical_split_point_count: Max number of categorical thresholds.", "not None: inputs['MaximumCategoricalSplitPointCount'] = try_set( obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf':", "for x in unlist(outputs.values()) if isinstance(x, str) and x.startswith(\"$\")} entrypoint", "not None: inputs['MaximumBinCountPerFeature'] = try_set( obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real) if verbose", "none_acceptable=True, is_of_type=numbers.Real) if batch_size is not None: inputs['BatchSize'] = try_set(", "categorical thresholds. (inputs). :param categorical_smoothing: Lapalace smooth term in categorical", "= try_set( obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real) if number_of_leaves is not None:", "function. (inputs). :param label_column_name: Column to use for labels (inputs).", "if number_of_threads is not None: inputs['NumberOfThreads'] = try_set( obj=number_of_threads, none_acceptable=True,", "inputs['Booster'] = try_set( obj=booster, none_acceptable=True, is_of_type=dict) if label_column_name is not", "feature column (inputs). :param caching: Whether trainer should cache input", "and negative weights, useful for unbalanced classes. A typical value", "None: inputs['CategoricalSmoothing'] = try_set( obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if", "(inputs). :param learning_rate: Shrinkage rate for trees, used to prevent", "in unlist(outputs.values()) if isinstance(x, str) and x.startswith(\"$\")} entrypoint = EntryPoint(", "(inputs). :param handle_missing_value: Enable special handling of missing value or", "is_of_type=numbers.Real) if parallel_trainer is not None: inputs['ParallelTrainer'] = try_set( obj=parallel_trainer,", "training_data: The data to be used for training (inputs). :param", "obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real) if batch_size is not None: inputs['BatchSize'] =", "(0,1]. (inputs). :param number_of_leaves: Maximum leaves for trees. (inputs). :param", "categorical split or not. (inputs). :param handle_missing_value: Enable special handling", "(inputs). :param caching: Whether trainer should cache input training data", "trained model (outputs). \"\"\" entrypoint_name = 'Trainers.LightGbmBinaryClassifier' inputs = {}", "inputs['LabelColumnName'] = try_set( obj=label_column_name, none_acceptable=True, is_of_type=str, is_column=True) if example_weight_column_name is", "leaves for trees. (inputs). :param minimum_example_count_per_leaf: Minimum number of instances", "verbose=False, silent=True, number_of_threads=None, early_stopping_round=0, batch_size=1048576, use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32,", "None: inputs['Sigmoid'] = try_set( obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real) if evaluation_metric is", "Number of entries in a batch when loading data. (inputs).", "valid_range={ 'Inf': 0, 'Max': 2147483647}) if categorical_smoothing is not None:", "the sigmoid function. (inputs). :param evaluation_metric: Evaluation metrics. (inputs). 
:param", "inputs['Seed'] = try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real) if parallel_trainer is not", "output_variables = { x for x in unlist(outputs.values()) if isinstance(x,", "none_acceptable=True, is_of_type=numbers.Real) if early_stopping_round is not None: inputs['EarlyStoppingRound'] = try_set(", "early_stopping_round is not None: inputs['EarlyStoppingRound'] = try_set( obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real)", "if isinstance(x, str) and x.startswith(\"$\")} output_variables = { x for", "evaluation_metric is not None: inputs['EvaluationMetric'] = try_set( obj=evaluation_metric, none_acceptable=True, is_of_type=str,", "try_set( obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real) if number_of_leaves is not None: inputs['NumberOfLeaves']", ":param example_weight_column_name: Column to use for example weight (inputs). :param", "sigmoid is not None: inputs['Sigmoid'] = try_set( obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real)", "import EntryPoint from ..utils.utils import try_set, unlist def trainers_lightgbmbinaryclassifier( training_data,", "used for training (inputs). :param learning_rate: Shrinkage rate for trees,", "inputs['UnbalancedSets'] = try_set( obj=unbalanced_sets, none_acceptable=True, is_of_type=bool) if weight_of_positive_examples is not", "none_acceptable=True, is_of_type=dict) if predictor_model is not None: outputs['PredictorModel'] = try_set(", "inputs['Silent'] = try_set( obj=silent, none_acceptable=True, is_of_type=bool) if number_of_threads is not", "inputs['NumberOfIterations'] = try_set( obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real) if training_data is not", "\"\"\" **Description** Train a LightGBM binary classification model. :param number_of_iterations:", "data is not balanced. (inputs). :param weight_of_positive_examples: Control the balance", "is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if categorical_smoothing is not", "try_set( obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real) if minimum_example_count_per_leaf is not None: inputs['MinimumExampleCountPerLeaf']", "None: inputs['NormalizeFeatures'] = try_set( obj=normalize_features, none_acceptable=True, is_of_type=str, values=[ 'No', 'Warn',", "not None: inputs['Booster'] = try_set( obj=booster, none_acceptable=True, is_of_type=dict) if label_column_name", "None: inputs['ParallelTrainer'] = try_set( obj=parallel_trainer, none_acceptable=True, is_of_type=dict) if predictor_model is", "if categorical_smoothing is not None: inputs['CategoricalSmoothing'] = try_set( obj=categorical_smoothing, none_acceptable=True,", "number_of_leaves is not None: inputs['NumberOfLeaves'] = try_set( obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real)", "'Inf': 0, 'Max': 2147483647}) if categorical_smoothing is not None: inputs['CategoricalSmoothing']", "feature_column_name='Features', booster=None, label_column_name='Label', example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto', caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5,", "random seed for LightGBM to use. (inputs). 
:param parallel_trainer: Parallel", "2147483647}) if categorical_smoothing is not None: inputs['CategoricalSmoothing'] = try_set( obj=categorical_smoothing,", "inputs = {} outputs = {} if number_of_iterations is not", "maximum_categorical_split_point_count is not None: inputs['MaximumCategoricalSplitPointCount'] = try_set( obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real,", "is not None: inputs['ParallelTrainer'] = try_set( obj=parallel_trainer, none_acceptable=True, is_of_type=dict) if", "of parallel threads used to run LightGBM. (inputs). :param early_stopping_round:", "none_acceptable=True, is_of_type=numbers.Real) if number_of_leaves is not None: inputs['NumberOfLeaves'] = try_set(", "(inputs). :param normalize_features: Normalize option for the feature column (inputs).", "outputs['PredictorModel'] = try_set( obj=predictor_model, none_acceptable=False, is_of_type=str) input_variables = { x", "(inputs). :param number_of_leaves: Maximum leaves for trees. (inputs). :param minimum_example_count_per_leaf:", "(inputs). :param predictor_model: The trained model (outputs). \"\"\" entrypoint_name =", "is not None: inputs['EvaluationMetric'] = try_set( obj=evaluation_metric, none_acceptable=True, is_of_type=str, values=[", "consider: sum(negative cases) / sum(positive cases). (inputs). :param sigmoid: Parameter", "none_acceptable=True, is_of_type=numbers.Real) if evaluation_metric is not None: inputs['EvaluationMetric'] = try_set(", "x for x in unlist(outputs.values()) if isinstance(x, str) and x.startswith(\"$\")}", "None: inputs['NumberOfThreads'] = try_set( obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real) if early_stopping_round is", "inputs['NumberOfThreads'] = try_set( obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real) if early_stopping_round is not", "or dart. gbtree and dart use tree based model while", "x for x in unlist(inputs.values()) if isinstance(x, str) and x.startswith(\"$\")}", "running messages. (inputs). :param number_of_threads: Number of parallel threads used", "try_set( obj=use_categorical_split, none_acceptable=True, is_of_type=bool) if handle_missing_value is not None: inputs['HandleMissingValue']", "none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if l2_categorical_regularization is not None: inputs['L2CategoricalRegularization']", "if verbose is not None: inputs['Verbose'] = try_set( obj=verbose, none_acceptable=True,", "= try_set( obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647})", "split. (inputs). :param seed: Sets the random seed for LightGBM", "Train a LightGBM binary classification model. :param number_of_iterations: Number of", "none_acceptable=False, is_of_type=str) input_variables = { x for x in unlist(inputs.values())", "maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None, parallel_trainer=None, **params): \"\"\" **Description** Train a", "valid_range={'Min': 0.0}) if seed is not None: inputs['Seed'] = try_set(", "for example weight (inputs). :param row_group_column_name: Column to use for", "is_of_type=str, values=[ 'No', 'Warn', 'Auto', 'Yes']) if caching is not", "try_set( obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if seed is not", ":param maximum_categorical_split_point_count: Max number of categorical thresholds. (inputs). 
:param categorical_smoothing:", "use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None, parallel_trainer=None, **params):", "maximum_bin_count_per_feature is not None: inputs['MaximumBinCountPerFeature'] = try_set( obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real)", "Max number of categorical thresholds. (inputs). :param categorical_smoothing: Lapalace smooth", "= try_set( obj=silent, none_acceptable=True, is_of_type=bool) if number_of_threads is not None:", "over-fitting. Range: (0,1]. (inputs). :param number_of_leaves: Maximum leaves for trees.", "obj=booster, none_acceptable=True, is_of_type=dict) if label_column_name is not None: inputs['LabelColumnName'] =", "if minimum_example_count_per_group is not None: inputs['MinimumExampleCountPerGroup'] = try_set( obj=minimum_example_count_per_group, none_acceptable=True,", ":param number_of_leaves: Maximum leaves for trees. (inputs). :param minimum_example_count_per_leaf: Minimum", ":param early_stopping_round: Rounds of early stopping, 0 will disable it.", "usage of zero (0) as missing value. (inputs). :param minimum_example_count_per_group:", "feature spilt. Avoid the bias of small categories. (inputs). :param", "none_acceptable=True, is_of_type=numbers.Real) if feature_column_name is not None: inputs['FeatureColumnName'] = try_set(", "and x.startswith(\"$\")} entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs, outputs=outputs, input_variables=input_variables, output_variables=output_variables)", "is_of_type=numbers.Real) if feature_column_name is not None: inputs['FeatureColumnName'] = try_set( obj=feature_column_name,", "row_group_column_name=None, normalize_features='Auto', caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False, silent=True,", "can be gbtree, gblinear or dart. gbtree and dart use", "and x.startswith(\"$\")} output_variables = { x for x in unlist(outputs.values())", "is_column=True) if row_group_column_name is not None: inputs['RowGroupColumnName'] = try_set( obj=row_group_column_name,", "useful for unbalanced classes. A typical value to consider: sum(negative", "predictor_model is not None: outputs['PredictorModel'] = try_set( obj=predictor_model, none_acceptable=False, is_of_type=str)", "typical value to consider: sum(negative cases) / sum(positive cases). (inputs).", "none_acceptable=True, is_of_type=str, is_column=True) if normalize_features is not None: inputs['NormalizeFeatures'] =", "= try_set( obj=use_categorical_split, none_acceptable=True, is_of_type=bool) if handle_missing_value is not None:", "is not None: inputs['MaximumBinCountPerFeature'] = try_set( obj=maximum_bin_count_per_feature, none_acceptable=True, is_of_type=numbers.Real) if", "None: inputs['Silent'] = try_set( obj=silent, none_acceptable=True, is_of_type=bool) if number_of_threads is", "is not None: outputs['PredictorModel'] = try_set( obj=predictor_model, none_acceptable=False, is_of_type=str) input_variables", "of early stopping, 0 will disable it. (inputs). :param batch_size:", "is_of_type=bool) if number_of_threads is not None: inputs['NumberOfThreads'] = try_set( obj=number_of_threads,", "classes. 
A typical value to consider: sum(negative cases) / sum(positive", "None: inputs['EarlyStoppingRound'] = try_set( obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real) if batch_size is", "None: inputs['MinimumExampleCountPerGroup'] = try_set( obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0,", "batch_size is not None: inputs['BatchSize'] = try_set( obj=batch_size, none_acceptable=True, is_of_type=numbers.Real)", "number of instances needed in a child. (inputs). :param feature_column_name:", "edit by hand \"\"\" Trainers.LightGbmBinaryClassifier \"\"\" import numbers from ..utils.entrypoints", "= try_set( obj=predictor_model, none_acceptable=False, is_of_type=str) input_variables = { x for", "= try_set( obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real) if sigmoid is not None:", "none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if maximum_categorical_split_point_count is", "(inputs). :param training_data: The data to be used for training", "inputs['FeatureColumnName'] = try_set( obj=feature_column_name, none_acceptable=True, is_of_type=str, is_column=True) if booster is", "None: inputs['RowGroupColumnName'] = try_set( obj=row_group_column_name, none_acceptable=True, is_of_type=str, is_column=True) if normalize_features", "be gbtree, gblinear or dart. gbtree and dart use tree", "balanced. (inputs). :param weight_of_positive_examples: Control the balance of positive and", "use for example groupId (inputs). :param normalize_features: Normalize option for", "minimum_example_count_per_leaf: Minimum number of instances needed in a child. (inputs).", "binary classification when training data is not balanced. (inputs). :param", "minimum_example_count_per_group: Minimum number of instances per categorical group. (inputs). :param", "'Auto', 'Yes']) if caching is not None: inputs['Caching'] = try_set(", "to use for labels (inputs). :param example_weight_column_name: Column to use", "not None: inputs['ParallelTrainer'] = try_set( obj=parallel_trainer, none_acceptable=True, is_of_type=dict) if predictor_model", ":param batch_size: Number of entries in a batch when loading", "obj=use_categorical_split, none_acceptable=True, is_of_type=bool) if handle_missing_value is not None: inputs['HandleMissingValue'] =", "x.startswith(\"$\")} output_variables = { x for x in unlist(outputs.values()) if", "(inputs). :param sigmoid: Parameter for the sigmoid function. (inputs). :param", "is_of_type=str, is_column=True) if normalize_features is not None: inputs['NormalizeFeatures'] = try_set(", "categorical_smoothing is not None: inputs['CategoricalSmoothing'] = try_set( obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real,", ":param learning_rate: Shrinkage rate for trees, used to prevent over-fitting.", "not None: inputs['UnbalancedSets'] = try_set( obj=unbalanced_sets, none_acceptable=True, is_of_type=bool) if weight_of_positive_examples", "handle_missing_value: Enable special handling of missing value or not. (inputs).", "cases). (inputs). :param sigmoid: Parameter for the sigmoid function. (inputs).", "(inputs). 
:param use_zero_as_missing_value: Enable usage of zero (0) as missing", "= try_set( obj=booster, none_acceptable=True, is_of_type=dict) if label_column_name is not None:", "= try_set( obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool) if minimum_example_count_per_group is not None:", "import numbers from ..utils.entrypoints import EntryPoint from ..utils.utils import try_set,", "not None: inputs['UseZeroAsMissingValue'] = try_set( obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool) if minimum_example_count_per_group", "Evaluation metrics. (inputs). :param maximum_bin_count_per_feature: Maximum number of bucket bin", "obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real) if feature_column_name is not None: inputs['FeatureColumnName'] =", "number of bucket bin for features. (inputs). :param verbose: Verbose", "learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None, label_column_name='Label', example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto', caching='Auto',", "for trees, used to prevent over-fitting. Range: (0,1]. (inputs). :param", "if isinstance(x, str) and x.startswith(\"$\")} entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs,", "None: inputs['LabelColumnName'] = try_set( obj=label_column_name, none_acceptable=True, is_of_type=str, is_column=True) if example_weight_column_name", "unbalanced_sets is not None: inputs['UnbalancedSets'] = try_set( obj=unbalanced_sets, none_acceptable=True, is_of_type=bool)", "Parameter for the sigmoid function. (inputs). :param evaluation_metric: Evaluation metrics.", "valid_range={ 'Inf': 0, 'Max': 2147483647}) if maximum_categorical_split_point_count is not None:", "not. (inputs). :param handle_missing_value: Enable special handling of missing value", "valid_range={'Min': 0.0}) if l2_categorical_regularization is not None: inputs['L2CategoricalRegularization'] = try_set(", "for categorical split. (inputs). :param seed: Sets the random seed", "model while gblinear uses linear function. (inputs). :param label_column_name: Column", "number of categorical thresholds. (inputs). :param categorical_smoothing: Lapalace smooth term", "categories. (inputs). :param l2_categorical_regularization: L2 Regularization for categorical split. (inputs).", "= { x for x in unlist(outputs.values()) if isinstance(x, str)", "none_acceptable=True, is_of_type=str, is_column=True) if example_weight_column_name is not None: inputs['ExampleWeightColumnName'] =", "example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto', caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False,", "training data (inputs). 
:param unbalanced_sets: Use for binary classification when", "none_acceptable=True, is_of_type=numbers.Real) if verbose is not None: inputs['Verbose'] = try_set(", "None: inputs['TrainingData'] = try_set( obj=training_data, none_acceptable=False, is_of_type=str) if learning_rate is", "handle_missing_value is not None: inputs['HandleMissingValue'] = try_set( obj=handle_missing_value, none_acceptable=True, is_of_type=bool)", "is_of_type=bool) if silent is not None: inputs['Silent'] = try_set( obj=silent,", "A typical value to consider: sum(negative cases) / sum(positive cases).", "\"\"\" import numbers from ..utils.entrypoints import EntryPoint from ..utils.utils import", "groupId (inputs). :param normalize_features: Normalize option for the feature column", "obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool) if minimum_example_count_per_group is not None: inputs['MinimumExampleCountPerGroup'] =", "if predictor_model is not None: outputs['PredictorModel'] = try_set( obj=predictor_model, none_acceptable=False,", "(inputs). :param categorical_smoothing: Lapalace smooth term in categorical feature spilt.", "gbtree, gblinear or dart. gbtree and dart use tree based", "feature_column_name is not None: inputs['FeatureColumnName'] = try_set( obj=feature_column_name, none_acceptable=True, is_of_type=str,", "Learning Algorithm (inputs). :param predictor_model: The trained model (outputs). \"\"\"", "negative weights, useful for unbalanced classes. A typical value to", "maximum_bin_count_per_feature=255, verbose=False, silent=True, number_of_threads=None, early_stopping_round=0, batch_size=1048576, use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100,", "is not None: inputs['RowGroupColumnName'] = try_set( obj=row_group_column_name, none_acceptable=True, is_of_type=str, is_column=True)", "EntryPoint from ..utils.utils import try_set, unlist def trainers_lightgbmbinaryclassifier( training_data, predictor_model=None,", "for features (inputs). :param booster: Which booster to use, can", "entries in a batch when loading data. (inputs). :param use_categorical_split:", "inputs['Sigmoid'] = try_set( obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real) if evaluation_metric is not", "use_zero_as_missing_value: Enable usage of zero (0) as missing value. (inputs).", ":param minimum_example_count_per_leaf: Minimum number of instances needed in a child.", "obj=handle_missing_value, none_acceptable=True, is_of_type=bool) if use_zero_as_missing_value is not None: inputs['UseZeroAsMissingValue'] =", "obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max': 2147483647}) if maximum_categorical_split_point_count", "learning_rate: Shrinkage rate for trees, used to prevent over-fitting. Range:", "try_set( obj=row_group_column_name, none_acceptable=True, is_of_type=str, is_column=True) if normalize_features is not None:", "(inputs). :param use_categorical_split: Enable categorical split or not. (inputs). :param", "**Description** Train a LightGBM binary classification model. 
:param number_of_iterations: Number", "use_categorical_split is not None: inputs['UseCategoricalSplit'] = try_set( obj=use_categorical_split, none_acceptable=True, is_of_type=bool)", "not None: inputs['NumberOfLeaves'] = try_set( obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real) if minimum_example_count_per_leaf", "balance of positive and negative weights, useful for unbalanced classes.", "to use. (inputs). :param parallel_trainer: Parallel LightGBM Learning Algorithm (inputs).", "not None: inputs['Silent'] = try_set( obj=silent, none_acceptable=True, is_of_type=bool) if number_of_threads", "if learning_rate is not None: inputs['LearningRate'] = try_set( obj=learning_rate, none_acceptable=True,", "try_set( obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if l2_categorical_regularization is not", "Range: (0,1]. (inputs). :param number_of_leaves: Maximum leaves for trees. (inputs).", "not None: inputs['Seed'] = try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real) if parallel_trainer", "batch_size=1048576, use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None, parallel_trainer=None,", "early_stopping_round: Rounds of early stopping, 0 will disable it. (inputs).", "if unbalanced_sets is not None: inputs['UnbalancedSets'] = try_set( obj=unbalanced_sets, none_acceptable=True,", "obj=training_data, none_acceptable=False, is_of_type=str) if learning_rate is not None: inputs['LearningRate'] =", "is not None: inputs['Seed'] = try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real) if", "0.0}) if seed is not None: inputs['Seed'] = try_set( obj=seed,", "row_group_column_name: Column to use for example groupId (inputs). :param normalize_features:", "is_of_type=numbers.Real, valid_range={'Min': 0.0}) if l2_categorical_regularization is not None: inputs['L2CategoricalRegularization'] =", "is_of_type=dict) if label_column_name is not None: inputs['LabelColumnName'] = try_set( obj=label_column_name,", "will disable it. (inputs). :param batch_size: Number of entries in", "x in unlist(inputs.values()) if isinstance(x, str) and x.startswith(\"$\")} output_variables =", "Column to use for example weight (inputs). :param row_group_column_name: Column", "normalize_features: Normalize option for the feature column (inputs). :param caching:", "if maximum_bin_count_per_feature is not None: inputs['MaximumBinCountPerFeature'] = try_set( obj=maximum_bin_count_per_feature, none_acceptable=True,", "predictor_model: The trained model (outputs). \"\"\" entrypoint_name = 'Trainers.LightGbmBinaryClassifier' inputs", "is_of_type=bool) if use_zero_as_missing_value is not None: inputs['UseZeroAsMissingValue'] = try_set( obj=use_zero_as_missing_value,", "weight_of_positive_examples: Control the balance of positive and negative weights, useful", "is not None: inputs['MinimumExampleCountPerGroup'] = try_set( obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real, valid_range={", "Algorithm (inputs). :param predictor_model: The trained model (outputs). \"\"\" entrypoint_name", "not None: inputs['Caching'] = try_set( obj=caching, none_acceptable=True, is_of_type=str, values=[ 'Auto',", "of small categories. (inputs). 
:param l2_categorical_regularization: L2 Regularization for categorical", "2147483647}) if maximum_categorical_split_point_count is not None: inputs['MaximumCategoricalSplitPointCount'] = try_set( obj=maximum_categorical_split_point_count,", "if label_column_name is not None: inputs['LabelColumnName'] = try_set( obj=label_column_name, none_acceptable=True,", "= try_set( obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real) if early_stopping_round is not None:", "instances per categorical group. (inputs). :param maximum_categorical_split_point_count: Max number of", "inputs['NormalizeFeatures'] = try_set( obj=normalize_features, none_acceptable=True, is_of_type=str, values=[ 'No', 'Warn', 'Auto',", "'Auto', 'Memory', 'None']) if unbalanced_sets is not None: inputs['UnbalancedSets'] =", "based model while gblinear uses linear function. (inputs). :param label_column_name:", "L2 Regularization for categorical split. (inputs). :param seed: Sets the", "none_acceptable=True, is_of_type=bool) if weight_of_positive_examples is not None: inputs['WeightOfPositiveExamples'] = try_set(", "Minimum number of instances needed in a child. (inputs). :param", "early_stopping_round=0, batch_size=1048576, use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0, seed=None,", "a LightGBM binary classification model. :param number_of_iterations: Number of iterations.", "not None: inputs['EvaluationMetric'] = try_set( obj=evaluation_metric, none_acceptable=True, is_of_type=str, values=[ 'None',", "option for the feature column (inputs). :param caching: Whether trainer", "evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False, silent=True, number_of_threads=None, early_stopping_round=0, batch_size=1048576, use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False,", "a batch when loading data. (inputs). :param use_categorical_split: Enable categorical", "should cache input training data (inputs). :param unbalanced_sets: Use for", "is not None: inputs['NumberOfIterations'] = try_set( obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real) if", "learning_rate is not None: inputs['LearningRate'] = try_set( obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real)", "if l2_categorical_regularization is not None: inputs['L2CategoricalRegularization'] = try_set( obj=l2_categorical_regularization, none_acceptable=True,", "Column to use for example groupId (inputs). :param normalize_features: Normalize", "= try_set( obj=example_weight_column_name, none_acceptable=True, is_of_type=str, is_column=True) if row_group_column_name is not", "number_of_iterations=100, learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None, label_column_name='Label', example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto',", "is not balanced. (inputs). 
:param weight_of_positive_examples: Control the balance of", "silent=True, number_of_threads=None, early_stopping_round=0, batch_size=1048576, use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0,", "trainers_lightgbmbinaryclassifier( training_data, predictor_model=None, number_of_iterations=100, learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None, label_column_name='Label',", "unlist(inputs.values()) if isinstance(x, str) and x.startswith(\"$\")} output_variables = { x", "parallel threads used to run LightGBM. (inputs). :param early_stopping_round: Rounds", "none_acceptable=True, is_of_type=numbers.Real) if sigmoid is not None: inputs['Sigmoid'] = try_set(", "run LightGBM. (inputs). :param early_stopping_round: Rounds of early stopping, 0", "example weight (inputs). :param row_group_column_name: Column to use for example", "sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False, silent=True, number_of_threads=None, early_stopping_round=0, batch_size=1048576, use_categorical_split=None, handle_missing_value=True,", "(inputs). :param number_of_threads: Number of parallel threads used to run", "early stopping, 0 will disable it. (inputs). :param batch_size: Number", "if booster is not None: inputs['Booster'] = try_set( obj=booster, none_acceptable=True,", "label_column_name is not None: inputs['LabelColumnName'] = try_set( obj=label_column_name, none_acceptable=True, is_of_type=str,", "input training data (inputs). :param unbalanced_sets: Use for binary classification", "unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255, verbose=False, silent=True, number_of_threads=None, early_stopping_round=0, batch_size=1048576,", "inputs['EvaluationMetric'] = try_set( obj=evaluation_metric, none_acceptable=True, is_of_type=str, values=[ 'None', 'Default', 'Logloss',", "from ..utils.entrypoints import EntryPoint from ..utils.utils import try_set, unlist def", "if evaluation_metric is not None: inputs['EvaluationMetric'] = try_set( obj=evaluation_metric, none_acceptable=True,", "to consider: sum(negative cases) / sum(positive cases). (inputs). :param sigmoid:", "None: inputs['HandleMissingValue'] = try_set( obj=handle_missing_value, none_acceptable=True, is_of_type=bool) if use_zero_as_missing_value is", "The trained model (outputs). \"\"\" entrypoint_name = 'Trainers.LightGbmBinaryClassifier' inputs =", "Rounds of early stopping, 0 will disable it. (inputs). :param", "= try_set( obj=normalize_features, none_acceptable=True, is_of_type=str, values=[ 'No', 'Warn', 'Auto', 'Yes'])", "not None: inputs['ExampleWeightColumnName'] = try_set( obj=example_weight_column_name, none_acceptable=True, is_of_type=str, is_column=True) if", "inputs['BatchSize'] = try_set( obj=batch_size, none_acceptable=True, is_of_type=numbers.Real) if use_categorical_split is not", "is not None: inputs['MinimumExampleCountPerLeaf'] = try_set( obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real) if", "try_set( obj=caching, none_acceptable=True, is_of_type=str, values=[ 'Auto', 'Memory', 'None']) if unbalanced_sets", "for binary classification when training data is not balanced. (inputs).", "use_categorical_split: Enable categorical split or not. (inputs). 
:param handle_missing_value: Enable", "(inputs). :param feature_column_name: Column to use for features (inputs). :param", "= try_set( obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real) if evaluation_metric is not None:", "the random seed for LightGBM to use. (inputs). :param parallel_trainer:", "is not None: inputs['CategoricalSmoothing'] = try_set( obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min':", "\"\"\" entrypoint_name = 'Trainers.LightGbmBinaryClassifier' inputs = {} outputs = {}", "features. (inputs). :param verbose: Verbose (inputs). :param silent: Printing running", "obj=verbose, none_acceptable=True, is_of_type=bool) if silent is not None: inputs['Silent'] =", "metrics. (inputs). :param maximum_bin_count_per_feature: Maximum number of bucket bin for", "trainer should cache input training data (inputs). :param unbalanced_sets: Use", "is not None: inputs['NormalizeFeatures'] = try_set( obj=normalize_features, none_acceptable=True, is_of_type=str, values=[", "try_set( obj=evaluation_metric, none_acceptable=True, is_of_type=str, values=[ 'None', 'Default', 'Logloss', 'Error', 'AreaUnderCurve'])", "not None: inputs['FeatureColumnName'] = try_set( obj=feature_column_name, none_acceptable=True, is_of_type=str, is_column=True) if", "number_of_iterations is not None: inputs['NumberOfIterations'] = try_set( obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real)", "do not edit by hand \"\"\" Trainers.LightGbmBinaryClassifier \"\"\" import numbers", "to prevent over-fitting. Range: (0,1]. (inputs). :param number_of_leaves: Maximum leaves", "or not. (inputs). :param handle_missing_value: Enable special handling of missing", "try_set( obj=unbalanced_sets, none_acceptable=True, is_of_type=bool) if weight_of_positive_examples is not None: inputs['WeightOfPositiveExamples']", "for x in unlist(inputs.values()) if isinstance(x, str) and x.startswith(\"$\")} output_variables", "none_acceptable=True, is_of_type=bool) if handle_missing_value is not None: inputs['HandleMissingValue'] = try_set(", "data. (inputs). :param use_categorical_split: Enable categorical split or not. (inputs).", "None: inputs['MaximumCategoricalSplitPointCount'] = try_set( obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0,", "None: inputs['UseCategoricalSplit'] = try_set( obj=use_categorical_split, none_acceptable=True, is_of_type=bool) if handle_missing_value is", "(inputs). :param minimum_example_count_per_group: Minimum number of instances per categorical group.", "is not None: inputs['MaximumCategoricalSplitPointCount'] = try_set( obj=maximum_categorical_split_point_count, none_acceptable=True, is_of_type=numbers.Real, valid_range={", "inputs['LearningRate'] = try_set( obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real) if number_of_leaves is not", "try_set( obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool) if minimum_example_count_per_group is not None: inputs['MinimumExampleCountPerGroup']", "classification model. :param number_of_iterations: Number of iterations. (inputs). :param training_data:", ":param handle_missing_value: Enable special handling of missing value or not.", "not None: inputs['CategoricalSmoothing'] = try_set( obj=categorical_smoothing, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0})", "missing value or not. (inputs). 
:param use_zero_as_missing_value: Enable usage of", "None: inputs['NumberOfLeaves'] = try_set( obj=number_of_leaves, none_acceptable=True, is_of_type=numbers.Real) if minimum_example_count_per_leaf is", "try_set( obj=booster, none_acceptable=True, is_of_type=dict) if label_column_name is not None: inputs['LabelColumnName']", "is_column=True) if booster is not None: inputs['Booster'] = try_set( obj=booster,", "categorical split. (inputs). :param seed: Sets the random seed for", "sigmoid function. (inputs). :param evaluation_metric: Evaluation metrics. (inputs). :param maximum_bin_count_per_feature:", "obj=predictor_model, none_acceptable=False, is_of_type=str) input_variables = { x for x in", "cases) / sum(positive cases). (inputs). :param sigmoid: Parameter for the", "seed: Sets the random seed for LightGBM to use. (inputs).", "tree based model while gblinear uses linear function. (inputs). :param", "is_of_type=numbers.Real) if use_categorical_split is not None: inputs['UseCategoricalSplit'] = try_set( obj=use_categorical_split,", "if use_zero_as_missing_value is not None: inputs['UseZeroAsMissingValue'] = try_set( obj=use_zero_as_missing_value, none_acceptable=True,", "none_acceptable=True, is_of_type=numbers.Real) if training_data is not None: inputs['TrainingData'] = try_set(", "while gblinear uses linear function. (inputs). :param label_column_name: Column to", "try_set( obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real) if evaluation_metric is not None: inputs['EvaluationMetric']", "parallel_trainer is not None: inputs['ParallelTrainer'] = try_set( obj=parallel_trainer, none_acceptable=True, is_of_type=dict)", "gblinear uses linear function. (inputs). :param label_column_name: Column to use", "unlist def trainers_lightgbmbinaryclassifier( training_data, predictor_model=None, number_of_iterations=100, learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features',", "None: inputs['Caching'] = try_set( obj=caching, none_acceptable=True, is_of_type=str, values=[ 'Auto', 'Memory',", "= try_set( obj=label_column_name, none_acceptable=True, is_of_type=str, is_column=True) if example_weight_column_name is not", "inputs['EarlyStoppingRound'] = try_set( obj=early_stopping_round, none_acceptable=True, is_of_type=numbers.Real) if batch_size is not", "if parallel_trainer is not None: inputs['ParallelTrainer'] = try_set( obj=parallel_trainer, none_acceptable=True,", ":param row_group_column_name: Column to use for example groupId (inputs). :param", "thresholds. (inputs). :param categorical_smoothing: Lapalace smooth term in categorical feature", "by tools/entrypoint_compiler.py: do not edit by hand \"\"\" Trainers.LightGbmBinaryClassifier \"\"\"", "Shrinkage rate for trees, used to prevent over-fitting. Range: (0,1].", "Regularization for categorical split. (inputs). :param seed: Sets the random", "{} if number_of_iterations is not None: inputs['NumberOfIterations'] = try_set( obj=number_of_iterations,", "is not None: inputs['Sigmoid'] = try_set( obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real) if", "l2_categorical_regularization is not None: inputs['L2CategoricalRegularization'] = try_set( obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real,", "Column to use for features (inputs). :param booster: Which booster", "Column to use for labels (inputs). 
:param example_weight_column_name: Column to", "is_of_type=numbers.Real) if training_data is not None: inputs['TrainingData'] = try_set( obj=training_data,", "training (inputs). :param learning_rate: Shrinkage rate for trees, used to", "by hand \"\"\" Trainers.LightGbmBinaryClassifier \"\"\" import numbers from ..utils.entrypoints import", "None: inputs['MinimumExampleCountPerLeaf'] = try_set( obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real) if feature_column_name is", "None: inputs['NumberOfIterations'] = try_set( obj=number_of_iterations, none_acceptable=True, is_of_type=numbers.Real) if training_data is", "= try_set( obj=seed, none_acceptable=True, is_of_type=numbers.Real) if parallel_trainer is not None:", "of zero (0) as missing value. (inputs). :param minimum_example_count_per_group: Minimum", "number_of_threads=None, early_stopping_round=0, batch_size=1048576, use_categorical_split=None, handle_missing_value=True, use_zero_as_missing_value=False, minimum_example_count_per_group=100, maximum_categorical_split_point_count=32, categorical_smoothing=10.0, l2_categorical_regularization=10.0,", "not None: inputs['NumberOfThreads'] = try_set( obj=number_of_threads, none_acceptable=True, is_of_type=numbers.Real) if early_stopping_round", "instances needed in a child. (inputs). :param feature_column_name: Column to", "to use for example weight (inputs). :param row_group_column_name: Column to", "inputs['RowGroupColumnName'] = try_set( obj=row_group_column_name, none_acceptable=True, is_of_type=str, is_column=True) if normalize_features is", "try_set, unlist def trainers_lightgbmbinaryclassifier( training_data, predictor_model=None, number_of_iterations=100, learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None,", "maximum_categorical_split_point_count: Max number of categorical thresholds. (inputs). :param categorical_smoothing: Lapalace", "not balanced. (inputs). :param weight_of_positive_examples: Control the balance of positive", "Minimum number of instances per categorical group. (inputs). :param maximum_categorical_split_point_count:", "caching: Whether trainer should cache input training data (inputs). :param", "not None: inputs['RowGroupColumnName'] = try_set( obj=row_group_column_name, none_acceptable=True, is_of_type=str, is_column=True) if", "is_of_type=numbers.Real) if early_stopping_round is not None: inputs['EarlyStoppingRound'] = try_set( obj=early_stopping_round,", "Normalize option for the feature column (inputs). :param caching: Whether", "Control the balance of positive and negative weights, useful for", "Lapalace smooth term in categorical feature spilt. Avoid the bias", "in a child. (inputs). :param feature_column_name: Column to use for", ":param l2_categorical_regularization: L2 Regularization for categorical split. (inputs). :param seed:", "none_acceptable=False, is_of_type=str) if learning_rate is not None: inputs['LearningRate'] = try_set(", "use for features (inputs). :param booster: Which booster to use,", "bias of small categories. (inputs). :param l2_categorical_regularization: L2 Regularization for", "try_set( obj=training_data, none_acceptable=False, is_of_type=str) if learning_rate is not None: inputs['LearningRate']", "gbtree and dart use tree based model while gblinear uses", "**params): \"\"\" **Description** Train a LightGBM binary classification model. 
:param", "inputs['Verbose'] = try_set( obj=verbose, none_acceptable=True, is_of_type=bool) if silent is not", "inputs['MinimumExampleCountPerGroup'] = try_set( obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf': 0, 'Max':", "(inputs). :param silent: Printing running messages. (inputs). :param number_of_threads: Number", "(inputs). :param seed: Sets the random seed for LightGBM to", "'No', 'Warn', 'Auto', 'Yes']) if caching is not None: inputs['Caching']", "obj=sigmoid, none_acceptable=True, is_of_type=numbers.Real) if evaluation_metric is not None: inputs['EvaluationMetric'] =", "seed=None, parallel_trainer=None, **params): \"\"\" **Description** Train a LightGBM binary classification", "to use for features (inputs). :param booster: Which booster to", "from ..utils.utils import try_set, unlist def trainers_lightgbmbinaryclassifier( training_data, predictor_model=None, number_of_iterations=100,", "entrypoint_name = 'Trainers.LightGbmBinaryClassifier' inputs = {} outputs = {} if", "booster is not None: inputs['Booster'] = try_set( obj=booster, none_acceptable=True, is_of_type=dict)", ":param feature_column_name: Column to use for features (inputs). :param booster:", "in a batch when loading data. (inputs). :param use_categorical_split: Enable", "- Generated by tools/entrypoint_compiler.py: do not edit by hand \"\"\"", "is not None: inputs['LearningRate'] = try_set( obj=learning_rate, none_acceptable=True, is_of_type=numbers.Real) if", "data to be used for training (inputs). :param learning_rate: Shrinkage", "for example groupId (inputs). :param normalize_features: Normalize option for the", "obj=row_group_column_name, none_acceptable=True, is_of_type=str, is_column=True) if normalize_features is not None: inputs['NormalizeFeatures']", "is not None: inputs['L2CategoricalRegularization'] = try_set( obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min':", "str) and x.startswith(\"$\")} output_variables = { x for x in", "None: inputs['WeightOfPositiveExamples'] = try_set( obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real) if sigmoid is", ":param minimum_example_count_per_group: Minimum number of instances per categorical group. (inputs).", "None: inputs['BatchSize'] = try_set( obj=batch_size, none_acceptable=True, is_of_type=numbers.Real) if use_categorical_split is", ":param use_categorical_split: Enable categorical split or not. (inputs). :param handle_missing_value:", "number of instances per categorical group. (inputs). 
:param maximum_categorical_split_point_count: Max", "numbers from ..utils.entrypoints import EntryPoint from ..utils.utils import try_set, unlist", "and dart use tree based model while gblinear uses linear", "if batch_size is not None: inputs['BatchSize'] = try_set( obj=batch_size, none_acceptable=True,", "# - Generated by tools/entrypoint_compiler.py: do not edit by hand", "is_of_type=str, is_column=True) if row_group_column_name is not None: inputs['RowGroupColumnName'] = try_set(", "Trainers.LightGbmBinaryClassifier \"\"\" import numbers from ..utils.entrypoints import EntryPoint from ..utils.utils", "is not None: inputs['TrainingData'] = try_set( obj=training_data, none_acceptable=False, is_of_type=str) if", "not None: inputs['MinimumExampleCountPerGroup'] = try_set( obj=minimum_example_count_per_group, none_acceptable=True, is_of_type=numbers.Real, valid_range={ 'Inf':", "not None: inputs['L2CategoricalRegularization'] = try_set( obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0})", "if maximum_categorical_split_point_count is not None: inputs['MaximumCategoricalSplitPointCount'] = try_set( obj=maximum_categorical_split_point_count, none_acceptable=True,", "label_column_name='Label', example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto', caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss', maximum_bin_count_per_feature=255,", "booster=None, label_column_name='Label', example_weight_column_name=None, row_group_column_name=None, normalize_features='Auto', caching='Auto', unbalanced_sets=False, weight_of_positive_examples=1.0, sigmoid=0.5, evaluation_metric='Logloss',", "Maximum number of bucket bin for features. (inputs). :param verbose:", "none_acceptable=True, is_of_type=numbers.Real) if parallel_trainer is not None: inputs['ParallelTrainer'] = try_set(", ":param caching: Whether trainer should cache input training data (inputs).", "parallel_trainer: Parallel LightGBM Learning Algorithm (inputs). :param predictor_model: The trained", "try_set( obj=normalize_features, none_acceptable=True, is_of_type=str, values=[ 'No', 'Warn', 'Auto', 'Yes']) if", "classification when training data is not balanced. (inputs). :param weight_of_positive_examples:", "sigmoid: Parameter for the sigmoid function. (inputs). :param evaluation_metric: Evaluation", "(0) as missing value. (inputs). :param minimum_example_count_per_group: Minimum number of", "is not None: inputs['Silent'] = try_set( obj=silent, none_acceptable=True, is_of_type=bool) if", "try_set( obj=verbose, none_acceptable=True, is_of_type=bool) if silent is not None: inputs['Silent']", "of positive and negative weights, useful for unbalanced classes. A", "silent: Printing running messages. (inputs). :param number_of_threads: Number of parallel", "if seed is not None: inputs['Seed'] = try_set( obj=seed, none_acceptable=True,", ":param verbose: Verbose (inputs). :param silent: Printing running messages. (inputs).", "messages. (inputs). :param number_of_threads: Number of parallel threads used to", "split or not. (inputs). :param handle_missing_value: Enable special handling of", "none_acceptable=True, is_of_type=bool) if number_of_threads is not None: inputs['NumberOfThreads'] = try_set(", "bucket bin for features. (inputs). :param verbose: Verbose (inputs). 
:param", "obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if seed is not None:", "def trainers_lightgbmbinaryclassifier( training_data, predictor_model=None, number_of_iterations=100, learning_rate=None, number_of_leaves=None, minimum_example_count_per_leaf=None, feature_column_name='Features', booster=None,", "values=[ 'No', 'Warn', 'Auto', 'Yes']) if caching is not None:", "{ x for x in unlist(inputs.values()) if isinstance(x, str) and", "for training (inputs). :param learning_rate: Shrinkage rate for trees, used", "hand \"\"\" Trainers.LightGbmBinaryClassifier \"\"\" import numbers from ..utils.entrypoints import EntryPoint", "0 will disable it. (inputs). :param batch_size: Number of entries", "inputs['UseCategoricalSplit'] = try_set( obj=use_categorical_split, none_acceptable=True, is_of_type=bool) if handle_missing_value is not", "is_of_type=str) input_variables = { x for x in unlist(inputs.values()) if", "dart. gbtree and dart use tree based model while gblinear", "not None: inputs['HandleMissingValue'] = try_set( obj=handle_missing_value, none_acceptable=True, is_of_type=bool) if use_zero_as_missing_value", "not None: inputs['NormalizeFeatures'] = try_set( obj=normalize_features, none_acceptable=True, is_of_type=str, values=[ 'No',", "not edit by hand \"\"\" Trainers.LightGbmBinaryClassifier \"\"\" import numbers from", "normalize_features is not None: inputs['NormalizeFeatures'] = try_set( obj=normalize_features, none_acceptable=True, is_of_type=str,", ":param booster: Which booster to use, can be gbtree, gblinear", "for LightGBM to use. (inputs). :param parallel_trainer: Parallel LightGBM Learning", ":param number_of_iterations: Number of iterations. (inputs). :param training_data: The data", "example groupId (inputs). :param normalize_features: Normalize option for the feature", "in unlist(inputs.values()) if isinstance(x, str) and x.startswith(\"$\")} output_variables = {", "x.startswith(\"$\")} entrypoint = EntryPoint( name=entrypoint_name, inputs=inputs, outputs=outputs, input_variables=input_variables, output_variables=output_variables) return", "inputs['MinimumExampleCountPerLeaf'] = try_set( obj=minimum_example_count_per_leaf, none_acceptable=True, is_of_type=numbers.Real) if feature_column_name is not", "use. (inputs). :param parallel_trainer: Parallel LightGBM Learning Algorithm (inputs). :param", ":param maximum_bin_count_per_feature: Maximum number of bucket bin for features. (inputs).", "value to consider: sum(negative cases) / sum(positive cases). (inputs). :param", "= try_set( obj=parallel_trainer, none_acceptable=True, is_of_type=dict) if predictor_model is not None:", "(inputs). :param maximum_bin_count_per_feature: Maximum number of bucket bin for features.", "Whether trainer should cache input training data (inputs). :param unbalanced_sets:", "(inputs). :param early_stopping_round: Rounds of early stopping, 0 will disable", "gblinear or dart. gbtree and dart use tree based model", "None: inputs['FeatureColumnName'] = try_set( obj=feature_column_name, none_acceptable=True, is_of_type=str, is_column=True) if booster", "'Warn', 'Auto', 'Yes']) if caching is not None: inputs['Caching'] =", "is not None: inputs['UseZeroAsMissingValue'] = try_set( obj=use_zero_as_missing_value, none_acceptable=True, is_of_type=bool) if", ":param normalize_features: Normalize option for the feature column (inputs). :param", "unbalanced classes. 
A typical value to consider: sum(negative cases) /", "weight_of_positive_examples is not None: inputs['WeightOfPositiveExamples'] = try_set( obj=weight_of_positive_examples, none_acceptable=True, is_of_type=numbers.Real)", "'Logloss', 'Error', 'AreaUnderCurve']) if maximum_bin_count_per_feature is not None: inputs['MaximumBinCountPerFeature'] =", "(inputs). :param example_weight_column_name: Column to use for example weight (inputs).", "'Inf': 0, 'Max': 2147483647}) if maximum_categorical_split_point_count is not None: inputs['MaximumCategoricalSplitPointCount']", "import try_set, unlist def trainers_lightgbmbinaryclassifier( training_data, predictor_model=None, number_of_iterations=100, learning_rate=None, number_of_leaves=None,", "None: inputs['L2CategoricalRegularization'] = try_set( obj=l2_categorical_regularization, none_acceptable=True, is_of_type=numbers.Real, valid_range={'Min': 0.0}) if" ]
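# Illustrative usage sketch (an addition, not part of the generated file
# above): this builder is normally driven by nimbusml's experiment-graph
# machinery rather than called directly. The '$training_data' and
# '$predictor_model' names are hypothetical graph-variable placeholders;
# any string beginning with '$' is routed into input_variables /
# output_variables by the set comprehensions above.
if __name__ == '__main__':
    node = trainers_lightgbmbinaryclassifier(
        training_data='$training_data',
        predictor_model='$predictor_model',
        number_of_iterations=200,
        learning_rate=0.2,
        unbalanced_sets=True)
    # `node` is an EntryPoint ready to be added to an experiment graph.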
[ "for checking transformation algorithms (the models package). \"\"\" import logging", "import numpy as np from gensim.corpora.mmcorpus import MmCorpus from gensim.models", "rpmodel.RpModel(self.corpus, num_topics=2) # transform one document doc = list(self.corpus)[0] transformed", "easier equality tests expected = np.array([-0.70710677, 0.70710677]) self.assertTrue(np.allclose(vec, expected)) #", "empty vector def test_persistence_compressed(self): fname = get_tmpfile('gensim_models.tst.gz') model = rpmodel.RpModel(self.corpus,", "vector if __name__ == '__main__': logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',", "def test_persistence_compressed(self): fname = get_tmpfile('gensim_models.tst.gz') model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname)", "fname = get_tmpfile('gensim_models.tst.gz') model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 =", "python # -*- coding: utf-8 -*- # # Copyright (C)", "up to sign def test_persistence(self): fname = get_tmpfile('gensim_models.tst') model =", "-*- # # Copyright (C) 2010 <NAME> <<EMAIL>> # Licensed", "test_persistence(self): fname = get_tmpfile('gensim_models.tst') model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2", "fname = get_tmpfile('gensim_models.tst') model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 =", "rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname, mmap=None) self.assertEqual(model.num_topics, model2.num_topics) self.assertTrue(np.allclose(model.projection,", "one document doc = list(self.corpus)[0] transformed = model[doc] vec =", "HACK; set fixed seed so that we always get the", "model # HACK; set fixed seed so that we always", "algorithms (the models package). \"\"\" import logging import unittest import", "matutils from gensim.test.utils import datapath, get_tmpfile class TestRpModel(unittest.TestCase): def setUp(self):", "# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html \"\"\"", "same random matrix (and can compare against expected results) np.random.seed(13)", "tests for checking transformation algorithms (the models package). \"\"\" import", "= get_tmpfile('gensim_models.tst.gz') model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname,", "transformed = model[doc] vec = matutils.sparse2full(transformed, 2) # convert to", "[] self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector def", "equality tests expected = np.array([-0.70710677, 0.70710677]) self.assertTrue(np.allclose(vec, expected)) # transformed", "can compare against expected results) np.random.seed(13) model = rpmodel.RpModel(self.corpus, num_topics=2)", "num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname) self.assertEqual(model.num_topics, model2.num_topics) self.assertTrue(np.allclose(model.projection, model2.projection)) tstvec", "as np from gensim.corpora.mmcorpus import MmCorpus from gensim.models import rpmodel", "= model[doc] vec = matutils.sparse2full(transformed, 2) # convert to dense", "(the models package). 
\"\"\" import logging import unittest import numpy", "MmCorpus from gensim.models import rpmodel from gensim import matutils from", "# Copyright (C) 2010 <NAME> <<EMAIL>> # Licensed under the", "vector def test_persistence_compressed(self): fname = get_tmpfile('gensim_models.tst.gz') model = rpmodel.RpModel(self.corpus, num_topics=2)", "for easier equality tests expected = np.array([-0.70710677, 0.70710677]) self.assertTrue(np.allclose(vec, expected))", "import matutils from gensim.test.utils import datapath, get_tmpfile class TestRpModel(unittest.TestCase): def", "empty vector if __name__ == '__main__': logging.basicConfig(format='%(asctime)s : %(levelname)s :", "model.save(fname) model2 = rpmodel.RpModel.load(fname, mmap=None) self.assertEqual(model.num_topics, model2.num_topics) self.assertTrue(np.allclose(model.projection, model2.projection)) tstvec", "class TestRpModel(unittest.TestCase): def setUp(self): self.corpus = MmCorpus(datapath('testcorpus.mm')) def test_transform(self): #", "an empty vector def test_persistence_compressed(self): fname = get_tmpfile('gensim_models.tst.gz') model =", "must be equal up to sign def test_persistence(self): fname =", "results) np.random.seed(13) model = rpmodel.RpModel(self.corpus, num_topics=2) # transform one document", "- http://www.gnu.org/licenses/lgpl.html \"\"\" Automated tests for checking transformation algorithms (the", "transformed entries must be equal up to sign def test_persistence(self):", "\"\"\" Automated tests for checking transformation algorithms (the models package).", "expected results) np.random.seed(13) model = rpmodel.RpModel(self.corpus, num_topics=2) # transform one", "= list(self.corpus)[0] transformed = model[doc] vec = matutils.sparse2full(transformed, 2) #", "dense vector, for easier equality tests expected = np.array([-0.70710677, 0.70710677])", "numpy as np from gensim.corpora.mmcorpus import MmCorpus from gensim.models import", "gensim.corpora.mmcorpus import MmCorpus from gensim.models import rpmodel from gensim import", "= [] self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector", "2010 <NAME> <<EMAIL>> # Licensed under the GNU LGPL v2.1", "= rpmodel.RpModel.load(fname, mmap=None) self.assertEqual(model.num_topics, model2.num_topics) self.assertTrue(np.allclose(model.projection, model2.projection)) tstvec = []", "rpmodel from gensim import matutils from gensim.test.utils import datapath, get_tmpfile", "# convert to dense vector, for easier equality tests expected", "always get the same random matrix (and can compare against", "v2.1 - http://www.gnu.org/licenses/lgpl.html \"\"\" Automated tests for checking transformation algorithms", "<<EMAIL>> # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html", "get_tmpfile class TestRpModel(unittest.TestCase): def setUp(self): self.corpus = MmCorpus(datapath('testcorpus.mm')) def test_transform(self):", "so that we always get the same random matrix (and", "= matutils.sparse2full(transformed, 2) # convert to dense vector, for easier", "datapath, get_tmpfile class TestRpModel(unittest.TestCase): def setUp(self): self.corpus = MmCorpus(datapath('testcorpus.mm')) def", "from gensim.models import rpmodel from gensim import matutils from gensim.test.utils", "equal up to sign def test_persistence(self): fname = get_tmpfile('gensim_models.tst') model", "model2 = rpmodel.RpModel.load(fname, mmap=None) self.assertEqual(model.num_topics, model2.num_topics) self.assertTrue(np.allclose(model.projection, model2.projection)) 
tstvec =", "try projecting an empty vector def test_persistence_compressed(self): fname = get_tmpfile('gensim_models.tst.gz')", "document doc = list(self.corpus)[0] transformed = model[doc] vec = matutils.sparse2full(transformed,", "# transformed entries must be equal up to sign def", "be equal up to sign def test_persistence(self): fname = get_tmpfile('gensim_models.tst')", "from gensim.test.utils import datapath, get_tmpfile class TestRpModel(unittest.TestCase): def setUp(self): self.corpus", "self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector if __name__", "__name__ == '__main__': logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG) unittest.main()", "gensim.test.utils import datapath, get_tmpfile class TestRpModel(unittest.TestCase): def setUp(self): self.corpus =", "self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector def test_persistence_compressed(self):", "np.array([-0.70710677, 0.70710677]) self.assertTrue(np.allclose(vec, expected)) # transformed entries must be equal", "# HACK; set fixed seed so that we always get", "vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for", "GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html \"\"\" Automated tests for checking", "under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html \"\"\" Automated tests", "import logging import unittest import numpy as np from gensim.corpora.mmcorpus", "np.random.seed(13) model = rpmodel.RpModel(self.corpus, num_topics=2) # transform one document doc", "0.70710677]) self.assertTrue(np.allclose(vec, expected)) # transformed entries must be equal up", "self.corpus = MmCorpus(datapath('testcorpus.mm')) def test_transform(self): # create the transformation model", "fixed seed so that we always get the same random", "compare against expected results) np.random.seed(13) model = rpmodel.RpModel(self.corpus, num_topics=2) #", "checking transformation algorithms (the models package). 
\"\"\" import logging import", "transformation model # HACK; set fixed seed so that we", "# transform one document doc = list(self.corpus)[0] transformed = model[doc]", "#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright", "[] self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector if", "import rpmodel from gensim import matutils from gensim.test.utils import datapath,", "get_tmpfile('gensim_models.tst.gz') model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname, mmap=None)", "= rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname, mmap=None) self.assertEqual(model.num_topics, model2.num_topics)", "get_tmpfile('gensim_models.tst') model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname) self.assertEqual(model.num_topics,", "model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname) self.assertEqual(model.num_topics, model2.num_topics)", "unittest import numpy as np from gensim.corpora.mmcorpus import MmCorpus from", "the transformation model # HACK; set fixed seed so that", "test_transform(self): # create the transformation model # HACK; set fixed", "mmap=None) self.assertEqual(model.num_topics, model2.num_topics) self.assertTrue(np.allclose(model.projection, model2.projection)) tstvec = [] self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))", "LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html \"\"\" Automated tests for checking transformation", "self.assertTrue(np.allclose(vec, expected)) # transformed entries must be equal up to", "matrix (and can compare against expected results) np.random.seed(13) model =", "matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality", "create the transformation model # HACK; set fixed seed so", "rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname) self.assertEqual(model.num_topics, model2.num_topics) self.assertTrue(np.allclose(model.projection, model2.projection))", "np from gensim.corpora.mmcorpus import MmCorpus from gensim.models import rpmodel from", "num_topics=2) model.save(fname) model2 = rpmodel.RpModel.load(fname, mmap=None) self.assertEqual(model.num_topics, model2.num_topics) self.assertTrue(np.allclose(model.projection, model2.projection))", "def test_persistence(self): fname = get_tmpfile('gensim_models.tst') model = rpmodel.RpModel(self.corpus, num_topics=2) model.save(fname)", "list(self.corpus)[0] transformed = model[doc] vec = matutils.sparse2full(transformed, 2) # convert", "doc = list(self.corpus)[0] transformed = model[doc] vec = matutils.sparse2full(transformed, 2)", "= MmCorpus(datapath('testcorpus.mm')) def test_transform(self): # create the transformation model #", "transform one document doc = list(self.corpus)[0] transformed = model[doc] vec", "expected = np.array([-0.70710677, 0.70710677]) self.assertTrue(np.allclose(vec, expected)) # transformed entries must", "model[doc] vec = matutils.sparse2full(transformed, 2) # convert to dense vector,", "projecting an empty vector def test_persistence_compressed(self): fname = get_tmpfile('gensim_models.tst.gz') model", "Copyright (C) 2010 <NAME> <<EMAIL>> # Licensed under the GNU", "# try projecting an empty vector if __name__ == '__main__':", "model = rpmodel.RpModel(self.corpus, num_topics=2) # transform one document doc =", "MmCorpus(datapath('testcorpus.mm')) def 
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html

"""
Automated tests for checking transformation algorithms (the models package).
"""

import logging
import unittest

import numpy as np

from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim import matutils
from gensim.test.utils import datapath, get_tmpfile


class TestRpModel(unittest.TestCase):
    def setUp(self):
        self.corpus = MmCorpus(datapath('testcorpus.mm'))

    def test_transform(self):
        # create the transformation model
        # HACK: set fixed seed so that we always get the same random matrix
        # (and can compare against expected results)
        np.random.seed(13)
        model = rpmodel.RpModel(self.corpus, num_topics=2)

        # transform one document
        doc = list(self.corpus)[0]
        transformed = model[doc]
        vec = matutils.sparse2full(transformed, 2)  # convert to dense vector, for easier equality tests

        expected = np.array([-0.70710677, 0.70710677])
        self.assertTrue(np.allclose(vec, expected))  # transformed entries must be equal up to sign

    def test_persistence(self):
        fname = get_tmpfile('gensim_models.tst')
        model = rpmodel.RpModel(self.corpus, num_topics=2)
        model.save(fname)
        model2 = rpmodel.RpModel.load(fname)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.projection, model2.projection))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector

    def test_persistence_compressed(self):
        fname = get_tmpfile('gensim_models.tst.gz')
        model = rpmodel.RpModel(self.corpus, num_topics=2)
        model.save(fname)
        model2 = rpmodel.RpModel.load(fname, mmap=None)
        self.assertEqual(model.num_topics, model2.num_topics)
        self.assertTrue(np.allclose(model.projection, model2.projection))
        tstvec = []
        self.assertTrue(np.allclose(model[tstvec], model2[tstvec]))  # try projecting an empty vector


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
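The tests above double as documentation for RpModel's API. A minimal usage sketch outside the test harness (not part of the original module; it only assumes gensim is installed and reuses the same bundled test corpus) looks like this:

# Minimal RpModel usage sketch (illustrative, not from the test module above).
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim.test.utils import datapath

corpus = MmCorpus(datapath('testcorpus.mm'))        # bag-of-words corpus
model = rpmodel.RpModel(corpus, num_topics=2)       # random projection to 2 dims
for doc in corpus:
    print(model[doc])  # each document as sparse (topic_id, weight) pairs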
import tensorflow as tf
from tensorflow import keras


class CondGeneratorModel(keras.Model):
    def __init__(self):
        super(CondGeneratorModel, self).__init__()
        # Expand 7*7*128 features into a (7,7,128) tensor
        self.dense_1 = keras.layers.Dense(7*7*256)
        self.reshape_1 = keras.layers.Reshape((7, 7, 256))
        # Expand (10,) to (7,7,1)
        self.embedder = keras.layers.Embedding(10, 100)
        self.dense_2 = keras.layers.Dense(7*7*256)
        # From (7,7,256) to (7,7,128)
        self.convt_1 = keras.layers.Conv2DTranspose(
            128, (5, 5), strides=1, padding='same', use_bias=False)
        self.convt_bn_1 = keras.layers.BatchNormalization()
        self.convt_relu_1 = keras.layers.LeakyReLU()
        # From (7,7,128) to (14,14,64)
        self.convt_2 = keras.layers.Conv2DTranspose(
            64, (5, 5), strides=2, padding='same', use_bias=False)
        self.convt_bn_2 = keras.layers.BatchNormalization()
        self.convt_relu_2 = keras.layers.LeakyReLU()
        # From (14,14,64) to (28,28,1)
        self.convt_out = keras.layers.Conv2DTranspose(
            1, (5, 5), strides=2, padding='same', use_bias=False)

    def call(self, inputs):
        feat_x = inputs[0]
        label = inputs[2]
        # Expand label input to be the same as latent feature
        label_x = self.embedder(label)
        label_x = self.dense_2(label_x)
        label_x = tf.squeeze(label_x, 1)
        # Expand features to image channels
        feat_x = self.dense_1(feat_x)
        # Combine latent feature and label input
        x = tf.math.multiply(feat_x, label_x)
        x = self.reshape_1(x)
        # From (7,7,256) to (7,7,128)
        x = self.convt_1(x)
        x = self.convt_bn_1(x)
        x = self.convt_relu_1(x)
        # From (7,7,128) to (14,14,64)
        x = self.convt_2(x)
        x = self.convt_bn_2(x)
        x = self.convt_relu_2(x)
        # From (14,14,64) to (28,28,1)
        x = self.convt_out(x)
        return [x, None, label]


class CondDiscriminatorModel(keras.Model):
    def __init__(self):
        super(CondDiscriminatorModel, self).__init__()
        self.embedder = keras.layers.Embedding(10, 100)
        self.expand_layer = keras.layers.Dense(28*28*1)
        self.reshape_layer = keras.layers.Reshape((28, 28, 1))
        self.conv_1 = keras.layers.Conv2D(
            64, (5, 5), strides=2,
            padding='same', input_shape=(28, 28, 1))
        self.relu_1 = keras.layers.LeakyReLU()
        self.drop_1 = keras.layers.Dropout(0.3)
        self.conv_2 = keras.layers.Conv2D(
            128, (5, 5), strides=2, padding='same')
        self.relu_2 = keras.layers.LeakyReLU()
        self.drop_2 = keras.layers.Dropout(0.3)
        self.flatten = keras.layers.Flatten()
        self.out = keras.layers.Dense(1)

    def call(self, inputs):
        images_x = inputs[0]
        labels = inputs[2]
        labels_x = self.embedder(labels)
        labels_x = self.expand_layer(labels_x)
        labels_x = self.reshape_layer(labels_x)
        x = tf.math.multiply(images_x, labels_x)
        x = self.conv_1(x)
        x = self.relu_1(x)
        x = self.drop_1(x)
        x = self.conv_2(x)
        x = self.relu_2(x)
        x = self.drop_2(x)
        x = self.flatten(x)
        x = self.out(x)
        return x
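Both models share an implicit calling convention: call() receives a three-element list, reading the latent features or images from inputs[0] and the class labels from inputs[2], with the middle slot unused. The driver below is a hypothetical sketch (not part of the original file); the batch size, the latent width of 100, and the random labels are illustrative assumptions.

# Hypothetical forward-pass driver for the two models above, illustrating the
# [features/images, None, labels] input convention used by call().
import tensorflow as tf

generator = CondGeneratorModel()
discriminator = CondDiscriminatorModel()

batch_size = 4
noise = tf.random.normal((batch_size, 100))  # latent vectors (width is an assumption)
labels = tf.random.uniform((batch_size, 1), minval=0, maxval=10, dtype=tf.int32)

gen_out = generator([noise, None, labels])   # returns [images, None, labels]
fake_images = gen_out[0]                     # shape (4, 28, 28, 1)
scores = discriminator([fake_images, None, labels])
print(fake_images.shape, scores.shape)       # (4, 28, 28, 1) (4, 1)

Multiplying the dense-projected labels into the latent vector (rather than concatenating them) is what makes both networks conditional: the same trick appears in the discriminator, where the embedded label is reshaped to (28, 28, 1) and multiplied into the input image.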
import base64
import datetime
import logging
import os
import time
from typing import List, Tuple

import structlog
import tenacity
from averbis import Pipeline
from fhir.resources.bundle import Bundle
from fhir.resources.codeableconcept import CodeableConcept
from fhir.resources.composition import Composition, CompositionSection
from fhir.resources.documentreference import DocumentReference
from fhir.resources.fhirtypes import DateTime
from fhir.resources.identifier import Identifier
from fhir.resources.reference import Reference
from fhir.resources.resource import Resource
from prometheus_client import Counter, Histogram, Summary
from tenacity.after import after_log

from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement
from ahd2fhir.utils.bundle_builder import BundleBuilder
from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions
from ahd2fhir.utils.device_builder import build_device
from ahd2fhir.utils.fhir_utils import sha256_of_identifier

MAPPING_FAILURES_COUNTER = Counter("mapping_failures", "Exceptions during mapping")
MAPPING_DURATION_SUMMARY = Histogram(
    "map_duration_seconds",
    "Time spent mapping",
    buckets=(
        0.05, 0.1, 0.5, 1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 21.0, 34.0, 55.0, "inf",
    ),
)
EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary(
    "extracted_resources", "Number of extracted resources for each processed document"
)
DOCUMENT_LENGTH_SUMMARY = Summary(
    "document_length",
    "Length of each processed document's text in characters",
)

DISCHARGE_SUMMARY_CONCEPT_TEXT = (
    "Clinical document Kind of document from LOINC Document Ontology"
)
DISCHARGE_SUMMARY_CONCEPT = CodeableConcept(
    **{
        "coding": [
            {
                "system": "http://loinc.org",
                "code": "74477-1",
                "display": DISCHARGE_SUMMARY_CONCEPT_TEXT,
            },
        ],
        "text": DISCHARGE_SUMMARY_CONCEPT_TEXT,
    }
)

AHD_TYPE_DOCUMENT_ANNOTATION = "de.averbis.types.health.DocumentAnnotation"
AHD_TYPE_MEDICATION = "de.averbis.types.health.Medication"
AHD_TYPE_DIAGNOSIS = "de.averbis.types.health.Diagnosis"

log = structlog.get_logger()


class TransientError(Exception):
    pass


class ResourceHandler:
    def __init__(self, averbis_pipeline: Pipeline):
        self.pipeline = averbis_pipeline
        self.bundle_builder = BundleBuilder()

    @MAPPING_FAILURES_COUNTER.count_exceptions()
    @MAPPING_DURATION_SUMMARY.time()
    def handle_documents(self, document_references: List[DocumentReference]) -> Bundle:
        """
        Process a list of DocumentReferences
        """
        all_resources = []
        bundle_id = None
        for document_reference in document_references:
            resources_from_document = self._process_documentreference(
                document_reference
            )
            composition = self._build_composition(
                document_reference, resources_from_document
            )
            bundle_id = composition.id
            all_resources.extend(resources_from_document)
            all_resources.append(composition)

        EXTRACTED_RESOURCES_COUNT_SUMMARY.observe(len(all_resources))
        result_bundle = self.bundle_builder.build_from_resources(
            all_resources, bundle_id
        )
        return result_bundle

    def handle_bundle(self, bundle: Bundle):
        """
        Process all FHIR DocumentReference resources from a given bundle
        """
        document_references = []
        for entry in bundle.entry:
            if entry.resource.resource_type == "DocumentReference":
                document_references.append(entry.resource)
        return self.handle_documents(document_references)

    def _build_composition(
        self, document_reference: DocumentReference, all_resources: List[Resource]
    ):
        composition_type = (
            document_reference.type
            if document_reference.type is not None
            else DISCHARGE_SUMMARY_CONCEPT
        )
        composition_subject = document_reference.subject
        composition_category = document_reference.category
        composition_encounter = None
        if document_reference.context is not None:
            if len(document_reference.context.encounter) > 1:
                log.warning(
                    "DocumentReference contains more than one encounter. "
                    + "Using the first."
                )
            composition_encounter = document_reference.context.encounter[0]
        composition_author = None
        composition_sections = []
        for resource in all_resources:
            resource_type = resource.resource_type
            if resource_type == "Device":
                author = Reference.construct()
                author.reference = f"Device/{resource.id}"
                author.type = "Device"
                composition_author = author
                continue
            # Check if no resource-specific section exists and add it,
            # otherwise select the correct section
            if not any(
                section.title == resource_type for section in composition_sections
            ):
                resource_section = CompositionSection.construct()
                resource_section.title = resource_type
                resource_section.entry = []

                composition_sections.append(resource_section)
                ind = len(composition_sections) - 1
            else:
                ind = [
                    ind
                    for ind, section in enumerate(composition_sections)
                    if section.title == resource_type
                ][0]

            entry_reference = Reference.construct()
            entry_reference.reference = resource_type + "/" + resource.id
            composition_sections[ind].entry.append(entry_reference)

        if composition_author is None:
            composition_author = Reference(**{"display": "Averbis Health Discovery"})

        composition_identifier = (
            self._build_composition_identifier_from_documentreference(
                document_reference
            )
        )

        composition = Composition(
            **{
                "title": "NLP FHIR Results " + time.strftime("%Y-%m-%dT%H:%M"),
                "status": "final",
                "date": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)),
                "type": composition_type,
                "identifier": composition_identifier,
                "id": sha256_of_identifier(composition_identifier),
                "subject": composition_subject,
                "category": composition_category,
                "encounter": composition_encounter,
                "author": [composition_author],
                "section": composition_sections,
            }
        )

        return composition

    def _process_documentreference(self, document_reference: DocumentReference):
        log = structlog.get_logger().bind(
            document_id=f"{document_reference.get_resource_type()}/"
            + f"{document_reference.id}"
        )

        # Text extraction and text analysis
        (text, content_type, lang) = self._extract_text_from_resource(
            document_reference
        )

        DOCUMENT_LENGTH_SUMMARY.observe(len(text))

        averbis_result = None
        try:
            averbis_result = self._perform_text_analysis(
                text=text, mime_type=content_type, lang=lang
            )
        except Exception as exc:
            log.exception(exc)
            log.error("Failed to perform text analysis", error=exc)
            raise TransientError(exc)

        total_results = []

        # Building FHIR resources as results
        medication_statement_lists = []
        for val in averbis_result:
            if val["type"] == AHD_TYPE_DIAGNOSIS:
                mapped_condition = ahd_to_condition.get_fhir_condition(
                    val, document_reference
                )
                if mapped_condition is not None:
                    total_results.append(mapped_condition)

            if val["type"] == AHD_TYPE_DOCUMENT_ANNOTATION:
                device = build_device(val)
                if device is not None:
                    total_results.append(device)

            if val["type"] == AHD_TYPE_MEDICATION:
                statement = ahd_to_medication_statement.get_fhir_medication_statement(
                    val, document_reference
                )
                if statement is not None:
                    medication_statement_lists.append(statement)

            # if custom_mappers_enabled
            if os.getenv("CUSTOM_MAPPERS_ENABLED", "False").lower() in ["true", "1"]:
                total_results.extend(custom_mappers(val, document_reference))

        medication_results = []
        medication_statement_results = []
        for medication_statement_list in medication_statement_lists:
            for medication_statement_dict in medication_statement_list:
                medication_results.append(medication_statement_dict["medication"])
                medication_statement_results.append(
                    medication_statement_dict["statement"]
                )

        # de-duplicate any Medication and MedicationStatement resources
        medication_resources_unique = {m.id: m for m in medication_results}.values()
        medication_statements_unique = {
            m.id: m for m in medication_statement_results
        }.values()

        total_results.extend(medication_resources_unique)
        total_results.extend(medication_statements_unique)

        return total_results

    def _extract_text_from_resource(
        self,
        document_reference: DocumentReference,
    ) -> Tuple[str, str, str]:
        valid_content = [
            content
            for content in document_reference.content
            if content.attachment.data is not None
        ]

        if len(valid_content) == 0:
            raise ValueError(
                f"Document {document_reference.id} contains no valid content"
            )

        if len(valid_content) > 1:
            raise ValueError(
                f"Document {document_reference.id} contains more than one attachment"
            )

        content = valid_content[0]

        language = None
        if content.attachment.language:
            language = content.attachment.language.lower().split("-")[0]

        return (
            base64.b64decode(content.attachment.data).decode("utf8"),
            content.attachment.contentType,
            language,
        )

    @tenacity.retry(
        stop=tenacity.stop.stop_after_attempt(10),
        wait=tenacity.wait.wait_fixed(5)
        + tenacity.wait.wait_random_exponential(multiplier=1, max=30),
        after=after_log(logging.getLogger(), logging.WARNING),
        reraise=True,
    )
    def _perform_text_analysis(
        self, text: str, mime_type: str = "text/plain", lang: str = None
    ):
        types = ",".join(
            [
                AHD_TYPE_DIAGNOSIS,
                AHD_TYPE_MEDICATION,
                AHD_TYPE_DOCUMENT_ANNOTATION,
                *mapper_functions.keys(),
            ]
        )
        analyse_args = {"language": lang, "annotation_types": types}

        try:
            if mime_type == "text/html":
                return self.pipeline.analyse_html(text, **analyse_args)
            else:
                return self.pipeline.analyse_text(text, **analyse_args)
        except Exception as exc:
            log.exception(exc)
            log.error("Text analysis failed")
            raise exc

    def _build_composition_identifier_from_documentreference(
        self,
        doc_ref: DocumentReference,
    ):
        """
        construct a hopefully unique identifier for the condition from
        the document identifier as well as the offset into the text
        and the unique id of the annotation
        """
        doc_ref_identifier = None
        if doc_ref.identifier is None or len(doc_ref.identifier) == 0:
            log.warning(
                "No identifier specified on the document. "
                + "Trying to fall-back to the DocumentReference.id"
            )
            doc_ref_identifier = doc_ref.id
        else:
            if len(doc_ref.identifier) > 1:
                log.warning(
                    "More than one identifier specified on the document. "
                    + "Using the first occurrence."
                )
            doc_ref_identifier = doc_ref.identifier[0].value

        composition_identifier_system = (
            "https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition"
        )
        composition_identifier_value = f"{doc_ref_identifier}_ahd-analysis-result"

        return Identifier(
            **{
                "system": composition_identifier_system,
                "value": composition_identifier_value,
            }
        )
"{document_reference.id} contains more than one attachment\" ) content = valid_content[0]", "doc_ref.identifier[0].value composition_identifier_system = ( \"https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition\" ) composition_identifier_value = f\"{doc_ref_identifier}_ahd-analysis-result\" return", "document's text in charactes\", ) DISCHARGE_SUMMARY_CONCEPT_TEXT = ( \"Clinical document", "f\"Document {document_reference.id} contains no valid content\" ) if len(valid_content) >", "from ahd2fhir.utils.bundle_builder import BundleBuilder from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions from", "build_device(val) if device is not None: total_results.append(device) if val[\"type\"] ==", "= \"text/plain\", lang: str = None ): types = \",\".join(", "as the offset into the text and the unique id", "fhir.resources.reference import Reference from fhir.resources.resource import Resource from prometheus_client import", "= Reference.construct() entry_reference.reference = resource_type + \"/\" + resource.id composition_sections[ind].entry.append(entry_reference)", "except Exception as exc: log.exception(exc) log.error(\"Failed to perform text analysis\",", "= Reference.construct() author.reference = f\"Device/{resource.id}\" author.type = \"Device\" composition_author =", "ahd2fhir.utils.bundle_builder import BundleBuilder from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions from ahd2fhir.utils.device_builder", "the first.\" ) composition_encounter = document_reference.context.encounter[0] composition_author = None composition_sections", "mapped_condition is not None: total_results.append(mapped_condition) if val[\"type\"] == AHD_TYPE_DOCUMENT_ANNOTATION: device", "= {\"language\": lang, \"annotation_types\": types} try: if mime_type == \"text/html\":", "], \"text\": DISCHARGE_SUMMARY_CONCEPT_TEXT, } ) AHD_TYPE_DOCUMENT_ANNOTATION = \"de.averbis.types.health.DocumentAnnotation\" AHD_TYPE_MEDICATION =", "is None or len(doc_ref.identifier) == 0: log.warning( \"No identifier specified", "document_reference.context is not None: if len(document_reference.context.encounter) > 1: log.warning( \"DocumentReference", "no resource specific section exists ands adds it, # otherwise", "resource in all_resources: resource_type = resource.resource_type if resource_type == \"Device\":", "in charactes\", ) DISCHARGE_SUMMARY_CONCEPT_TEXT = ( \"Clinical document Kind of", "unique id of the annotation \"\"\" doc_ref_identifier = None if", "language = content.attachment.language.lower().split(\"-\")[0] return ( base64.b64decode(content.attachment.data).decode(\"utf8\"), content.attachment.contentType, language, ) @tenacity.retry(", "ahd_to_condition, ahd_to_medication_statement from ahd2fhir.utils.bundle_builder import BundleBuilder from ahd2fhir.utils.custom_mappers import custom_mappers,", "= None composition_sections = [] for resource in all_resources: resource_type", "import sha256_of_identifier MAPPING_FAILURES_COUNTER = Counter(\"mapping_failures\", \"Exceptions during mapping\") MAPPING_DURATION_SUMMARY =", "ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions from ahd2fhir.utils.device_builder import build_device from ahd2fhir.utils.fhir_utils", "averbis_result = self._perform_text_analysis( text=text, mime_type=content_type, lang=lang ) except Exception as", "document_references = [] for entry in bundle.entry: if entry.resource.resource_type ==", "document_reference, resources_from_document ) bundle_id = composition.id 
all_resources.extend(resources_from_document) all_resources.append(composition) EXTRACTED_RESOURCES_COUNT_SUMMARY.observe(len(all_resources)) result_bundle", "ind = [ ind for ind, section in enumerate(composition_sections) if", "= [] for val in averbis_result: if val[\"type\"] == AHD_TYPE_DIAGNOSIS:", "f\"{doc_ref_identifier}_ahd-analysis-result\" return Identifier( **{ \"system\": composition_identifier_system, \"value\": composition_identifier_value, } )", "log.warning( \"More than one identifier specified on the document. \"", "enumerate(composition_sections) if section.title == resource_type ][0] entry_reference = Reference.construct() entry_reference.reference", "DISCHARGE_SUMMARY_CONCEPT_TEXT, }, ], \"text\": DISCHARGE_SUMMARY_CONCEPT_TEXT, } ) AHD_TYPE_DOCUMENT_ANNOTATION = \"de.averbis.types.health.DocumentAnnotation\"", "sha256_of_identifier MAPPING_FAILURES_COUNTER = Counter(\"mapping_failures\", \"Exceptions during mapping\") MAPPING_DURATION_SUMMARY = Histogram(", "Composition, CompositionSection from fhir.resources.documentreference import DocumentReference from fhir.resources.fhirtypes import DateTime", "= self._build_composition( document_reference, resources_from_document ) bundle_id = composition.id all_resources.extend(resources_from_document) all_resources.append(composition)", "section exists ands adds it, # otherwise select the correct", "# otherwise select the correct section if not any( section.title", "self._perform_text_analysis( text=text, mime_type=content_type, lang=lang ) except Exception as exc: log.exception(exc)", "] if len(valid_content) == 0: raise ValueError( f\"Document {document_reference.id} contains", "construct a hopefully unqiue identifier for the condition from the", "one identifier specified on the document. 
\" + \"Using the", "document Kind of document from LOINC Document Ontology\" ) DISCHARGE_SUMMARY_CONCEPT", "medication_statement_lists.append(statement) # if custom_mappers_enabled if os.getenv(\"CUSTOM_MAPPERS_ENABLED\", \"False\").lower() in [\"true\", \"1\"]:", "DocumentReference resources from a given bundle \"\"\" document_references = []", "from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions from ahd2fhir.utils.device_builder import build_device from", "self._build_composition( document_reference, resources_from_document ) bundle_id = composition.id all_resources.extend(resources_from_document) all_resources.append(composition) EXTRACTED_RESOURCES_COUNT_SUMMARY.observe(len(all_resources))", "a given bundle \"\"\" document_references = [] for entry in", "all_resources: resource_type = resource.resource_type if resource_type == \"Device\": author =", "from a given bundle \"\"\" document_references = [] for entry", "resource_type = resource.resource_type if resource_type == \"Device\": author = Reference.construct()", "composition_sections, } ) return composition def _process_documentreference(self, document_reference: DocumentReference): log", "composition_identifier_value = f\"{doc_ref_identifier}_ahd-analysis-result\" return Identifier( **{ \"system\": composition_identifier_system, \"value\": composition_identifier_value,", "\"https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition\" ) composition_identifier_value = f\"{doc_ref_identifier}_ahd-analysis-result\" return Identifier( **{ \"system\": composition_identifier_system,", "self._build_composition_identifier_from_documentreference( document_reference ) ) composition = Composition( **{ \"title\": \"NLP", "f\"Document {document_reference.id} contains more than one attachment\" ) content =", "@tenacity.retry( stop=tenacity.stop.stop_after_attempt(10), wait=tenacity.wait.wait_fixed(5) + tenacity.wait.wait_random_exponential(multiplier=1, max=30), after=after_log(logging.getLogger(), logging.WARNING), reraise=True, )", "== 0: log.warning( \"No identifier specified on the document. \"", "= len(composition_sections) - 1 else: ind = [ ind for", "identifier specified on the document. \" + \"Trying to fall-back", "total_results.extend(medication_resources_unique) total_results.extend(medication_statements_unique) return total_results def _extract_text_from_resource( self, document_reference: DocumentReference, )", "DISCHARGE_SUMMARY_CONCEPT_TEXT, } ) AHD_TYPE_DOCUMENT_ANNOTATION = \"de.averbis.types.health.DocumentAnnotation\" AHD_TYPE_MEDICATION = \"de.averbis.types.health.Medication\" AHD_TYPE_DIAGNOSIS", "ind, section in enumerate(composition_sections) if section.title == resource_type ][0] entry_reference", "it, # otherwise select the correct section if not any(", "than one encounter. 
\" + \"Using the first.\" ) composition_encounter", "f\"Device/{resource.id}\" author.type = \"Device\" composition_author = author continue # Check", ") composition_encounter = document_reference.context.encounter[0] composition_author = None composition_sections = []", "Bundle): \"\"\" Process all FHIR DocumentReference resources from a given", "after_log from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement from ahd2fhir.utils.bundle_builder import BundleBuilder", "lang) = self._extract_text_from_resource( document_reference ) DOCUMENT_LENGTH_SUMMARY.observe(len(text)) averbis_result = None try:", "medication_statement_list: medication_results.append(medication_statement_dict[\"medication\"]) medication_statement_results.append( medication_statement_dict[\"statement\"] ) # de-duplicate any Medication and", ") EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary( \"extracted_resources\", \"Number of extracted resources for", "-> Tuple[str, str]: valid_content = [ content for content in", "composition_author = None composition_sections = [] for resource in all_resources:", "analysis (text, content_type, lang) = self._extract_text_from_resource( document_reference ) DOCUMENT_LENGTH_SUMMARY.observe(len(text)) averbis_result", "0.5, 1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 21.0, 34.0, 55.0,", "BundleBuilder() @MAPPING_FAILURES_COUNTER.count_exceptions() @MAPPING_DURATION_SUMMARY.time() def handle_documents(self, document_references: List[DocumentReference]) -> Bundle: \"\"\"", "spent mapping\", buckets=( 0.05, 0.1, 0.5, 1.0, 2.0, 3.0, 5.0,", "processed document\" ) DOCUMENT_LENGTH_SUMMARY = Summary( \"document_length\", \"Length of each", "= document_reference.category composition_encounter = None if document_reference.context is not None:", "occurrence.\" ) doc_ref_identifier = doc_ref.identifier[0].value composition_identifier_system = ( \"https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition\" )", "tenacity.after import after_log from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement from ahd2fhir.utils.bundle_builder", "on the document. \" + \"Using the first occurrence.\" )", "mapping\") MAPPING_DURATION_SUMMARY = Histogram( \"map_duration_seconds\", \"Time spent mapping\", buckets=( 0.05,", "extracted resources for each processed document\" ) DOCUMENT_LENGTH_SUMMARY = Summary(", "for section in composition_sections ): resource_section = CompositionSection.construct() resource_section.title =", "log.exception(exc) log.error(\"Text analysis failed\") raise exc def _build_composition_identifier_from_documentreference( self, doc_ref:", "{document_reference.id} contains no valid content\" ) if len(valid_content) > 1:", "if len(valid_content) == 0: raise ValueError( f\"Document {document_reference.id} contains no", "= \"de.averbis.types.health.Diagnosis\" log = structlog.get_logger() class TransientError(Exception): pass class ResourceHandler:", ") composition_subject = document_reference.subject composition_category = document_reference.category composition_encounter = None", "log.warning( \"DocumentReference contains more than one encounter. 
\" + \"Using", "datetime import logging import os import time from typing import", "from fhir.resources.bundle import Bundle from fhir.resources.codeableconcept import CodeableConcept from fhir.resources.composition", ") composition = Composition( **{ \"title\": \"NLP FHIR Results \"", "averbis_result: if val[\"type\"] == AHD_TYPE_DIAGNOSIS: mapped_condition = ahd_to_condition.get_fhir_condition( val, document_reference", "if device is not None: total_results.append(device) if val[\"type\"] == AHD_TYPE_MEDICATION:", "CodeableConcept from fhir.resources.composition import Composition, CompositionSection from fhir.resources.documentreference import DocumentReference", "\"Clinical document Kind of document from LOINC Document Ontology\" )", "section.title == resource_type for section in composition_sections ): resource_section =", "- 1 else: ind = [ ind for ind, section", "EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary( \"extracted_resources\", \"Number of extracted resources for each", "import custom_mappers, mapper_functions from ahd2fhir.utils.device_builder import build_device from ahd2fhir.utils.fhir_utils import", "fhir.resources.composition import Composition, CompositionSection from fhir.resources.documentreference import DocumentReference from fhir.resources.fhirtypes", "= ( self._build_composition_identifier_from_documentreference( document_reference ) ) composition = Composition( **{", "None: if len(document_reference.context.encounter) > 1: log.warning( \"DocumentReference contains more than", "composition_author = author continue # Check if no resource specific", "ahd_to_medication_statement.get_fhir_medication_statement( val, document_reference ) if statement is not None: medication_statement_lists.append(statement)", "import CodeableConcept from fhir.resources.composition import Composition, CompositionSection from fhir.resources.documentreference import", "> 1: raise ValueError( f\"Document {document_reference.id} contains more than one", "Summary( \"extracted_resources\", \"Number of extracted resources for each processed document\"", "\"More than one identifier specified on the document. 
\" +", "\" + \"Using the first occurrence.\" ) doc_ref_identifier = doc_ref.identifier[0].value", "_build_composition( self, document_reference: DocumentReference, all_resources: List[Resource] ): composition_type = (", "= CodeableConcept( **{ \"coding\": [ { \"system\": \"http://loinc.org\", \"code\": \"74477-1\",", "if doc_ref.identifier is None or len(doc_ref.identifier) == 0: log.warning( \"No", "from ahd2fhir.utils.device_builder import build_device from ahd2fhir.utils.fhir_utils import sha256_of_identifier MAPPING_FAILURES_COUNTER =", "fhir.resources.documentreference import DocumentReference from fhir.resources.fhirtypes import DateTime from fhir.resources.identifier import", "\"Device\" composition_author = author continue # Check if no resource", "to fall-back to the DocumentReference.id\" ) doc_ref_identifier = doc_ref.id else:", "section in enumerate(composition_sections) if section.title == resource_type ][0] entry_reference =", "resources from a given bundle \"\"\" document_references = [] for", "else: if len(doc_ref.identifier) > 1: log.warning( \"More than one identifier", "\"Using the first.\" ) composition_encounter = document_reference.context.encounter[0] composition_author = None", "None ): types = \",\".join( [ AHD_TYPE_DIAGNOSIS, AHD_TYPE_MEDICATION, AHD_TYPE_DOCUMENT_ANNOTATION, *mapper_functions.keys(),", "Text extraction and text analysis (text, content_type, lang) = self._extract_text_from_resource(", "( \"Clinical document Kind of document from LOINC Document Ontology\"", "from fhir.resources.codeableconcept import CodeableConcept from fhir.resources.composition import Composition, CompositionSection from", "DocumentReferences \"\"\" all_resources = [] bundle_id = None for document_reference", "= [] for entry in bundle.entry: if entry.resource.resource_type == \"DocumentReference\":", "resource_section.title = resource_type resource_section.entry = [] composition_sections.append(resource_section) ind = len(composition_sections)", "== resource_type ][0] entry_reference = Reference.construct() entry_reference.reference = resource_type +", "text=text, mime_type=content_type, lang=lang ) except Exception as exc: log.exception(exc) log.error(\"Failed", "raise exc def _build_composition_identifier_from_documentreference( self, doc_ref: DocumentReference, ): \"\"\" construct", "Pipeline): self.pipeline = averbis_pipeline self.bundle_builder = BundleBuilder() @MAPPING_FAILURES_COUNTER.count_exceptions() @MAPPING_DURATION_SUMMARY.time() def", "[ content for content in document_reference.content if content.attachment.data is not", "0.1, 0.5, 1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 21.0, 34.0,", "[] bundle_id = None for document_reference in document_references: resources_from_document =", "bundle \"\"\" document_references = [] for entry in bundle.entry: if", "resource_section = CompositionSection.construct() resource_section.title = resource_type resource_section.entry = [] composition_sections.append(resource_section)", "entry_reference.reference = resource_type + \"/\" + resource.id composition_sections[ind].entry.append(entry_reference) if composition_author", "def _process_documentreference(self, document_reference: DocumentReference): log = structlog.get_logger().bind( document_id=f\"{document_reference.get_resource_type()}/\" + f\"{document_reference.id}\"", "DateTime from fhir.resources.identifier import Identifier from fhir.resources.reference import Reference from", "is not None: total_results.append(device) if val[\"type\"] == AHD_TYPE_MEDICATION: statement =", 
"Exception as exc: log.exception(exc) log.error(\"Text analysis failed\") raise exc def", "the annotation \"\"\" doc_ref_identifier = None if doc_ref.identifier is None", "composition = Composition( **{ \"title\": \"NLP FHIR Results \" +", "is not None ] if len(valid_content) == 0: raise ValueError(", "the offset into the text and the unique id of", "0.05, 0.1, 0.5, 1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 21.0,", "return self.pipeline.analyse_html(text, **analyse_args) else: return self.pipeline.analyse_text(text, **analyse_args) except Exception as", "for each processed document\" ) DOCUMENT_LENGTH_SUMMARY = Summary( \"document_length\", \"Length", "= None ): types = \",\".join( [ AHD_TYPE_DIAGNOSIS, AHD_TYPE_MEDICATION, AHD_TYPE_DOCUMENT_ANNOTATION,", "author.type = \"Device\" composition_author = author continue # Check if", "medication_statement_dict[\"statement\"] ) # de-duplicate any Medication and MedicationStatement resources medication_resources_unique", "= self.bundle_builder.build_from_resources( all_resources, bundle_id ) return result_bundle def handle_bundle(self, bundle:", "): composition_type = ( document_reference.type if document_reference.type is not None", "content = valid_content[0] language = None if content.attachment.language: language =", "adds it, # otherwise select the correct section if not", "doc_ref: DocumentReference, ): \"\"\" construct a hopefully unqiue identifier for", "Document Ontology\" ) DISCHARGE_SUMMARY_CONCEPT = CodeableConcept( **{ \"coding\": [ {", "import DateTime from fhir.resources.identifier import Identifier from fhir.resources.reference import Reference", "correct section if not any( section.title == resource_type for section", "self, text: str, mime_type: str = \"text/plain\", lang: str =", "+ tenacity.wait.wait_random_exponential(multiplier=1, max=30), after=after_log(logging.getLogger(), logging.WARNING), reraise=True, ) def _perform_text_analysis( self,", "logging import os import time from typing import List, Tuple", "len(composition_sections) - 1 else: ind = [ ind for ind,", "ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement from ahd2fhir.utils.bundle_builder import BundleBuilder from ahd2fhir.utils.custom_mappers", "entry.resource.resource_type == \"DocumentReference\": document_references.append(entry.resource) return self.handle_documents(document_references) def _build_composition( self, document_reference:", "doc_ref.identifier is None or len(doc_ref.identifier) == 0: log.warning( \"No identifier", "max=30), after=after_log(logging.getLogger(), logging.WARNING), reraise=True, ) def _perform_text_analysis( self, text: str,", "== AHD_TYPE_MEDICATION: statement = ahd_to_medication_statement.get_fhir_medication_statement( val, document_reference ) if statement", "not None: medication_statement_lists.append(statement) # if custom_mappers_enabled if os.getenv(\"CUSTOM_MAPPERS_ENABLED\", \"False\").lower() in", "the first occurrence.\" ) doc_ref_identifier = doc_ref.identifier[0].value composition_identifier_system = (", "text analysis (text, content_type, lang) = self._extract_text_from_resource( document_reference ) DOCUMENT_LENGTH_SUMMARY.observe(len(text))", "Exception as exc: log.exception(exc) log.error(\"Failed to perform text analysis\", error=exc)", "def _extract_text_from_resource( self, document_reference: DocumentReference, ) -> Tuple[str, str]: valid_content", "= f\"{doc_ref_identifier}_ahd-analysis-result\" return Identifier( **{ \"system\": composition_identifier_system, \"value\": composition_identifier_value, 
}", "FHIR DocumentReference resources from a given bundle \"\"\" document_references =", "BundleBuilder from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions from ahd2fhir.utils.device_builder import build_device", "select the correct section if not any( section.title == resource_type", "medication_resources_unique = {m.id: m for m in medication_results}.values() medication_statements_unique =", "= None for document_reference in document_references: resources_from_document = self._process_documentreference( document_reference", "AHD_TYPE_DOCUMENT_ANNOTATION = \"de.averbis.types.health.DocumentAnnotation\" AHD_TYPE_MEDICATION = \"de.averbis.types.health.Medication\" AHD_TYPE_DIAGNOSIS = \"de.averbis.types.health.Diagnosis\" log", "de-duplicate any Medication and MedicationStatement resources medication_resources_unique = {m.id: m", "not None ] if len(valid_content) == 0: raise ValueError( f\"Document", "time from typing import List, Tuple import structlog import tenacity", ") if len(valid_content) > 1: raise ValueError( f\"Document {document_reference.id} contains", ") doc_ref_identifier = doc_ref.identifier[0].value composition_identifier_system = ( \"https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition\" ) composition_identifier_value", "document_reference ) if mapped_condition is not None: total_results.append(mapped_condition) if val[\"type\"]", "\"inf\", ), ) EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary( \"extracted_resources\", \"Number of extracted", "if val[\"type\"] == AHD_TYPE_DOCUMENT_ANNOTATION: device = build_device(val) if device is", "not None: total_results.append(mapped_condition) if val[\"type\"] == AHD_TYPE_DOCUMENT_ANNOTATION: device = build_device(val)", "\"text/plain\", lang: str = None ): types = \",\".join( [", "= \"Device\" composition_author = author continue # Check if no", "the condition from the document identifier as well as the", "self.bundle_builder.build_from_resources( all_resources, bundle_id ) return result_bundle def handle_bundle(self, bundle: Bundle):", "in medication_statement_list: medication_results.append(medication_statement_dict[\"medication\"]) medication_statement_results.append( medication_statement_dict[\"statement\"] ) # de-duplicate any Medication", "+ \"Using the first occurrence.\" ) doc_ref_identifier = doc_ref.identifier[0].value composition_identifier_system", "fhir.resources.resource import Resource from prometheus_client import Counter, Histogram, Summary from", "\"coding\": [ { \"system\": \"http://loinc.org\", \"code\": \"74477-1\", \"display\": DISCHARGE_SUMMARY_CONCEPT_TEXT, },", "= [ content for content in document_reference.content if content.attachment.data is", "content.attachment.contentType, language, ) @tenacity.retry( stop=tenacity.stop.stop_after_attempt(10), wait=tenacity.wait.wait_fixed(5) + tenacity.wait.wait_random_exponential(multiplier=1, max=30), after=after_log(logging.getLogger(),", ") @tenacity.retry( stop=tenacity.stop.stop_after_attempt(10), wait=tenacity.wait.wait_fixed(5) + tenacity.wait.wait_random_exponential(multiplier=1, max=30), after=after_log(logging.getLogger(), logging.WARNING), reraise=True,", "> 1: log.warning( \"More than one identifier specified on the", "\"http://loinc.org\", \"code\": \"74477-1\", \"display\": DISCHARGE_SUMMARY_CONCEPT_TEXT, }, ], \"text\": DISCHARGE_SUMMARY_CONCEPT_TEXT, }", "each processed document's text in charactes\", ) DISCHARGE_SUMMARY_CONCEPT_TEXT = (", "TransientError(exc) total_results = [] # Building FHIR resources as 
results", "for val in averbis_result: if val[\"type\"] == AHD_TYPE_DIAGNOSIS: mapped_condition =", "_build_composition_identifier_from_documentreference( self, doc_ref: DocumentReference, ): \"\"\" construct a hopefully unqiue", "try: if mime_type == \"text/html\": return self.pipeline.analyse_html(text, **analyse_args) else: return", "if content.attachment.data is not None ] if len(valid_content) == 0:", "log = structlog.get_logger() class TransientError(Exception): pass class ResourceHandler: def __init__(self,", "composition_category = document_reference.category composition_encounter = None if document_reference.context is not", "the document. \" + \"Using the first occurrence.\" ) doc_ref_identifier", "is not None else DISCHARGE_SUMMARY_CONCEPT ) composition_subject = document_reference.subject composition_category", "\"id\": sha256_of_identifier(composition_identifier), \"subject\": composition_subject, \"category\": composition_category, \"encounter\": composition_encounter, \"author\": [composition_author],", "import structlog import tenacity from averbis import Pipeline from fhir.resources.bundle", "custom_mappers, mapper_functions from ahd2fhir.utils.device_builder import build_device from ahd2fhir.utils.fhir_utils import sha256_of_identifier", ") # de-duplicate any Medication and MedicationStatement resources medication_resources_unique =", "as well as the offset into the text and the", "document_reference)) medication_results = [] medication_statement_results = [] for medication_statement_list in", "Pipeline from fhir.resources.bundle import Bundle from fhir.resources.codeableconcept import CodeableConcept from", "= Summary( \"extracted_resources\", \"Number of extracted resources for each processed", "# Check if no resource specific section exists ands adds", "device = build_device(val) if device is not None: total_results.append(device) if", "= None if content.attachment.language: language = content.attachment.language.lower().split(\"-\")[0] return ( base64.b64decode(content.attachment.data).decode(\"utf8\"),", "of DocumentReferences \"\"\" all_resources = [] bundle_id = None for", "bundle_id = None for document_reference in document_references: resources_from_document = self._process_documentreference(", "Summary from tenacity.after import after_log from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement", "time.strftime(\"%Y-%m-%dT%H:%M\"), \"status\": \"final\", \"date\": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)), \"type\": composition_type, \"identifier\": composition_identifier, \"id\":", "log = structlog.get_logger().bind( document_id=f\"{document_reference.get_resource_type()}/\" + f\"{document_reference.id}\" ) # Text extraction", "structlog.get_logger().bind( document_id=f\"{document_reference.get_resource_type()}/\" + f\"{document_reference.id}\" ) # Text extraction and text", "= doc_ref.identifier[0].value composition_identifier_system = ( \"https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition\" ) composition_identifier_value = f\"{doc_ref_identifier}_ahd-analysis-result\"", "ind for ind, section in enumerate(composition_sections) if section.title == resource_type", "== AHD_TYPE_DOCUMENT_ANNOTATION: device = build_device(val) if device is not None:", "= [] medication_statement_results = [] for medication_statement_list in medication_statement_lists: for", "resource_type resource_section.entry = [] composition_sections.append(resource_section) ind = len(composition_sections) - 1", "val, document_reference 
) if mapped_condition is not None: total_results.append(mapped_condition) if", "+ \"Trying to fall-back to the DocumentReference.id\" ) doc_ref_identifier =", "content.attachment.language: language = content.attachment.language.lower().split(\"-\")[0] return ( base64.b64decode(content.attachment.data).decode(\"utf8\"), content.attachment.contentType, language, )", "Check if no resource specific section exists ands adds it,", "for medication_statement_dict in medication_statement_list: medication_results.append(medication_statement_dict[\"medication\"]) medication_statement_results.append( medication_statement_dict[\"statement\"] ) # de-duplicate", "= [] # Building FHIR resources as results medication_statement_lists =", "structlog.get_logger() class TransientError(Exception): pass class ResourceHandler: def __init__(self, averbis_pipeline: Pipeline):", "str = None ): types = \",\".join( [ AHD_TYPE_DIAGNOSIS, AHD_TYPE_MEDICATION,", "lang, \"annotation_types\": types} try: if mime_type == \"text/html\": return self.pipeline.analyse_html(text,", "Ontology\" ) DISCHARGE_SUMMARY_CONCEPT = CodeableConcept( **{ \"coding\": [ { \"system\":", "if custom_mappers_enabled if os.getenv(\"CUSTOM_MAPPERS_ENABLED\", \"False\").lower() in [\"true\", \"1\"]: total_results.extend(custom_mappers(val, document_reference))", "Resource from prometheus_client import Counter, Histogram, Summary from tenacity.after import", "= [] bundle_id = None for document_reference in document_references: resources_from_document", "= build_device(val) if device is not None: total_results.append(device) if val[\"type\"]", "medication_statements_unique = { m.id: m for m in medication_statement_results }.values()", "def _build_composition( self, document_reference: DocumentReference, all_resources: List[Resource] ): composition_type =", "DocumentReference from fhir.resources.fhirtypes import DateTime from fhir.resources.identifier import Identifier from", "= doc_ref.id else: if len(doc_ref.identifier) > 1: log.warning( \"More than", "= ( document_reference.type if document_reference.type is not None else DISCHARGE_SUMMARY_CONCEPT", "mapping\", buckets=( 0.05, 0.1, 0.5, 1.0, 2.0, 3.0, 5.0, 8.0,", "is not None: if len(document_reference.context.encounter) > 1: log.warning( \"DocumentReference contains", "return composition def _process_documentreference(self, document_reference: DocumentReference): log = structlog.get_logger().bind( document_id=f\"{document_reference.get_resource_type()}/\"", "for ind, section in enumerate(composition_sections) if section.title == resource_type ][0]", "\",\".join( [ AHD_TYPE_DIAGNOSIS, AHD_TYPE_MEDICATION, AHD_TYPE_DOCUMENT_ANNOTATION, *mapper_functions.keys(), ] ) analyse_args =", "None or len(doc_ref.identifier) == 0: log.warning( \"No identifier specified on", "m in medication_results}.values() medication_statements_unique = { m.id: m for m", "one attachment\" ) content = valid_content[0] language = None if", "\" + time.strftime(\"%Y-%m-%dT%H:%M\"), \"status\": \"final\", \"date\": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)), \"type\": composition_type, \"identifier\":", "for resource in all_resources: resource_type = resource.resource_type if resource_type ==", "composition_type, \"identifier\": composition_identifier, \"id\": sha256_of_identifier(composition_identifier), \"subject\": composition_subject, \"category\": composition_category, \"encounter\":", "composition_identifier_system = ( \"https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition\" ) 
composition_identifier_value = f\"{doc_ref_identifier}_ahd-analysis-result\" return Identifier(", "class ResourceHandler: def __init__(self, averbis_pipeline: Pipeline): self.pipeline = averbis_pipeline self.bundle_builder", "the correct section if not any( section.title == resource_type for", "condition from the document identifier as well as the offset", "{ m.id: m for m in medication_statement_results }.values() total_results.extend(medication_resources_unique) total_results.extend(medication_statements_unique)", "def handle_bundle(self, bundle: Bundle): \"\"\" Process all FHIR DocumentReference resources", "analyse_args = {\"language\": lang, \"annotation_types\": types} try: if mime_type ==", "exc: log.exception(exc) log.error(\"Failed to perform text analysis\", error=exc) raise TransientError(exc)", "from LOINC Document Ontology\" ) DISCHARGE_SUMMARY_CONCEPT = CodeableConcept( **{ \"coding\":", "+ \"/\" + resource.id composition_sections[ind].entry.append(entry_reference) if composition_author is None: composition_author", "= Reference(**{\"display\": \"Averbis Health Discovery\"}) composition_identifier = ( self._build_composition_identifier_from_documentreference( document_reference", "@MAPPING_FAILURES_COUNTER.count_exceptions() @MAPPING_DURATION_SUMMARY.time() def handle_documents(self, document_references: List[DocumentReference]) -> Bundle: \"\"\" Process", "import BundleBuilder from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions from ahd2fhir.utils.device_builder import", "wait=tenacity.wait.wait_fixed(5) + tenacity.wait.wait_random_exponential(multiplier=1, max=30), after=after_log(logging.getLogger(), logging.WARNING), reraise=True, ) def _perform_text_analysis(", "else: return self.pipeline.analyse_text(text, **analyse_args) except Exception as exc: log.exception(exc) log.error(\"Text", "from ahd2fhir.utils.fhir_utils import sha256_of_identifier MAPPING_FAILURES_COUNTER = Counter(\"mapping_failures\", \"Exceptions during mapping\")", "= averbis_pipeline self.bundle_builder = BundleBuilder() @MAPPING_FAILURES_COUNTER.count_exceptions() @MAPPING_DURATION_SUMMARY.time() def handle_documents(self, document_references:", "( self._build_composition_identifier_from_documentreference( document_reference ) ) composition = Composition( **{ \"title\":", "build_device from ahd2fhir.utils.fhir_utils import sha256_of_identifier MAPPING_FAILURES_COUNTER = Counter(\"mapping_failures\", \"Exceptions during", "\"status\": \"final\", \"date\": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)), \"type\": composition_type, \"identifier\": composition_identifier, \"id\": sha256_of_identifier(composition_identifier),", "_perform_text_analysis( self, text: str, mime_type: str = \"text/plain\", lang: str", "in document_references: resources_from_document = self._process_documentreference( document_reference ) composition = self._build_composition(", "DISCHARGE_SUMMARY_CONCEPT ) composition_subject = document_reference.subject composition_category = document_reference.category composition_encounter =", "-> Bundle: \"\"\" Process a list of DocumentReferences \"\"\" all_resources", "from fhir.resources.identifier import Identifier from fhir.resources.reference import Reference from fhir.resources.resource", "composition.id all_resources.extend(resources_from_document) all_resources.append(composition) EXTRACTED_RESOURCES_COUNT_SUMMARY.observe(len(all_resources)) result_bundle = self.bundle_builder.build_from_resources( all_resources, bundle_id )", 
"medication_results.append(medication_statement_dict[\"medication\"]) medication_statement_results.append( medication_statement_dict[\"statement\"] ) # de-duplicate any Medication and MedicationStatement", "specified on the document. \" + \"Trying to fall-back to", "( base64.b64decode(content.attachment.data).decode(\"utf8\"), content.attachment.contentType, language, ) @tenacity.retry( stop=tenacity.stop.stop_after_attempt(10), wait=tenacity.wait.wait_fixed(5) + tenacity.wait.wait_random_exponential(multiplier=1,", ") DOCUMENT_LENGTH_SUMMARY = Summary( \"document_length\", \"Length of each processed document's", "import Counter, Histogram, Summary from tenacity.after import after_log from ahd2fhir.mappers", "averbis_result = None try: averbis_result = self._perform_text_analysis( text=text, mime_type=content_type, lang=lang", "if len(valid_content) > 1: raise ValueError( f\"Document {document_reference.id} contains more", ") analyse_args = {\"language\": lang, \"annotation_types\": types} try: if mime_type", "resource specific section exists ands adds it, # otherwise select", "if no resource specific section exists ands adds it, #", "= None if doc_ref.identifier is None or len(doc_ref.identifier) == 0:", "\"author\": [composition_author], \"section\": composition_sections, } ) return composition def _process_documentreference(self,", "\"section\": composition_sections, } ) return composition def _process_documentreference(self, document_reference: DocumentReference):", "document. \" + \"Using the first occurrence.\" ) doc_ref_identifier =", "document_reference.type if document_reference.type is not None else DISCHARGE_SUMMARY_CONCEPT ) composition_subject", "\"type\": composition_type, \"identifier\": composition_identifier, \"id\": sha256_of_identifier(composition_identifier), \"subject\": composition_subject, \"category\": composition_category,", "len(valid_content) == 0: raise ValueError( f\"Document {document_reference.id} contains no valid", "Kind of document from LOINC Document Ontology\" ) DISCHARGE_SUMMARY_CONCEPT =", "text in charactes\", ) DISCHARGE_SUMMARY_CONCEPT_TEXT = ( \"Clinical document Kind", "\"annotation_types\": types} try: if mime_type == \"text/html\": return self.pipeline.analyse_html(text, **analyse_args)", "\"1\"]: total_results.extend(custom_mappers(val, document_reference)) medication_results = [] medication_statement_results = [] for", "medication_statement_results.append( medication_statement_dict[\"statement\"] ) # de-duplicate any Medication and MedicationStatement resources", "from fhir.resources.composition import Composition, CompositionSection from fhir.resources.documentreference import DocumentReference from", "None ] if len(valid_content) == 0: raise ValueError( f\"Document {document_reference.id}", "# if custom_mappers_enabled if os.getenv(\"CUSTOM_MAPPERS_ENABLED\", \"False\").lower() in [\"true\", \"1\"]: total_results.extend(custom_mappers(val,", "the document. 
\" + \"Trying to fall-back to the DocumentReference.id\"", "__init__(self, averbis_pipeline: Pipeline): self.pipeline = averbis_pipeline self.bundle_builder = BundleBuilder() @MAPPING_FAILURES_COUNTER.count_exceptions()", "for content in document_reference.content if content.attachment.data is not None ]", "not None else DISCHARGE_SUMMARY_CONCEPT ) composition_subject = document_reference.subject composition_category =", "resource.id composition_sections[ind].entry.append(entry_reference) if composition_author is None: composition_author = Reference(**{\"display\": \"Averbis", "m.id: m for m in medication_statement_results }.values() total_results.extend(medication_resources_unique) total_results.extend(medication_statements_unique) return", "\"de.averbis.types.health.DocumentAnnotation\" AHD_TYPE_MEDICATION = \"de.averbis.types.health.Medication\" AHD_TYPE_DIAGNOSIS = \"de.averbis.types.health.Diagnosis\" log = structlog.get_logger()", "any Medication and MedicationStatement resources medication_resources_unique = {m.id: m for", "DocumentReference.id\" ) doc_ref_identifier = doc_ref.id else: if len(doc_ref.identifier) > 1:", "[ AHD_TYPE_DIAGNOSIS, AHD_TYPE_MEDICATION, AHD_TYPE_DOCUMENT_ANNOTATION, *mapper_functions.keys(), ] ) analyse_args = {\"language\":", "in composition_sections ): resource_section = CompositionSection.construct() resource_section.title = resource_type resource_section.entry", "any( section.title == resource_type for section in composition_sections ): resource_section", "not None: total_results.append(device) if val[\"type\"] == AHD_TYPE_MEDICATION: statement = ahd_to_medication_statement.get_fhir_medication_statement(", "composition_identifier = ( self._build_composition_identifier_from_documentreference( document_reference ) ) composition = Composition(", "composition_encounter = None if document_reference.context is not None: if len(document_reference.context.encounter)", "tenacity.wait.wait_random_exponential(multiplier=1, max=30), after=after_log(logging.getLogger(), logging.WARNING), reraise=True, ) def _perform_text_analysis( self, text:", "val[\"type\"] == AHD_TYPE_MEDICATION: statement = ahd_to_medication_statement.get_fhir_medication_statement( val, document_reference ) if", "composition_author is None: composition_author = Reference(**{\"display\": \"Averbis Health Discovery\"}) composition_identifier", "Process all FHIR DocumentReference resources from a given bundle \"\"\"", "mapper_functions from ahd2fhir.utils.device_builder import build_device from ahd2fhir.utils.fhir_utils import sha256_of_identifier MAPPING_FAILURES_COUNTER", "composition = self._build_composition( document_reference, resources_from_document ) bundle_id = composition.id all_resources.extend(resources_from_document)", "= content.attachment.language.lower().split(\"-\")[0] return ( base64.b64decode(content.attachment.data).decode(\"utf8\"), content.attachment.contentType, language, ) @tenacity.retry( stop=tenacity.stop.stop_after_attempt(10),", "encounter. 
\" + \"Using the first.\" ) composition_encounter = document_reference.context.encounter[0]", "types = \",\".join( [ AHD_TYPE_DIAGNOSIS, AHD_TYPE_MEDICATION, AHD_TYPE_DOCUMENT_ANNOTATION, *mapper_functions.keys(), ] )", "all_resources = [] bundle_id = None for document_reference in document_references:", "if resource_type == \"Device\": author = Reference.construct() author.reference = f\"Device/{resource.id}\"", "document identifier as well as the offset into the text", "not None: if len(document_reference.context.encounter) > 1: log.warning( \"DocumentReference contains more", ") DISCHARGE_SUMMARY_CONCEPT_TEXT = ( \"Clinical document Kind of document from", "import DocumentReference from fhir.resources.fhirtypes import DateTime from fhir.resources.identifier import Identifier", "mapped_condition = ahd_to_condition.get_fhir_condition( val, document_reference ) if mapped_condition is not", "document_references: List[DocumentReference]) -> Bundle: \"\"\" Process a list of DocumentReferences", "document from LOINC Document Ontology\" ) DISCHARGE_SUMMARY_CONCEPT = CodeableConcept( **{", "entry in bundle.entry: if entry.resource.resource_type == \"DocumentReference\": document_references.append(entry.resource) return self.handle_documents(document_references)", "[\"true\", \"1\"]: total_results.extend(custom_mappers(val, document_reference)) medication_results = [] medication_statement_results = []", "\"Trying to fall-back to the DocumentReference.id\" ) doc_ref_identifier = doc_ref.id", "ind = len(composition_sections) - 1 else: ind = [ ind", "Histogram( \"map_duration_seconds\", \"Time spent mapping\", buckets=( 0.05, 0.1, 0.5, 1.0,", "1 else: ind = [ ind for ind, section in", "logging.WARNING), reraise=True, ) def _perform_text_analysis( self, text: str, mime_type: str", "import os import time from typing import List, Tuple import", "@MAPPING_DURATION_SUMMARY.time() def handle_documents(self, document_references: List[DocumentReference]) -> Bundle: \"\"\" Process a", "\"No identifier specified on the document. 
\" + \"Trying to", "Tuple[str, str]: valid_content = [ content for content in document_reference.content", "Medication and MedicationStatement resources medication_resources_unique = {m.id: m for m", "\"category\": composition_category, \"encounter\": composition_encounter, \"author\": [composition_author], \"section\": composition_sections, } )", "*mapper_functions.keys(), ] ) analyse_args = {\"language\": lang, \"annotation_types\": types} try:", "34.0, 55.0, \"inf\", ), ) EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary( \"extracted_resources\", \"Number", "all_resources, bundle_id ) return result_bundle def handle_bundle(self, bundle: Bundle): \"\"\"", "if document_reference.context is not None: if len(document_reference.context.encounter) > 1: log.warning(", "FHIR Results \" + time.strftime(\"%Y-%m-%dT%H:%M\"), \"status\": \"final\", \"date\": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)), \"type\":", "else: ind = [ ind for ind, section in enumerate(composition_sections)", "m for m in medication_statement_results }.values() total_results.extend(medication_resources_unique) total_results.extend(medication_statements_unique) return total_results", "<reponame>miracum/ahd2fhir<filename>ahd2fhir/utils/resource_handler.py import base64 import datetime import logging import os import", "AHD_TYPE_DIAGNOSIS: mapped_condition = ahd_to_condition.get_fhir_condition( val, document_reference ) if mapped_condition is", "= [ ind for ind, section in enumerate(composition_sections) if section.title", "str]: valid_content = [ content for content in document_reference.content if", "\" + \"Trying to fall-back to the DocumentReference.id\" ) doc_ref_identifier", "document_reference.context.encounter[0] composition_author = None composition_sections = [] for resource in", "import after_log from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement from ahd2fhir.utils.bundle_builder import", "\"NLP FHIR Results \" + time.strftime(\"%Y-%m-%dT%H:%M\"), \"status\": \"final\", \"date\": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)),", "(text, content_type, lang) = self._extract_text_from_resource( document_reference ) DOCUMENT_LENGTH_SUMMARY.observe(len(text)) averbis_result =", "= ( \"Clinical document Kind of document from LOINC Document", "doc_ref_identifier = doc_ref.identifier[0].value composition_identifier_system = ( \"https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition\" ) composition_identifier_value =", "is not None: medication_statement_lists.append(statement) # if custom_mappers_enabled if os.getenv(\"CUSTOM_MAPPERS_ENABLED\", \"False\").lower()", "from fhir.resources.fhirtypes import DateTime from fhir.resources.identifier import Identifier from fhir.resources.reference", ") # Text extraction and text analysis (text, content_type, lang)", "}.values() total_results.extend(medication_resources_unique) total_results.extend(medication_statements_unique) return total_results def _extract_text_from_resource( self, document_reference: DocumentReference,", "resource_type for section in composition_sections ): resource_section = CompositionSection.construct() resource_section.title", "document_reference: DocumentReference): log = structlog.get_logger().bind( document_id=f\"{document_reference.get_resource_type()}/\" + f\"{document_reference.id}\" ) #", "document_reference: DocumentReference, all_resources: List[Resource] ): composition_type = ( document_reference.type if", "DocumentReference, all_resources: List[Resource] ): composition_type 
import base64
import datetime
import os
import time
from typing import List, Tuple

import structlog
import tenacity
from averbis import Pipeline
from fhir.resources.bundle import Bundle
from fhir.resources.codeableconcept import CodeableConcept
from fhir.resources.composition import Composition, CompositionSection
from fhir.resources.documentreference import DocumentReference
from fhir.resources.fhirtypes import DateTime
from fhir.resources.identifier import Identifier  # import assumed
from fhir.resources.reference import Reference
from fhir.resources.resource import Resource
from prometheus_client import Counter, Histogram, Summary
from tenacity.after import after_log

from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement
from ahd2fhir.utils.bundle_builder import BundleBuilder
from ahd2fhir.utils.custom_mappers import custom_mappers  # import path assumed
from ahd2fhir.utils.device_builder import build_device
from ahd2fhir.utils.fhir_utils import sha256_of_identifier

MAPPING_FAILURES_COUNTER = Counter(
    "mapping_failures", "Number of failed mappings"  # description text assumed
)
MAPPING_DURATION_SUMMARY = Histogram(
    "map_duration_seconds",
    "Time spent mapping",
    buckets=(
        0.05,
        0.1,
        0.5,
        1.0,
        2.0,
        3.0,
        5.0,
        8.0,
        13.0,
        21.0,
        34.0,
        55.0,
        "inf",
    ),
)
EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary(
    "extracted_resources", "Number of extracted resources for each processed document"
)
DOCUMENT_LENGTH_SUMMARY = Summary(
    "document_length",
    "Length of each processed document's text in characters",
)

DISCHARGE_SUMMARY_CONCEPT_TEXT = (
    "... LOINC Document Ontology"  # only the tail of this display text is recoverable
)
DISCHARGE_SUMMARY_CONCEPT = CodeableConcept(
    **{
        "coding": [
            {
                "system": "http://loinc.org",
                "code": "74477-1",
                "display": DISCHARGE_SUMMARY_CONCEPT_TEXT,
            },
        ],
        "text": DISCHARGE_SUMMARY_CONCEPT_TEXT,
    }
)

AHD_TYPE_DOCUMENT_ANNOTATION = "de.averbis.types.health.DocumentAnnotation"
AHD_TYPE_MEDICATION = "de.averbis.types.health.Medication"
AHD_TYPE_DIAGNOSIS = "de.averbis.types.health.Diagnosis"

log = structlog.get_logger()


class TransientError(Exception):
    pass


def _build_composition_identifier_from_documentreference(  # function name assumed
    doc_ref: DocumentReference,
):
    """
    Construct a hopefully unique identifier for the Composition
    from the document's identifier.
    """
    doc_ref_identifier = None
    if doc_ref.identifier is None or len(doc_ref.identifier) == 0:
        log.warning(
            "No identifier specified on the document. "
            + "Trying to fall-back to the DocumentReference.id"
        )
        doc_ref_identifier = doc_ref.id
    else:
        if len(doc_ref.identifier) > 1:
            log.warning(
                "More than one identifier specified on the document. "
                + "Using the first occurrence."
            )
        doc_ref_identifier = doc_ref.identifier[0].value

    composition_identifier_system = (
        "https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition"
    )

    # assumed: the system and the document identifier are combined
    # into a FHIR Identifier
    return Identifier(
        **{
            "system": composition_identifier_system,
            "value": doc_ref_identifier,
        }
    )


class ResourceHandler:
    def __init__(self, averbis_pipeline: Pipeline):
        self.pipeline = averbis_pipeline
        self.bundle_builder = BundleBuilder()

    @MAPPING_FAILURES_COUNTER.count_exceptions()
    @MAPPING_DURATION_SUMMARY.time()
    def handle_documents(self, document_references: List[DocumentReference]) -> Bundle:
        """
        Process a list of DocumentReferences
        """
        all_resources = []
        bundle_id = None
        for document_reference in document_references:
            resources_from_document = self._process_documentreference(
                document_reference
            )
            composition = self._build_composition(
                document_reference, resources_from_document
            )

            bundle_id = composition.id

            all_resources.extend(resources_from_document)
            all_resources.append(composition)

        EXTRACTED_RESOURCES_COUNT_SUMMARY.observe(len(all_resources))

        result_bundle = self.bundle_builder.build_from_resources(
            all_resources, bundle_id
        )

        return result_bundle

    def handle_bundle(self, bundle: Bundle):
        """
        Process all FHIR DocumentReference resources from a given bundle
        """
        document_references = []
        for entry in bundle.entry:
            if entry.resource.resource_type == "DocumentReference":
                document_references.append(entry.resource)

        return self.handle_documents(document_references)

    def _build_composition(
        self, document_reference: DocumentReference, all_resources: List[Resource]
    ):
        composition_type = (
            document_reference.type
            if document_reference.type is not None
            else DISCHARGE_SUMMARY_CONCEPT
        )

        composition_subject = document_reference.subject
        composition_category = document_reference.category

        composition_encounter = None
        if document_reference.context is not None:
            if len(document_reference.context.encounter) > 1:
                log.warning(
                    "DocumentReference contains more than one encounter. "
                    + "Using the first."
                )
            composition_encounter = document_reference.context.encounter[0]

        composition_author = None
        composition_sections = []
        for resource in all_resources:
            resource_type = resource.resource_type

            if resource_type == "Device":
                author = Reference.construct()
                author.reference = f"Device/{resource.id}"
                author.type = "Device"
                composition_author = author
                continue

            # Check if no resource specific section exists yet and create it
            if not any(
                section.title == resource_type for section in composition_sections
            ):
                resource_section = CompositionSection.construct()
                resource_section.title = resource_type
                resource_section.entry = []
                composition_sections.append(resource_section)
                ind = len(composition_sections) - 1
            else:
                ind = [
                    ind
                    for ind, section in enumerate(composition_sections)
                    if section.title == resource_type
                ][0]

            entry_reference = Reference.construct()
            entry_reference.reference = resource_type + "/" + resource.id
            composition_sections[ind].entry.append(entry_reference)

        if composition_author is None:
            composition_author = Reference(**{"display": "Averbis Health Discovery"})

        composition_identifier = (
            _build_composition_identifier_from_documentreference(document_reference)
        )

        composition = Composition(
            **{
                "title": "NLP FHIR Results " + time.strftime("%Y-%m-%dT%H:%M"),
                "status": "final",
                "date": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)),
                "type": composition_type,
                "identifier": composition_identifier,
                "id": sha256_of_identifier(composition_identifier),
                "subject": composition_subject,
                "category": composition_category,
                "encounter": composition_encounter,
                "author": [composition_author],
                "section": composition_sections,
            }
        )

        return composition

    def _process_documentreference(self, document_reference: DocumentReference):
        log = structlog.get_logger().bind(
            document_id=f"{document_reference.get_resource_type()}/"
            + f"{document_reference.id}"
        )

        # Text extraction and text analysis
        (text, content_type, lang) = self._extract_text_from_resource(
            document_reference
        )

        DOCUMENT_LENGTH_SUMMARY.observe(len(text))

        averbis_result = None
        try:
            averbis_result = self._perform_text_analysis(
                text=text, mime_type=content_type, lang=lang
            )
        except Exception as exc:
            log.error("Failed to perform text analysis", error=exc)
            raise TransientError(exc)

        total_results = []

        # Building FHIR resources as results
        medication_statement_lists = []
        for val in averbis_result:
            if val["type"] == AHD_TYPE_DIAGNOSIS:
                mapped_condition = ahd_to_condition.get_fhir_condition(
                    val, document_reference
                )
                if mapped_condition is not None:
                    total_results.append(mapped_condition)

            if val["type"] == AHD_TYPE_DOCUMENT_ANNOTATION:
                # build_device is imported above; mapping the DocumentAnnotation
                # to a Device resource is an assumption
                device = build_device(val)
                if device is not None:
                    total_results.append(device)

            if val["type"] == AHD_TYPE_MEDICATION:
                # mapper function name assumed from the ahd_to_condition pattern
                statement = ahd_to_medication_statement.get_fhir_medication_statement(
                    val, document_reference
                )
                if statement is not None:
                    medication_statement_lists.append(statement)

            # if custom_mappers_enabled
            if os.getenv("CUSTOM_MAPPERS_ENABLED", "False").lower() in ["true", "1"]:
                total_results.extend(custom_mappers(val, document_reference))

        medication_results = []
        medication_statement_results = []
        for medication_statement_list in medication_statement_lists:
            for medication_statement_dict in medication_statement_list:
                medication_results.append(medication_statement_dict["medication"])
                medication_statement_results.append(
                    medication_statement_dict["statement"]
                )

        # de-duplicate any Medication and MedicationStatement resources
        medication_resources_unique = {m.id: m for m in medication_results}.values()
        medication_statements_unique = {
            m.id: m for m in medication_statement_results
        }.values()

        total_results.extend(medication_resources_unique)
        total_results.extend(medication_statements_unique)

        return total_results

    def _extract_text_from_resource(
        self,
        document_reference: DocumentReference,
    ) -> Tuple[str, str, str]:
        valid_content = [
            content
            for content in document_reference.content
            if content.attachment.data is not None
        ]

        if len(valid_content) == 0:
            raise ValueError(
                f"Document {document_reference.id} contains no valid content"
            )

        if len(valid_content) > 1:
            raise ValueError(
                f"Document {document_reference.id} contains more than one attachment"
            )

        content = valid_content[0]

        language = None
        if content.attachment.language:
            language = content.attachment.language.lower().split("-")[0]

        # the exact decoding of the attachment payload is assumed
        text = base64.b64decode(content.attachment.data).decode("utf-8")

        return (text, content.attachment.contentType, language)

    @tenacity.retry(
        # the retry policy itself is not recoverable; `after_log`, imported
        # above, is presumably part of it
        reraise=True,
    )
    def _perform_text_analysis(
        self, text: str, mime_type: str = "text/plain", lang: str = None
    ):
        # the requested annotation types and the argument keys are assumed
        types = ",".join(
            [AHD_TYPE_DIAGNOSIS, AHD_TYPE_MEDICATION, AHD_TYPE_DOCUMENT_ANNOTATION]
        )
        analyse_args = {"language": lang, "annotation_types": types}

        try:
            if mime_type == "text/html":
                return self.pipeline.analyse_html(text, **analyse_args)
            else:
                return self.pipeline.analyse_text(text, **analyse_args)
        except Exception as exc:
            log.exception(exc)
            raise
[ "term): results = super(TPActuacionCorrLookup, self).get_query(request, term) results = results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "= results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion def", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TiposFrecuenciasLookup, self).get_query(request,", "(item.denominacion) registry.register(ZonasLookup) @login_required class TercerosLookup(ModelLookup): model = Terceros search_fields =", "request, term): results = super(EtapasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "return \"%s\" % (item.denominacion) registry.register(TiposFrecuenciasLookup) @login_required class ZonasLookup(ModelLookup): model =", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(PeligrosLookup, self).get_query(request,", "item): return \"%s\" % (item.denominacion) registry.register(TiposCursosLookup) @login_required class TiposLegislacionLookup(ModelLookup): model", "TiposCursos search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "idtpturno: results = results.filter(tpturnos_id=idtpturno) return results def get_item_value(self, item): return", "get_query(self, request, term): results = super(UnidadesLookup, self).get_query(request, term) results =", "(item.denominacion) registry.register(TiposTemperaturasLookup) @login_required class TiposFrecuenciasLookup(ModelLookup): model = TiposFrecuencias search_fields =", "% (item.denominacion) registry.register(EtapasLookup) @login_required class PeligrosLookup(ModelLookup): model = Peligros search_fields", "get_query(self, request, term): results = super(TiposFrecuenciasLookup, self).get_query(request, term) results =", "\"%s\" % (item.denominacion) registry.register(ParametrosAnalisisLookup) @login_required class EtapasLookup(ModelLookup): model = Etapas", "% (item.denominacion) registry.register(CatalogoEquiposLookup) @login_required class PersonalLookup(ModelLookup): model = Personal search_fields", "get_query(self, request, term): results = super(TercerosTiposLookup, self).get_query(request, term) results =", "= Personal search_fields = ('apellidos__icontains',) def get_query(self, request, term): results", "def get_item_value(self, item): return item.personal.apellidos def get_item_label(self, item): return \"%s", "import Empresas from siva import settings __author__ = 'julian' @login_required", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TPActuacionPrevLookup, self).get_query(request,", "-*- coding: utf-8 -*- from selectable.decorators import login_required from maestros.models", "TiposLimitesCriticos search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "term): results = super(UnidadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "results = super(TercerosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TercerosTiposLookup,", "@login_required class TPActuacionPrevLookup(ModelLookup): model = 
TiposMedidasActuacion search_fields = ('denominacion__icontains',) def", "return results def get_item_value(self, item): return item.personal.apellidos def get_item_label(self, item):", "selectable.registry import registry from maestros_generales.models import Empresas from siva import", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TiposLegislacionLookup, self).get_query(request,", "= super(TPActuacionCorrLookup, self).get_query(request, term) results = results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "term): results = super(EtapasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "maestros_generales.models import Empresas from siva import settings __author__ = 'julian'", "@login_required class ConsumiblesLookup(ModelLookup): model = Consumibles search_fields = ('denominacion__icontains',) def", "def get_query(self, request, term): results = super(PeligrosLookup, self).get_query(request, term) results", "class TPActuacionPrevLookup(ModelLookup): model = TiposMedidasActuacion search_fields = ('denominacion__icontains',) def get_query(self,", "from selectable.decorators import login_required from maestros.models import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia,", "term): results = super(ParametrosAnalisisLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "item): return \"%s %s\" % (item.personal__apellidos, item.personal__nombres) registry.register(FirmasLookup) @login_required class", "return \"%s\" % (item.denominacion) registry.register(TiposCursosLookup) @login_required class TiposLegislacionLookup(ModelLookup): model =", "\"%s\" % (item.denominacion) registry.register(TiposLegislacionLookup) @login_required class ConsumiblesLookup(ModelLookup): model = Consumibles", "request, term): results = super(TiposTemperaturasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ConsumiblesLookup) @login_required class", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(EtapasLookup) @login_required class PeligrosLookup(ModelLookup):", "return \"%s - %s\" % (item.ihora, item.fhora) def get_item_label(self, item):", "results = super(FirmasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ConsumiblesLookup)", "(item.apellidos, item.nombres) registry.register(PersonalLookup) @login_required class TiposCursosLookup(ModelLookup): model = TiposCursos search_fields", "('denominacion__icontains',) def get_query(self, request, term): results = super(TiposCursosLookup, self).get_query(request, term)", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposFrecuenciasLookup)", "PersonalLookup(ModelLookup): model = Personal search_fields = ('apellidos__icontains',) def get_query(self, request,", "results = super(CatalogoEquiposLookup, self).get_query(request, term) results = 
results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TiposFrecuenciasLookup,", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(PeligrosLookup) @login_required class", "class TiposFrecuenciasLookup(ModelLookup): model = TiposFrecuencias search_fields = ('denominacion__icontains',) def get_query(self,", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TPLimitesCritLookup, self).get_query(request,", "term): results = super(PersonalLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "ModelLookup from selectable.registry import registry from maestros_generales.models import Empresas from", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposFrecuenciasLookup) @login_required class", "TiposMedidasVigilancia search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return", "model = TiposCursos search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(PeligrosLookup,", "import login_required from maestros.models import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias,", "= Consumibles search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "return \"%s\" % (item.denominacion) registry.register(CatalogoEquiposLookup) @login_required class PersonalLookup(ModelLookup): model =", "item): return \"%s\" % (item.denominacion) registry.register(EtapasLookup) @login_required class PeligrosLookup(ModelLookup): model", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPActuacionPrevLookup) @login_required class", "TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas, Terceros, CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis,", "= results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion", "('denominacion__icontains',) def get_query(self, request, term): results = super(TiposLegislacionLookup, self).get_query(request, term)", "('denominacion__icontains',) def get_query(self, request, term): results = super(PeligrosLookup, self).get_query(request, term)", "results = super(TiposTemperaturasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "registry.register(UnidadesLookup) @login_required class FirmasLookup(ModelLookup): model = Firmas search_fields = ('personal__apellidos__icontains',)", "Zonas search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "results = super(ParametrosAnalisisLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "get_query(self, request, term): 
results = super(ActividadesLookup, self).get_query(request, term) results =", "% (item.denominacion) registry.register(TercerosTiposLookup) @login_required class CatalogoEquiposLookup(ModelLookup): model = CatalogoEquipos search_fields", "results = results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion", "@login_required class TPLimitesCritLookup(ModelLookup): model = TiposLimitesCriticos search_fields = ('denominacion__icontains',) def", "super(PeligrosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "model = ParametrosAnalisis search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "Firmas search_fields = ('personal__apellidos__icontains',) def get_query(self, request, term): results =", "super(TPActuacionCorrLookup, self).get_query(request, term) results = results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "item): return \"%s\" % (item.denominacion) registry.register(TPActuacionCorrLookup) @login_required class TPLimitesCritLookup(ModelLookup): model", "registry.register(TiposTemperaturasLookup) @login_required class TiposFrecuenciasLookup(ModelLookup): model = TiposFrecuencias search_fields = ('denominacion__icontains',)", "@login_required class PeligrosLookup(ModelLookup): model = Peligros search_fields = ('denominacion__icontains',) def", "(item.denominacion) registry.register(TipoMedidasVigilanciaLookup) @login_required class TiposTemperaturasLookup(ModelLookup): model = TiposTemperaturas search_fields =", "class TPActuacionCorrLookup(ModelLookup): model = TiposMedidasActuacion search_fields = ('denominacion__icontains',) def get_query(self,", "results = results.filter(tpturnos_id=idtpturno) return results def get_item_value(self, item): return \"%s", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPActuacionCorrLookup) @login_required", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposTemperaturasLookup) @login_required class TiposFrecuenciasLookup(ModelLookup):", "= ('personal__apellidos__icontains',) def get_query(self, request, term): results = super(FirmasLookup, self).get_query(request,", "CatalogoEquiposLookup(ModelLookup): model = CatalogoEquipos search_fields = ('denominacion__icontains',) def get_query(self, request,", "results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.apellidos", "Firmas, HorarioTurnos from selectable.base import ModelLookup from selectable.registry import registry", "class PeligrosLookup(ModelLookup): model = Peligros search_fields = ('denominacion__icontains',) def get_query(self,", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ActividadesLookup)", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TercerosTiposLookup) @login_required", "% (item.denominacion) registry.register(ActividadesLookup) @login_required class TipoMedidasVigilanciaLookup(ModelLookup): model = TiposMedidasVigilancia search_fields", "get_query(self, request, term): results = super(TPLimitesCritLookup, 
self).get_query(request, term) results =", "@login_required class TercerosTiposLookup(ModelLookup): model = Terceros search_fields = ('denominacion__icontains',) def", "= HorarioTurnos search_fields = ('ihora__icontains','fhora__icontains') def get_query(self, request, term): results", "Peligros search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "model = Actividades search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "\"%s - %s\" % (item.ihora, item.fhora) def get_item_label(self, item): return", "def get_query(self, request, term): results = super(TiposTemperaturasLookup, self).get_query(request, term) results", "request, term): results = super(TPLimitesCritLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "(item.denominacion) registry.register(CatalogoEquiposLookup) @login_required class PersonalLookup(ModelLookup): model = Personal search_fields =", "= super(TiposLegislacionLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "HorarioTurnos search_fields = ('ihora__icontains','fhora__icontains') def get_query(self, request, term): results =", "FirmasLookup(ModelLookup): model = Firmas search_fields = ('personal__apellidos__icontains',) def get_query(self, request,", "= super(FirmasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "% (item.denominacion) registry.register(TPActuacionPrevLookup) @login_required class TPActuacionCorrLookup(ModelLookup): model = TiposMedidasActuacion search_fields", "utf-8 -*- from selectable.decorators import login_required from maestros.models import TiposMedidasActuacion,", "TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas, Terceros, CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis, Actividades,", "@login_required class TiposFrecuenciasLookup(ModelLookup): model = TiposFrecuencias search_fields = ('denominacion__icontains',) def", "term): results = super(ActividadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "def get_query(self, request, term): results = super(TiposFrecuenciasLookup, self).get_query(request, term) results", "return \"%s\" % (item.denominacion) registry.register(TercerosTiposLookup) @login_required class CatalogoEquiposLookup(ModelLookup): model =", "request, term): results = super(TiposLegislacionLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "model = Etapas search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "= super(UnidadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "model = TiposMedidasVigilancia search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "= Unidades search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "results = super(UnidadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "model = Unidades search_fields = ('denominacion__icontains',) def 
get_query(self, request, term):", "PeligrosLookup(ModelLookup): model = Peligros search_fields = ('denominacion__icontains',) def get_query(self, request,", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(EtapasLookup) @login_required class", "def get_query(self, request, term): results = super(FirmasLookup, self).get_query(request, term) results", "return \"%s\" % (item.denominacion) registry.register(ZonasLookup) @login_required class TercerosLookup(ModelLookup): model =", "request, term): results = super(ZonasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "\"%s\" % (item.denominacion) registry.register(CatalogoEquiposLookup) @login_required class PersonalLookup(ModelLookup): model = Personal", "item): return \"%s %s\" % (item.apellidos, item.nombres) registry.register(PersonalLookup) @login_required class", "def get_query(self, request, term): results = super(CatalogoEquiposLookup, self).get_query(request, term) results", "super(HorarioTurnoLookup, self).get_query(request, term) idtpturno = request.GET.get('idtpturno', '') if idtpturno: results", "= super(EtapasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TipoMedidasVigilanciaLookup) @login_required", "% (item.denominacion) registry.register(UnidadesLookup) @login_required class FirmasLookup(ModelLookup): model = Firmas search_fields", "= super(TipoMedidasVigilanciaLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "return \"%s\" % (item.denominacion) registry.register(TipoMedidasVigilanciaLookup) @login_required class TiposTemperaturasLookup(ModelLookup): model =", "get_query(self, request, term): results = super(FirmasLookup, self).get_query(request, term) results =", "(item.denominacion) registry.register(TPActuacionPrevLookup) @login_required class TPActuacionCorrLookup(ModelLookup): model = TiposMedidasActuacion search_fields =", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(CatalogoEquiposLookup, self).get_query(request,", "TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas, Terceros, CatalogoEquipos, Personal, Consumibles,", "return \"%s\" % (item.denominacion) registry.register(TPLimitesCritLookup) @login_required class ActividadesLookup(ModelLookup): model =", "return results def get_item_value(self, item): return \"%s - %s\" %", "ActividadesLookup(ModelLookup): model = Actividades search_fields = ('denominacion__icontains',) def get_query(self, request,", "def get_query(self, request, term): results = super(ConsumiblesLookup, self).get_query(request, term) results", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TPActuacionPrevLookup,", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(UnidadesLookup) @login_required class", "get_query(self, request, term): results = super(HorarioTurnoLookup, self).get_query(request, term) idtpturno =", "Empresas from siva import settings __author__ = 'julian' @login_required class", "return \"%s\" % (item.denominacion) registry.register(TercerosLookup) @login_required class 
TercerosTiposLookup(ModelLookup): model =", "= super(TPActuacionPrevLookup, self).get_query(request, term) results = results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "ConsumiblesLookup(ModelLookup): model = Consumibles search_fields = ('denominacion__icontains',) def get_query(self, request,", "request, term): results = super(TipoMedidasVigilanciaLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "@login_required class FirmasLookup(ModelLookup): model = Firmas search_fields = ('personal__apellidos__icontains',) def", "get_query(self, request, term): results = super(TiposCursosLookup, self).get_query(request, term) results =", "= results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.apellidos def", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposFrecuenciasLookup) @login_required", "return \"%s\" % (item.denominacion) registry.register(TiposTemperaturasLookup) @login_required class TiposFrecuenciasLookup(ModelLookup): model =", "Personal search_fields = ('apellidos__icontains',) def get_query(self, request, term): results =", "results = super(HorarioTurnoLookup, self).get_query(request, term) idtpturno = request.GET.get('idtpturno', '') if", "ParametrosAnalisis search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "('denominacion__icontains',) def get_query(self, request, term): results = super(TPLimitesCritLookup, self).get_query(request, term)", "term): results = super(TiposLegislacionLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPActuacionPrevLookup) @login_required class TPActuacionCorrLookup(ModelLookup):", "(item.denominacion) registry.register(TercerosLookup) @login_required class TercerosTiposLookup(ModelLookup): model = Terceros search_fields =", "selectable.base import ModelLookup from selectable.registry import registry from maestros_generales.models import", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(ConsumiblesLookup,", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ParametrosAnalisisLookup) @login_required class EtapasLookup(ModelLookup):", "\"%s\" % (item.denominacion) registry.register(TPActuacionPrevLookup) @login_required class TPActuacionCorrLookup(ModelLookup): model = TiposMedidasActuacion", "results = super(EtapasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "= super(ZonasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item):", "super(ParametrosAnalisisLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "class TiposLegislacionLookup(ModelLookup): model = TiposLegislacion 
search_fields = ('denominacion__icontains',) def get_query(self,", "get_query(self, request, term): results = super(TiposTemperaturasLookup, self).get_query(request, term) results =", "Terceros search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "item.apellidos def get_item_label(self, item): return \"%s %s\" % (item.apellidos, item.nombres)", "def get_query(self, request, term): results = super(TPActuacionCorrLookup, self).get_query(request, term) results", "registry.register(TiposLegislacionLookup) @login_required class ConsumiblesLookup(ModelLookup): model = Consumibles search_fields = ('denominacion__icontains',)", "term): results = super(TiposTemperaturasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "ParametrosAnalisis, Actividades, Etapas, Peligros, TiposCursos, TiposLegislacion, Unidades, Firmas, HorarioTurnos from", "results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion def", "('apellidos__icontains',) def get_query(self, request, term): results = super(PersonalLookup, self).get_query(request, term)", "model = TiposFrecuencias search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "= super(TercerosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TercerosLookup) @login_required class TercerosTiposLookup(ModelLookup):", "('denominacion__icontains',) def get_query(self, request, term): results = super(ConsumiblesLookup, self).get_query(request, term)", "term) idtpturno = request.GET.get('idtpturno', '') if idtpturno: results = results.filter(tpturnos_id=idtpturno)", "model = TiposLimitesCriticos search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "(item.denominacion) registry.register(TiposCursosLookup) @login_required class TiposLegislacionLookup(ModelLookup): model = TiposLegislacion search_fields =", "results = super(PeligrosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "term): results = super(FirmasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "term): results = super(TipoMedidasVigilanciaLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "('ihora__icontains','fhora__icontains') def get_query(self, request, term): results = super(HorarioTurnoLookup, self).get_query(request, term)", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPLimitesCritLookup) @login_required class", "super(EtapasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(PeligrosLookup) @login_required", "CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis, Actividades, Etapas, Peligros, TiposCursos, TiposLegislacion, Unidades,", "= 
super(ConsumiblesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "model = TiposTemperaturas search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "def get_query(self, request, term): results = super(TiposCursosLookup, self).get_query(request, term) results", "item): return \"%s - %s\" % (item.ihora, item.fhora) def get_item_label(self,", "request, term): results = super(TiposCursosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "\"%s\" % (item.denominacion) registry.register(ActividadesLookup) @login_required class TipoMedidasVigilanciaLookup(ModelLookup): model = TiposMedidasVigilancia", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TiposLegislacionLookup,", "return \"%s\" % (item.denominacion) registry.register(UnidadesLookup) @login_required class FirmasLookup(ModelLookup): model =", "super(TercerosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "get_query(self, request, term): results = super(CatalogoEquiposLookup, self).get_query(request, term) results =", "= Zonas search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "item): return \"%s\" % (item.denominacion) registry.register(TiposLegislacionLookup) @login_required class ConsumiblesLookup(ModelLookup): model", "request, term): results = super(UnidadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "# -*- coding: utf-8 -*- from selectable.decorators import login_required from", "TPActuacionPrevLookup(ModelLookup): model = TiposMedidasActuacion search_fields = ('denominacion__icontains',) def get_query(self, request,", "EtapasLookup(ModelLookup): model = Etapas search_fields = ('denominacion__icontains',) def get_query(self, request,", "get_item_value(self, item): return item.personal.apellidos def get_item_label(self, item): return \"%s %s\"", "= ('apellidos__icontains',) def get_query(self, request, term): results = super(PersonalLookup, self).get_query(request,", "def get_item_label(self, item): return \"%s %s\" % (item.personal__apellidos, item.personal__nombres) registry.register(FirmasLookup)", "\"%s %s\" % (item.personal__apellidos, item.personal__nombres) registry.register(FirmasLookup) @login_required class HorarioTurnoLookup(ModelLookup): model", "= super(ActividadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "= super(TiposFrecuenciasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "super(TiposFrecuenciasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "request, term): results = super(PeligrosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "class HorarioTurnoLookup(ModelLookup): model = HorarioTurnos search_fields = ('ihora__icontains','fhora__icontains') def get_query(self,", "= 
super(CatalogoEquiposLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "self).get_query(request, term) results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "(item.denominacion) registry.register(ParametrosAnalisisLookup) @login_required class EtapasLookup(ModelLookup): model = Etapas search_fields =", "term): results = super(ConsumiblesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "return \"%s\" % (item.denominacion) registry.register(PeligrosLookup) @login_required class UnidadesLookup(ModelLookup): model =", "TiposTemperaturas, TiposFrecuencias, Zonas, Terceros, CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis, Actividades, Etapas,", "%s\" % (item.ihora, item.fhora) def get_item_label(self, item): return \"%s -", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TercerosTiposLookup, self).get_query(request,", "from selectable.base import ModelLookup from selectable.registry import registry from maestros_generales.models", "TercerosTiposLookup(ModelLookup): model = Terceros search_fields = ('denominacion__icontains',) def get_query(self, request,", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TercerosLookup,", "TiposCursosLookup(ModelLookup): model = TiposCursos search_fields = ('denominacion__icontains',) def get_query(self, request,", "= ('ihora__icontains','fhora__icontains') def get_query(self, request, term): results = super(HorarioTurnoLookup, self).get_query(request,", "return results def get_item_value(self, item): return item.denominacion def get_item_label(self, item):", "(item.denominacion) registry.register(TiposLegislacionLookup) @login_required class ConsumiblesLookup(ModelLookup): model = Consumibles search_fields =", "@login_required class TipoMedidasVigilanciaLookup(ModelLookup): model = TiposMedidasVigilancia search_fields = ('denominacion__icontains',) def", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(ActividadesLookup,", "maestros.models import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas, Terceros, CatalogoEquipos,", "results = super(TercerosTiposLookup, self).get_query(request, term) results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "request, term): results = super(TercerosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "settings __author__ = 'julian' @login_required class TPActuacionPrevLookup(ModelLookup): model = TiposMedidasActuacion", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TipoMedidasVigilanciaLookup) @login_required class TiposTemperaturasLookup(ModelLookup):", "request, term): results = super(TiposFrecuenciasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "\"%s\" % (item.denominacion) registry.register(TPActuacionCorrLookup) @login_required class TPLimitesCritLookup(ModelLookup): model = 
TiposLimitesCriticos", "item): return \"%s\" % (item.denominacion) registry.register(TercerosTiposLookup) @login_required class CatalogoEquiposLookup(ModelLookup): model", "get_query(self, request, term): results = super(PeligrosLookup, self).get_query(request, term) results =", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(ConsumiblesLookup, self).get_query(request,", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TiposTemperaturasLookup,", "super(TiposLegislacionLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "super(UnidadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "request, term): results = super(FirmasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "item): return \"%s\" % (item.denominacion) registry.register(PeligrosLookup) @login_required class UnidadesLookup(ModelLookup): model", "@login_required class ZonasLookup(ModelLookup): model = Zonas search_fields = ('denominacion__icontains',) def", "% (item.denominacion) registry.register(ZonasLookup) @login_required class TercerosLookup(ModelLookup): model = Terceros search_fields", "search_fields = ('personal__apellidos__icontains',) def get_query(self, request, term): results = super(FirmasLookup,", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TercerosTiposLookup)", "\"%s\" % (item.denominacion) registry.register(TiposFrecuenciasLookup) @login_required class ZonasLookup(ModelLookup): model = Zonas", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(PeligrosLookup) @login_required class UnidadesLookup(ModelLookup):", "registry.register(TiposCursosLookup) @login_required class TiposLegislacionLookup(ModelLookup): model = TiposLegislacion search_fields = ('denominacion__icontains',)", "('denominacion__icontains',) def get_query(self, request, term): results = super(TiposTemperaturasLookup, self).get_query(request, term)", "= TiposCursos search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "results def get_item_value(self, item): return item.personal.apellidos def get_item_label(self, item): return", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ActividadesLookup) @login_required", "__author__ = 'julian' @login_required class TPActuacionPrevLookup(ModelLookup): model = TiposMedidasActuacion search_fields", "request, term): results = super(ParametrosAnalisisLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "return \"%s\" % (item.denominacion) registry.register(TPActuacionPrevLookup) @login_required class TPActuacionCorrLookup(ModelLookup): model =", "('denominacion__icontains',) def get_query(self, request, term): results = super(EtapasLookup, self).get_query(request, term)", "import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas, Terceros, CatalogoEquipos, Personal,", "results def get_item_value(self, item): return \"%s - %s\" % (item.ihora,", "search_fields = 
('denominacion__icontains',) def get_query(self, request, term): results = super(UnidadesLookup,", "('denominacion__icontains',) def get_query(self, request, term): results = super(UnidadesLookup, self).get_query(request, term)", "self).get_query(request, term) results = results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item):", "model = Personal search_fields = ('apellidos__icontains',) def get_query(self, request, term):", "item): return \"%s\" % (item.denominacion) registry.register(ConsumiblesLookup) @login_required class ParametrosAnalisisLookup(ModelLookup): model", "@login_required class ParametrosAnalisisLookup(ModelLookup): model = ParametrosAnalisis search_fields = ('denominacion__icontains',) def", "results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion def get_item_label(self,", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TercerosLookup)", "import settings __author__ = 'julian' @login_required class TPActuacionPrevLookup(ModelLookup): model =", "item): return item.personal.apellidos def get_item_label(self, item): return \"%s %s\" %", "UnidadesLookup(ModelLookup): model = Unidades search_fields = ('denominacion__icontains',) def get_query(self, request,", "@login_required class TPActuacionCorrLookup(ModelLookup): model = TiposMedidasActuacion search_fields = ('denominacion__icontains',) def", "-*- from selectable.decorators import login_required from maestros.models import TiposMedidasActuacion, TiposLimitesCriticos,", "Zonas, Terceros, CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis, Actividades, Etapas, Peligros, TiposCursos,", "item): return \"%s\" % (item.denominacion) registry.register(CatalogoEquiposLookup) @login_required class PersonalLookup(ModelLookup): model", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposLegislacionLookup)", "\"%s\" % (item.denominacion) registry.register(ConsumiblesLookup) @login_required class ParametrosAnalisisLookup(ModelLookup): model = ParametrosAnalisis", "class UnidadesLookup(ModelLookup): model = Unidades search_fields = ('denominacion__icontains',) def get_query(self,", "term): results = super(CatalogoEquiposLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "request, term): results = super(TPActuacionCorrLookup, self).get_query(request, term) results = results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user))", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ZonasLookup) @login_required class TercerosLookup(ModelLookup):", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TPLimitesCritLookup,", "import registry from maestros_generales.models import Empresas from siva import settings", "TiposTemperaturasLookup(ModelLookup): model = TiposTemperaturas search_fields = ('denominacion__icontains',) def get_query(self, request,", "@login_required class TiposLegislacionLookup(ModelLookup): model = TiposLegislacion search_fields = ('denominacion__icontains',) def", "Etapas, Peligros, TiposCursos, TiposLegislacion, Unidades, Firmas, HorarioTurnos from selectable.base import", "model = Firmas 
search_fields = ('personal__apellidos__icontains',) def get_query(self, request, term):", "return \"%s %s\" % (item.apellidos, item.nombres) registry.register(PersonalLookup) @login_required class TiposCursosLookup(ModelLookup):", "login_required from maestros.models import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas,", "= super(TercerosTiposLookup, self).get_query(request, term) results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "= TiposTemperaturas search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "% (item.denominacion) registry.register(TiposLegislacionLookup) @login_required class ConsumiblesLookup(ModelLookup): model = Consumibles search_fields", "def get_query(self, request, term): results = super(ZonasLookup, self).get_query(request, term) results", "= super(PeligrosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "term): results = super(ZonasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposCursosLookup) @login_required class", "from siva import settings __author__ = 'julian' @login_required class TPActuacionPrevLookup(ModelLookup):", "TiposLegislacionLookup(ModelLookup): model = TiposLegislacion search_fields = ('denominacion__icontains',) def get_query(self, request,", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TercerosLookup, self).get_query(request,", "item): return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion)", "item): return item.apellidos def get_item_label(self, item): return \"%s %s\" %", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPActuacionCorrLookup) @login_required class", "registry.register(PersonalLookup) @login_required class TiposCursosLookup(ModelLookup): model = TiposCursos search_fields = ('denominacion__icontains',)", "def get_item_value(self, item): return item.denominacion def get_item_label(self, item): return \"%s\"", "item): return \"%s\" % (item.denominacion) registry.register(TipoMedidasVigilanciaLookup) @login_required class TiposTemperaturasLookup(ModelLookup): model", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(CatalogoEquiposLookup) @login_required", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(EtapasLookup,", "(item.denominacion) registry.register(PeligrosLookup) @login_required class UnidadesLookup(ModelLookup): model = Unidades search_fields =", "= Firmas search_fields = ('personal__apellidos__icontains',) def get_query(self, request, term): results", "results = super(ZonasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "request, term): results = super(TercerosTiposLookup, self).get_query(request, term) results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO,", "(item.denominacion) registry.register(TPActuacionCorrLookup) @login_required class TPLimitesCritLookup(ModelLookup): model = 
TiposLimitesCriticos search_fields =", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ZonasLookup) @login_required", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TipoMedidasVigilanciaLookup) @login_required class", "registry.register(TercerosLookup) @login_required class TercerosTiposLookup(ModelLookup): model = Terceros search_fields = ('denominacion__icontains',)", "term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return", "model = HorarioTurnos search_fields = ('ihora__icontains','fhora__icontains') def get_query(self, request, term):", "def get_query(self, request, term): results = super(PersonalLookup, self).get_query(request, term) results", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPLimitesCritLookup) @login_required", "item.personal.apellidos def get_item_label(self, item): return \"%s %s\" % (item.personal__apellidos, item.personal__nombres)", "%s\" % (item.apellidos, item.nombres) registry.register(PersonalLookup) @login_required class TiposCursosLookup(ModelLookup): model =", "TiposCursos, TiposLegislacion, Unidades, Firmas, HorarioTurnos from selectable.base import ModelLookup from", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ZonasLookup)", "(item.denominacion) registry.register(TPLimitesCritLookup) @login_required class ActividadesLookup(ModelLookup): model = Actividades search_fields =", "registry.register(ActividadesLookup) @login_required class TipoMedidasVigilanciaLookup(ModelLookup): model = TiposMedidasVigilancia search_fields = ('denominacion__icontains',)", "registry from maestros_generales.models import Empresas from siva import settings __author__", "get_item_value(self, item): return \"%s - %s\" % (item.ihora, item.fhora) def", "selectable.decorators import login_required from maestros.models import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas,", "class ParametrosAnalisisLookup(ModelLookup): model = ParametrosAnalisis search_fields = ('denominacion__icontains',) def get_query(self,", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPLimitesCritLookup)", "registry.register(ParametrosAnalisisLookup) @login_required class EtapasLookup(ModelLookup): model = Etapas search_fields = ('denominacion__icontains',)", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(CatalogoEquiposLookup,", "= super(TiposCursosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "return item.personal.apellidos def get_item_label(self, item): return \"%s %s\" % (item.personal__apellidos,", "return \"%s\" % (item.denominacion) registry.register(ConsumiblesLookup) @login_required class ParametrosAnalisisLookup(ModelLookup): model =", "= ParametrosAnalisis search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "HorarioTurnos from selectable.base import ModelLookup from selectable.registry import registry from", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TiposTemperaturasLookup, self).get_query(request,", "@login_required class 
CatalogoEquiposLookup(ModelLookup): model = CatalogoEquipos search_fields = ('denominacion__icontains',) def", "registry.register(TPLimitesCritLookup) @login_required class ActividadesLookup(ModelLookup): model = Actividades search_fields = ('denominacion__icontains',)", "model = Zonas search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "TiposMedidasActuacion search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "super(TipoMedidasVigilanciaLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "('denominacion__icontains',) def get_query(self, request, term): results = super(ZonasLookup, self).get_query(request, term)", "(item.personal__apellidos, item.personal__nombres) registry.register(FirmasLookup) @login_required class HorarioTurnoLookup(ModelLookup): model = HorarioTurnos search_fields", "registry.register(EtapasLookup) @login_required class PeligrosLookup(ModelLookup): model = Peligros search_fields = ('denominacion__icontains',)", "= super(ParametrosAnalisisLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TPActuacionCorrLookup,", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposTemperaturasLookup)", "model = Consumibles search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ParametrosAnalisisLookup) @login_required class", "('denominacion__icontains',) def get_query(self, request, term): results = super(TPActuacionPrevLookup, self).get_query(request, term)", "registry.register(TipoMedidasVigilanciaLookup) @login_required class TiposTemperaturasLookup(ModelLookup): model = TiposTemperaturas search_fields = ('denominacion__icontains',)", "@login_required class TiposCursosLookup(ModelLookup): model = TiposCursos search_fields = ('denominacion__icontains',) def", "HorarioTurnoLookup(ModelLookup): model = HorarioTurnos search_fields = ('ihora__icontains','fhora__icontains') def get_query(self, request,", "= results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.personal.apellidos def", "idtpturno = request.GET.get('idtpturno', '') if idtpturno: results = results.filter(tpturnos_id=idtpturno) return", "class TiposCursosLookup(ModelLookup): model = TiposCursos search_fields = ('denominacion__icontains',) def get_query(self,", "('personal__apellidos__icontains',) def get_query(self, request, term): results = super(FirmasLookup, self).get_query(request, term)", "Terceros, CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis, Actividades, Etapas, Peligros, TiposCursos, TiposLegislacion,", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPLimitesCritLookup) @login_required class ActividadesLookup(ModelLookup):", "def get_query(self, request, term): results = super(UnidadesLookup, self).get_query(request, term) results", "Peligros, TiposCursos, TiposLegislacion, Unidades, Firmas, HorarioTurnos from selectable.base import ModelLookup", "class TercerosLookup(ModelLookup): model = Terceros search_fields = 
('denominacion__icontains',) def get_query(self,", "item): return \"%s\" % (item.denominacion) registry.register(TPActuacionPrevLookup) @login_required class TPActuacionCorrLookup(ModelLookup): model", "ParametrosAnalisisLookup(ModelLookup): model = ParametrosAnalisis search_fields = ('denominacion__icontains',) def get_query(self, request,", "('denominacion__icontains',) def get_query(self, request, term): results = super(TercerosTiposLookup, self).get_query(request, term)", "Actividades, Etapas, Peligros, TiposCursos, TiposLegislacion, Unidades, Firmas, HorarioTurnos from selectable.base", "get_query(self, request, term): results = super(TipoMedidasVigilanciaLookup, self).get_query(request, term) results =", "super(ConsumiblesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "term): results = super(PeligrosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "def get_query(self, request, term): results = super(TPLimitesCritLookup, self).get_query(request, term) results", "get_query(self, request, term): results = super(EtapasLookup, self).get_query(request, term) results =", "@login_required class TercerosLookup(ModelLookup): model = Terceros search_fields = ('denominacion__icontains',) def", "item.fhora) def get_item_label(self, item): return \"%s - %s\" % (item.ihora,", "term) results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item):", "% (item.personal__apellidos, item.personal__nombres) registry.register(FirmasLookup) @login_required class HorarioTurnoLookup(ModelLookup): model = HorarioTurnos", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(ActividadesLookup, self).get_query(request,", "item): return \"%s\" % (item.denominacion) registry.register(TercerosLookup) @login_required class TercerosTiposLookup(ModelLookup): model", "= TiposLegislacion search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion def get_item_label(self,", "get_item_value(self, item): return item.denominacion def get_item_label(self, item): return \"%s\" %", "item): return \"%s\" % (item.denominacion) registry.register(ZonasLookup) @login_required class TercerosLookup(ModelLookup): model", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(PeligrosLookup)", "(item.denominacion) registry.register(UnidadesLookup) @login_required class FirmasLookup(ModelLookup): model = Firmas search_fields =", "get_item_label(self, item): return \"%s - %s\" % (item.ihora, item.fhora) registry.register(HorarioTurnoLookup)", "= results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion def", "results.filter(tpturnos_id=idtpturno) return results def get_item_value(self, item): return \"%s - %s\"", "get_query(self, request, term): results = super(ConsumiblesLookup, self).get_query(request, term) results =", "def get_query(self, request, term): results = super(EtapasLookup, self).get_query(request, term) results", 
"('denominacion__icontains',) def get_query(self, request, term): results = super(TiposFrecuenciasLookup, self).get_query(request, term)", "Consumibles, ParametrosAnalisis, Actividades, Etapas, Peligros, TiposCursos, TiposLegislacion, Unidades, Firmas, HorarioTurnos", "(item.denominacion) registry.register(TercerosTiposLookup) @login_required class CatalogoEquiposLookup(ModelLookup): model = CatalogoEquipos search_fields =", "if idtpturno: results = results.filter(tpturnos_id=idtpturno) return results def get_item_value(self, item):", "('denominacion__icontains',) def get_query(self, request, term): results = super(TPActuacionCorrLookup, self).get_query(request, term)", "results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.personal.apellidos", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(UnidadesLookup, self).get_query(request,", "= TiposLimitesCriticos search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "('denominacion__icontains',) def get_query(self, request, term): results = super(TipoMedidasVigilanciaLookup, self).get_query(request, term)", "request, term): results = super(ConsumiblesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "@login_required class ActividadesLookup(ModelLookup): model = Actividades search_fields = ('denominacion__icontains',) def", "super(ActividadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "from selectable.registry import registry from maestros_generales.models import Empresas from siva", "model = TiposMedidasActuacion search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "item): return \"%s\" % (item.denominacion) registry.register(TiposFrecuenciasLookup) @login_required class ZonasLookup(ModelLookup): model", "= Etapas search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "def get_query(self, request, term): results = super(HorarioTurnoLookup, self).get_query(request, term) idtpturno", "(item.denominacion) registry.register(EtapasLookup) @login_required class PeligrosLookup(ModelLookup): model = Peligros search_fields =", "item.personal__nombres) registry.register(FirmasLookup) @login_required class HorarioTurnoLookup(ModelLookup): model = HorarioTurnos search_fields =", "return \"%s\" % (item.denominacion) registry.register(TiposLegislacionLookup) @login_required class ConsumiblesLookup(ModelLookup): model =", "term): results = super(TiposCursosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TercerosLookup) @login_required", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(ParametrosAnalisisLookup,", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposFrecuenciasLookup) @login_required class ZonasLookup(ModelLookup):", "results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.personal.apellidos def get_item_label(self,", 
"results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.apellidos def get_item_label(self,", "return item.apellidos def get_item_label(self, item): return \"%s %s\" % (item.apellidos,", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TipoMedidasVigilanciaLookup,", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TipoMedidasVigilanciaLookup)", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPActuacionCorrLookup)", "get_item_value(self, item): return item.apellidos def get_item_label(self, item): return \"%s %s\"", "(item.denominacion) registry.register(ConsumiblesLookup) @login_required class ParametrosAnalisisLookup(ModelLookup): model = ParametrosAnalisis search_fields =", "results = super(TiposCursosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "= super(TPLimitesCritLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "TiposFrecuencias, Zonas, Terceros, CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis, Actividades, Etapas, Peligros,", "registry.register(TiposFrecuenciasLookup) @login_required class ZonasLookup(ModelLookup): model = Zonas search_fields = ('denominacion__icontains',)", "('denominacion__icontains',) def get_query(self, request, term): results = super(CatalogoEquiposLookup, self).get_query(request, term)", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(EtapasLookup, self).get_query(request,", "Unidades, Firmas, HorarioTurnos from selectable.base import ModelLookup from selectable.registry import", "get_query(self, request, term): results = super(PersonalLookup, self).get_query(request, term) results =", "= TiposMedidasVigilancia search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "\"%s\" % (item.denominacion) registry.register(ZonasLookup) @login_required class TercerosLookup(ModelLookup): model = Terceros", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposTemperaturasLookup) @login_required", "% (item.denominacion) registry.register(ConsumiblesLookup) @login_required class ParametrosAnalisisLookup(ModelLookup): model = ParametrosAnalisis search_fields", "get_query(self, request, term): results = super(TPActuacionCorrLookup, self).get_query(request, term) results =", "results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion def get_item_label(self,", "(item.denominacion) registry.register(ActividadesLookup) @login_required class TipoMedidasVigilanciaLookup(ModelLookup): model = TiposMedidasVigilancia search_fields =", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposLegislacionLookup) @login_required class ConsumiblesLookup(ModelLookup):", "super(ZonasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "results = super(PersonalLookup, self).get_query(request, term) results = 
results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "% (item.denominacion) registry.register(TipoMedidasVigilanciaLookup) @login_required class TiposTemperaturasLookup(ModelLookup): model = TiposTemperaturas search_fields", "\"%s\" % (item.denominacion) registry.register(TipoMedidasVigilanciaLookup) @login_required class TiposTemperaturasLookup(ModelLookup): model = TiposTemperaturas", "term): results = super(TercerosTiposLookup, self).get_query(request, term) results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user))", "= results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion def", "TPLimitesCritLookup(ModelLookup): model = TiposLimitesCriticos search_fields = ('denominacion__icontains',) def get_query(self, request,", "class ActividadesLookup(ModelLookup): model = Actividades search_fields = ('denominacion__icontains',) def get_query(self,", "= super(TiposTemperaturasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "@login_required class HorarioTurnoLookup(ModelLookup): model = HorarioTurnos search_fields = ('ihora__icontains','fhora__icontains') def", "super(FirmasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "class CatalogoEquiposLookup(ModelLookup): model = CatalogoEquipos search_fields = ('denominacion__icontains',) def get_query(self,", "get_query(self, request, term): results = super(TPActuacionPrevLookup, self).get_query(request, term) results =", "(item.denominacion) registry.register(TiposFrecuenciasLookup) @login_required class ZonasLookup(ModelLookup): model = Zonas search_fields =", "% (item.denominacion) registry.register(PeligrosLookup) @login_required class UnidadesLookup(ModelLookup): model = Unidades search_fields", "\"%s\" % (item.denominacion) registry.register(TiposTemperaturasLookup) @login_required class TiposFrecuenciasLookup(ModelLookup): model = TiposFrecuencias", "= Actividades search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "from maestros_generales.models import Empresas from siva import settings __author__ =", "= super(HorarioTurnoLookup, self).get_query(request, term) idtpturno = request.GET.get('idtpturno', '') if idtpturno:", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(CatalogoEquiposLookup) @login_required class", "\"%s\" % (item.denominacion) registry.register(TercerosTiposLookup) @login_required class CatalogoEquiposLookup(ModelLookup): model = CatalogoEquipos", "Etapas search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "'') if idtpturno: results = results.filter(tpturnos_id=idtpturno) return results def get_item_value(self,", "return results def get_item_value(self, item): return item.apellidos def get_item_label(self, item):", "registry.register(TPActuacionPrevLookup) @login_required class TPActuacionCorrLookup(ModelLookup): model = TiposMedidasActuacion search_fields = ('denominacion__icontains',)", "get_query(self, request, term): results = super(ZonasLookup, self).get_query(request, term) results =", "return item.denominacion def get_item_label(self, 
item): return \"%s\" % (item.denominacion) registry.register(UnidadesLookup)", "return \"%s\" % (item.denominacion) registry.register(ParametrosAnalisisLookup) @login_required class EtapasLookup(ModelLookup): model =", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposCursosLookup) @login_required class TiposLegislacionLookup(ModelLookup):", "TiposFrecuencias search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "term) results = results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return", "ZonasLookup(ModelLookup): model = Zonas search_fields = ('denominacion__icontains',) def get_query(self, request,", "results = results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion", "coding: utf-8 -*- from selectable.decorators import login_required from maestros.models import", "import ModelLookup from selectable.registry import registry from maestros_generales.models import Empresas", "= CatalogoEquipos search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TPActuacionCorrLookup, self).get_query(request,", "def get_item_label(self, item): return \"%s - %s\" % (item.ihora, item.fhora)", "\"%s\" % (item.denominacion) registry.register(PeligrosLookup) @login_required class UnidadesLookup(ModelLookup): model = Unidades", "def get_item_value(self, item): return \"%s - %s\" % (item.ihora, item.fhora)", "item): return \"%s\" % (item.denominacion) registry.register(TPLimitesCritLookup) @login_required class ActividadesLookup(ModelLookup): model", "return \"%s %s\" % (item.personal__apellidos, item.personal__nombres) registry.register(FirmasLookup) @login_required class HorarioTurnoLookup(ModelLookup):", "term): results = super(TiposFrecuenciasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "def get_item_value(self, item): return item.apellidos def get_item_label(self, item): return \"%s", "def get_query(self, request, term): results = super(TercerosTiposLookup, self).get_query(request, term) results", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposCursosLookup) @login_required", "class ConsumiblesLookup(ModelLookup): model = Consumibles search_fields = ('denominacion__icontains',) def get_query(self,", "@login_required class UnidadesLookup(ModelLookup): model = Unidades search_fields = ('denominacion__icontains',) def", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(ZonasLookup,", "super(TiposCursosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ZonasLookup) @login_required class", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposLegislacionLookup) @login_required class", "term): results = super(TercerosLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "= 
('denominacion__icontains',) def get_query(self, request, term): results = super(ParametrosAnalisisLookup, self).get_query(request,", "= 'julian' @login_required class TPActuacionPrevLookup(ModelLookup): model = TiposMedidasActuacion search_fields =", "item): return \"%s\" % (item.denominacion) registry.register(ActividadesLookup) @login_required class TipoMedidasVigilanciaLookup(ModelLookup): model", "('denominacion__icontains',) def get_query(self, request, term): results = super(ActividadesLookup, self).get_query(request, term)", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(CatalogoEquiposLookup)", "('denominacion__icontains',) def get_query(self, request, term): results = super(TercerosLookup, self).get_query(request, term)", "results = super(TipoMedidasVigilanciaLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "request, term): results = super(CatalogoEquiposLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "TercerosLookup(ModelLookup): model = Terceros search_fields = ('denominacion__icontains',) def get_query(self, request,", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposTemperaturasLookup) @login_required class", "def get_item_label(self, item): return \"%s %s\" % (item.apellidos, item.nombres) registry.register(PersonalLookup)", "results = super(ActividadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "class TercerosTiposLookup(ModelLookup): model = Terceros search_fields = ('denominacion__icontains',) def get_query(self,", "Actividades search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "= TiposMedidasActuacion search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "results = super(ConsumiblesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "\"%s\" % (item.denominacion) registry.register(TercerosLookup) @login_required class TercerosTiposLookup(ModelLookup): model = Terceros", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(UnidadesLookup) @login_required", "% (item.ihora, item.fhora) def get_item_label(self, item): return \"%s - %s\"", "return \"%s\" % (item.denominacion) registry.register(ActividadesLookup) @login_required class TipoMedidasVigilanciaLookup(ModelLookup): model =", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPActuacionPrevLookup) @login_required", "TiposLegislacion, Unidades, Firmas, HorarioTurnos from selectable.base import ModelLookup from selectable.registry", "search_fields = ('denominacion__icontains',) def get_query(self, request, term): results = super(TiposCursosLookup,", "% (item.denominacion) registry.register(TercerosLookup) @login_required class TercerosTiposLookup(ModelLookup): model = Terceros search_fields", "TiposTemperaturas search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "class EtapasLookup(ModelLookup): model = Etapas search_fields = ('denominacion__icontains',) def get_query(self,", "results def 
get_item_value(self, item): return item.denominacion def get_item_label(self, item): return", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TiposCursosLookup, self).get_query(request,", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TercerosTiposLookup) @login_required class", "request.GET.get('idtpturno', '') if idtpturno: results = results.filter(tpturnos_id=idtpturno) return results def", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposCursosLookup)", "results = super(TiposLegislacionLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "Personal, Consumibles, ParametrosAnalisis, Actividades, Etapas, Peligros, TiposCursos, TiposLegislacion, Unidades, Firmas,", "self).get_query(request, term) results = results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item):", "% (item.denominacion) registry.register(TiposFrecuenciasLookup) @login_required class ZonasLookup(ModelLookup): model = Zonas search_fields", "model = TiposLegislacion search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "% (item.apellidos, item.nombres) registry.register(PersonalLookup) @login_required class TiposCursosLookup(ModelLookup): model = TiposCursos", "search_fields = ('apellidos__icontains',) def get_query(self, request, term): results = super(PersonalLookup,", "@login_required class PersonalLookup(ModelLookup): model = Personal search_fields = ('apellidos__icontains',) def", "item.nombres) registry.register(PersonalLookup) @login_required class TiposCursosLookup(ModelLookup): model = TiposCursos search_fields =", "@login_required class TiposTemperaturasLookup(ModelLookup): model = TiposTemperaturas search_fields = ('denominacion__icontains',) def", "(item.ihora, item.fhora) def get_item_label(self, item): return \"%s - %s\" %", "request, term): results = super(PersonalLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "registry.register(ConsumiblesLookup) @login_required class ParametrosAnalisisLookup(ModelLookup): model = ParametrosAnalisis search_fields = ('denominacion__icontains',)", "% (item.denominacion) registry.register(TPActuacionCorrLookup) @login_required class TPLimitesCritLookup(ModelLookup): model = TiposLimitesCriticos search_fields", "results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion def get_item_label(self,", "% (item.denominacion) registry.register(TiposTemperaturasLookup) @login_required class TiposFrecuenciasLookup(ModelLookup): model = TiposFrecuencias search_fields", "Unidades search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "item): return \"%s\" % (item.denominacion) registry.register(UnidadesLookup) @login_required class FirmasLookup(ModelLookup): model", "= super(PersonalLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "class ZonasLookup(ModelLookup): model = Zonas search_fields = ('denominacion__icontains',) def get_query(self,", "super(TiposTemperaturasLookup, self).get_query(request, term) results = 
results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "%s\" % (item.personal__apellidos, item.personal__nombres) registry.register(FirmasLookup) @login_required class HorarioTurnoLookup(ModelLookup): model =", "class PersonalLookup(ModelLookup): model = Personal search_fields = ('apellidos__icontains',) def get_query(self,", "request, term): results = super(HorarioTurnoLookup, self).get_query(request, term) idtpturno = request.GET.get('idtpturno',", "results = super(TPActuacionPrevLookup, self).get_query(request, term) results = results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPActuacionPrevLookup)", "results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return item.denominacion", "item): return \"%s\" % (item.denominacion) registry.register(ParametrosAnalisisLookup) @login_required class EtapasLookup(ModelLookup): model", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(EtapasLookup)", "super(TercerosTiposLookup, self).get_query(request, term) results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def", "def get_query(self, request, term): results = super(ParametrosAnalisisLookup, self).get_query(request, term) results", "super(TPLimitesCritLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "TipoMedidasVigilanciaLookup(ModelLookup): model = TiposMedidasVigilancia search_fields = ('denominacion__icontains',) def get_query(self, request,", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TercerosTiposLookup) @login_required class CatalogoEquiposLookup(ModelLookup):", "def get_query(self, request, term): results = super(ActividadesLookup, self).get_query(request, term) results", "class TiposTemperaturasLookup(ModelLookup): model = TiposTemperaturas search_fields = ('denominacion__icontains',) def get_query(self,", "get_query(self, request, term): results = super(TercerosLookup, self).get_query(request, term) results =", "TiposLegislacion search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "super(PersonalLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "\"%s %s\" % (item.apellidos, item.nombres) registry.register(PersonalLookup) @login_required class TiposCursosLookup(ModelLookup): model", "return \"%s\" % (item.denominacion) registry.register(TPActuacionCorrLookup) @login_required class TPLimitesCritLookup(ModelLookup): model =", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ActividadesLookup) @login_required class TipoMedidasVigilanciaLookup(ModelLookup):", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ConsumiblesLookup) @login_required", "= request.GET.get('idtpturno', '') if idtpturno: results = results.filter(tpturnos_id=idtpturno) return results", "def get_item_label(self, item): 
return \"%s\" % (item.denominacion) registry.register(ActividadesLookup) @login_required class", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ParametrosAnalisisLookup) @login_required", "= results.filter(tpturnos_id=idtpturno) return results def get_item_value(self, item): return \"%s -", "\"%s\" % (item.denominacion) registry.register(TPLimitesCritLookup) @login_required class ActividadesLookup(ModelLookup): model = Actividades", "\"%s\" % (item.denominacion) registry.register(EtapasLookup) @login_required class PeligrosLookup(ModelLookup): model = Peligros", "results = super(TPActuacionCorrLookup, self).get_query(request, term) results = results.filter(tipo=\"C\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "term): results = super(TPActuacionPrevLookup, self).get_query(request, term) results = results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(CatalogoEquiposLookup) @login_required class PersonalLookup(ModelLookup):", "= Peligros search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "item): return \"%s\" % (item.denominacion) registry.register(TiposTemperaturasLookup) @login_required class TiposFrecuenciasLookup(ModelLookup): model", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(ZonasLookup, self).get_query(request,", "def get_query(self, request, term): results = super(TiposLegislacionLookup, self).get_query(request, term) results", "\"%s\" % (item.denominacion) registry.register(UnidadesLookup) @login_required class FirmasLookup(ModelLookup): model = Firmas", "registry.register(TercerosTiposLookup) @login_required class CatalogoEquiposLookup(ModelLookup): model = CatalogoEquipos search_fields = ('denominacion__icontains',)", "= Terceros search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "get_query(self, request, term): results = super(ParametrosAnalisisLookup, self).get_query(request, term) results =", "class TipoMedidasVigilanciaLookup(ModelLookup): model = TiposMedidasVigilancia search_fields = ('denominacion__icontains',) def get_query(self,", "TiposFrecuenciasLookup(ModelLookup): model = TiposFrecuencias search_fields = ('denominacion__icontains',) def get_query(self, request,", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ConsumiblesLookup) @login_required class ParametrosAnalisisLookup(ModelLookup):", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TPActuacionCorrLookup) @login_required class TPLimitesCritLookup(ModelLookup):", "results = super(TiposFrecuenciasLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "% (item.denominacion) registry.register(TiposCursosLookup) @login_required class TiposLegislacionLookup(ModelLookup): model = TiposLegislacion search_fields", "term) results = results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self, item): return", "def get_query(self, request, term): results = super(TercerosLookup, self).get_query(request, term) results", "('denominacion__icontains',) def get_query(self, request, term): results = super(ParametrosAnalisisLookup, 
self).get_query(request, term)", "self).get_query(request, term) idtpturno = request.GET.get('idtpturno', '') if idtpturno: results =", "= TiposFrecuencias search_fields = ('denominacion__icontains',) def get_query(self, request, term): results", "- %s\" % (item.ihora, item.fhora) def get_item_label(self, item): return \"%s", "model = Terceros search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "CatalogoEquipos search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "search_fields = ('ihora__icontains','fhora__icontains') def get_query(self, request, term): results = super(HorarioTurnoLookup,", "def get_query(self, request, term): results = super(TPActuacionPrevLookup, self).get_query(request, term) results", "Consumibles search_fields = ('denominacion__icontains',) def get_query(self, request, term): results =", "registry.register(CatalogoEquiposLookup) @login_required class PersonalLookup(ModelLookup): model = Personal search_fields = ('apellidos__icontains',)", "model = CatalogoEquipos search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "model = Peligros search_fields = ('denominacion__icontains',) def get_query(self, request, term):", "% (item.denominacion) registry.register(TPLimitesCritLookup) @login_required class ActividadesLookup(ModelLookup): model = Actividades search_fields", "get_item_label(self, item): return \"%s %s\" % (item.personal__apellidos, item.personal__nombres) registry.register(FirmasLookup) @login_required", "registry.register(PeligrosLookup) @login_required class UnidadesLookup(ModelLookup): model = Unidades search_fields = ('denominacion__icontains',)", "term): results = super(HorarioTurnoLookup, self).get_query(request, term) idtpturno = request.GET.get('idtpturno', '')", "super(TPActuacionPrevLookup, self).get_query(request, term) results = results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "from maestros.models import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas, Terceros,", "= ('denominacion__icontains',) def get_query(self, request, term): results = super(TipoMedidasVigilanciaLookup, self).get_query(request,", "request, term): results = super(TPActuacionPrevLookup, self).get_query(request, term) results = results.filter(tipo=\"P\",empresa__in=Empresas.objects.filter(usuario__username=request.user))", "registry.register(ZonasLookup) @login_required class TercerosLookup(ModelLookup): model = Terceros search_fields = ('denominacion__icontains',)", "request, term): results = super(ActividadesLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))", "siva import settings __author__ = 'julian' @login_required class TPActuacionPrevLookup(ModelLookup): model", "return \"%s\" % (item.denominacion) registry.register(EtapasLookup) @login_required class PeligrosLookup(ModelLookup): model =", "\"%s\" % (item.denominacion) registry.register(TiposCursosLookup) @login_required class TiposLegislacionLookup(ModelLookup): model = TiposLegislacion", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(EtapasLookup) @login_required", "return item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(ParametrosAnalisisLookup)", "% (item.denominacion) 
registry.register(ParametrosAnalisisLookup) @login_required class EtapasLookup(ModelLookup): model = Etapas search_fields", "TPActuacionCorrLookup(ModelLookup): model = TiposMedidasActuacion search_fields = ('denominacion__icontains',) def get_query(self, request,", "get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(UnidadesLookup) @login_required class FirmasLookup(ModelLookup):", "class FirmasLookup(ModelLookup): model = Firmas search_fields = ('personal__apellidos__icontains',) def get_query(self,", "results = super(TPLimitesCritLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results", "get_item_label(self, item): return \"%s %s\" % (item.apellidos, item.nombres) registry.register(PersonalLookup) @login_required", "'julian' @login_required class TPActuacionPrevLookup(ModelLookup): model = TiposMedidasActuacion search_fields = ('denominacion__icontains',)", "@login_required class EtapasLookup(ModelLookup): model = Etapas search_fields = ('denominacion__icontains',) def", "item.denominacion def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TiposLegislacionLookup) @login_required", "super(CatalogoEquiposLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return results def get_item_value(self,", "results def get_item_value(self, item): return item.apellidos def get_item_label(self, item): return", "term): results = super(TPLimitesCritLookup, self).get_query(request, term) results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user)) return", "get_query(self, request, term): results = super(TiposLegislacionLookup, self).get_query(request, term) results =", "registry.register(TPActuacionCorrLookup) @login_required class TPLimitesCritLookup(ModelLookup): model = TiposLimitesCriticos search_fields = ('denominacion__icontains',)", "def get_item_label(self, item): return \"%s\" % (item.denominacion) registry.register(TercerosLookup) @login_required class", "def get_query(self, request, term): results = super(TipoMedidasVigilanciaLookup, self).get_query(request, term) results", "registry.register(FirmasLookup) @login_required class HorarioTurnoLookup(ModelLookup): model = HorarioTurnos search_fields = ('ihora__icontains','fhora__icontains')", "class TPLimitesCritLookup(ModelLookup): model = TiposLimitesCriticos search_fields = ('denominacion__icontains',) def get_query(self," ]
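How these lookups are consumed on the form side (a minimal sketch, assuming django-selectable's documented form API; TurnoForm, its field names, and the maestros.lookups import path are illustrative and not taken from the module above):

from django import forms
from selectable.forms import AutoCompleteSelectField

from maestros.lookups import ZonasLookup, HorarioTurnoLookup  # assumed module path


class TurnoForm(forms.Form):
    # Suggestions arrive pre-scoped to the user's companies via
    # EmpresaLookup.get_query(); no extra filtering is needed here.
    zona = AutoCompleteSelectField(lookup_class=ZonasLookup)

    # Dependent field: extra querystring parameters reach request.GET in the
    # lookup, where HorarioTurnoLookup.get_query() reads 'idtpturno'.
    horario = AutoCompleteSelectField(lookup_class=HorarioTurnoLookup)

    def __init__(self, *args, **kwargs):
        idtpturno = kwargs.pop('idtpturno', None)
        super(TurnoForm, self).__init__(*args, **kwargs)
        if idtpturno:
            self.fields['horario'].widget.update_query_parameters({'idtpturno': idtpturno})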
[ "Python 2.7\"\"\" import argparse import traceback from . import util", "extractpipenv def main(): \"\"\"Main function\"\"\" print(\"This version is not supported!", "args, rest = parser.parse_known_args() try: if not getattr(args, 'func', None):", "run from .cmd import extractpipenv def main(): \"\"\"Main function\"\"\" print(\"This", "def main(): \"\"\"Main function\"\"\" print(\"This version is not supported! It", "version is not supported! It has limitted analysis features\") parser", "Jupyter Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest =", "limitted analysis features\") parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers =", "function\"\"\" print(\"This version is not supported! It has limitted analysis", "getattr(args, 'func', None): parser.print_help() else: args.func(args, rest) if not util.EXITED:", "= parser.parse_known_args() try: if not getattr(args, 'func', None): parser.print_help() else:", "util from .cmd import run from .cmd import extractpipenv def", "supported! It has limitted analysis features\") parser = argparse.ArgumentParser(description='Analyze Jupyter", "= parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args() try: if", "parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args() try: if not", "parser.print_help() else: args.func(args, rest) if not util.EXITED: util.do_exit(0) except: #", "'func', None): parser.print_help() else: args.func(args, rest) if not util.EXITED: util.do_exit(0)", "parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers)", "Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args()", ".cmd import extractpipenv def main(): \"\"\"Main function\"\"\" print(\"This version is", "2.7\"\"\" import argparse import traceback from . import util from", "\"\"\"Main function\"\"\" print(\"This version is not supported! It has limitted", "if not util.EXITED: util.do_exit(0) except: # pylint: disable=bare-except if not", "\"\"\"Define commands for Python 2.7\"\"\" import argparse import traceback from", "It has limitted analysis features\") parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks')", "argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest", "traceback from . import util from .cmd import run from", "import extractpipenv def main(): \"\"\"Main function\"\"\" print(\"This version is not", "None): parser.print_help() else: args.func(args, rest) if not util.EXITED: util.do_exit(0) except:", ".cmd import run from .cmd import extractpipenv def main(): \"\"\"Main", "not getattr(args, 'func', None): parser.print_help() else: args.func(args, rest) if not", "args.func(args, rest) if not util.EXITED: util.do_exit(0) except: # pylint: disable=bare-except", "is not supported! It has limitted analysis features\") parser =", "import argparse import traceback from . 
import util from .cmd", "has limitted analysis features\") parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers", "not util.EXITED: util.do_exit(0) except: # pylint: disable=bare-except if not util.EXITED:", "for Python 2.7\"\"\" import argparse import traceback from . import", "features\") parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers)", "analysis features\") parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers = parser.add_subparsers()", "= argparse.ArgumentParser(description='Analyze Jupyter Notebooks') subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args,", "if not getattr(args, 'func', None): parser.print_help() else: args.func(args, rest) if", ". import util from .cmd import run from .cmd import", "not supported! It has limitted analysis features\") parser = argparse.ArgumentParser(description='Analyze", "rest = parser.parse_known_args() try: if not getattr(args, 'func', None): parser.print_help()", "argparse import traceback from . import util from .cmd import", "main(): \"\"\"Main function\"\"\" print(\"This version is not supported! It has", "extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args() try: if not getattr(args, 'func',", "parser.parse_known_args() try: if not getattr(args, 'func', None): parser.print_help() else: args.func(args,", "from .cmd import run from .cmd import extractpipenv def main():", "try: if not getattr(args, 'func', None): parser.print_help() else: args.func(args, rest)", "from . import util from .cmd import run from .cmd", "subparsers = parser.add_subparsers() run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args() try:", "util.EXITED: util.do_exit(0) except: # pylint: disable=bare-except if not util.EXITED: traceback.print_exc()", "rest) if not util.EXITED: util.do_exit(0) except: # pylint: disable=bare-except if", "commands for Python 2.7\"\"\" import argparse import traceback from .", "import traceback from . import util from .cmd import run", "import util from .cmd import run from .cmd import extractpipenv", "util.do_exit(0) except: # pylint: disable=bare-except if not util.EXITED: traceback.print_exc() util.do_exit(1)", "run.create_subparsers(subparsers) extractpipenv.create_subparsers(subparsers) args, rest = parser.parse_known_args() try: if not getattr(args,", "from .cmd import extractpipenv def main(): \"\"\"Main function\"\"\" print(\"This version", "import run from .cmd import extractpipenv def main(): \"\"\"Main function\"\"\"", "print(\"This version is not supported! It has limitted analysis features\")", "else: args.func(args, rest) if not util.EXITED: util.do_exit(0) except: # pylint:" ]
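main() only works if every command module honours the same registration contract: create_subparsers() adds a named subparser and binds its handler through set_defaults(func=...), which is what makes getattr(args, 'func', None) meaningful after parse_known_args(). A self-contained sketch of that contract with a hypothetical 'hello' subcommand (none of these names exist in the package):

import argparse


def _hello(args, rest):
    # 'rest' carries the extra tokens parse_known_args() left unparsed.
    print("hello %s (extras: %r)" % (args.name, rest))


def create_subparsers(subparsers):
    # Mirrors what main() expects from run/extractpipenv: register a
    # subparser and bind its handler via set_defaults(func=...).
    parser = subparsers.add_parser('hello', help='example subcommand')
    parser.add_argument('--name', default='world')
    parser.set_defaults(func=_hello)


if __name__ == '__main__':
    top = argparse.ArgumentParser(description='subcommand contract demo')
    create_subparsers(top.add_subparsers())
    args, rest = top.parse_known_args()
    if getattr(args, 'func', None):
        args.func(args, rest)
    else:
        top.print_help()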
[ "and --incremental'): cron = GpCronDump(options, None) finally: options.list_filter_tables = False", "return_value=['public']) def test_options_schema_filter_35(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_dump_schema", "def test_verify_tablenames_00(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() cron =", "'/tmp/dirty' dbname = 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertEquals(inc,", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options3(self, mock, mock2): options =", "self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with -K option'): GpCronDump(options,", "@patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key =", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))", "m, create=True): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=False) @patch('gpcrondump.GpCronDump._get_master_port') def", "'public, cot2, 3190'] heap_partition_list = ['public.heapt1', 'public.heapt2'] cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list)", "mock import patch, Mock from gppylib.operations.dump import MailDumpEvent from gppylib.operations.backup_utils", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_10(self, mock, mock2):", "GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.include_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '-t", "mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010' gpcd = GpCronDump(options,", "= 'foo1' mock_segs = [Mock(), Mock()] for id, seg in", "@patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_1(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options()", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_column_inserts_with_incremental(self, mock, mock2): options =", "options.include_schema_file = '/tmp/foo' write_lines_to_file('/tmp/foo', ['public']) gpcd = GpCronDump(options, None) dbname", "properly.\" % options.include_email_file): with patch('__builtin__.open', m, create=True): cron = GpCronDump(options,", "'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101'", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options10(self, mock, 
mock2):", "self.assertRaisesRegexp(Exception, '-s can not be selected with --schema-file option'): cron", "(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file')", "GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'exclude", "None) finally: options.list_filter_tables = False @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def", "options = GpCronDumpTestCase.Options() options.include_dump_tables_file = 'foo' options.incremental = True with", "options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_dump_tables = '/tmp/foo' with", "None self.netbackup_policy = None self.netbackup_schedule = None self.netbackup_block_size = None", "self.assertRaisesRegexp(Exception, 'include table list can not be selected with incremental", "None) self.assertEqual(gpcd.dump_prefix, 'foo_') @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS':", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_26(self, mock, mock2): options =", "['public, heapt1, 2190', 'public, heapt2!asdasd , 3190'] with self.assertRaisesRegexp(Exception, ''):", "@patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key =", "'/tmp/foobar' gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname =", "options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.incremental = True with", "options.exclude_dump_schema = 'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S can", "cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) def", "= True options.replicate = True options.max_streams = \"abc\" with self.assertRaisesRegexp(Exception,", "options.backup_dir = '/foo1' gpcd = GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1') @patch('gpcrondump.GpCronDump._get_master_port')", "incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options2(self,", "def test_options23(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost = True", "self.list_filter_tables = None self.include_email_file = None self.email_details = None self.include_schema_file", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_22(self, mock, mock2):", "options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' write_lines_to_file('/tmp/foo', ['public']) gpcd =", "'SUBJECT': \"backup completed 
for Database 'testdb100'\"}]}) def test_validate_parse_email_File04(self, mock1, mock2,", "mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo'", "'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list3(self, mock1, mock2):", "not supported with incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.get_latest_full_dump_timestamp',", "= '/tmp/foo' with self.assertRaisesRegexp(Exception, '--exclude-schema-file can not be selected with", "options.include_email_file = \"/tmp/abc\" with self.assertRaisesRegexp(Exception, \"'%s' is not '.yaml' file.", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list)", "= True options.ddboost = True options.list_backup_files = True with self.assertRaisesRegexp(Exception,", "'public.heapt2'] cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) #Should not raise an exception @patch('gpcrondump.GpCronDump._get_master_port')", "= GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.incremental = True options.local_dump_prefix =", "is not supported with NetBackup'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "'NAME': 'testdb100', 'SUBJECT': \"backup completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File04(self,", "with self.assertRaisesRegexp(Exception, \"can not specify catalog schema 'information_schema' using -s", "@patch('gpcrondump.validate_current_timestamp') def test_get_include_exclude_for_dump_database00(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory =", "options.max_streams = \"abc\" with self.assertRaisesRegexp(Exception, '--max-streams must be a number", "= \"test_schedule\" with self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'):", "return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': None,", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options27(self, mock, mock2): options =", "None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1',", "= False options.max_streams = 20 with self.assertRaisesRegexp(Exception, '--max-streams must be", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_13(self, mock, mock2):", "= False self.rollback = False self.compress = True self.free_space_percent =", "selected with --exclude-schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp')", "tables option requires --prefix and --incremental'): cron = GpCronDump(options, None)", "= True with self.assertRaisesRegexp(Exception, '-S option can not be selected", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options23(self, mock, mock2): options", "with self.assertRaisesRegexp(Exception, '-t can not be selected with --exclude-table-file option'):", "options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc\" with self.assertRaisesRegexp(Exception, \"'%s' is", "raise an exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options15(self, mock,", "= 'foo' with self.assertRaisesRegexp(Exception, '-T can not be selected with", "None self.email_details = None self.include_schema_file = None self.exclude_schema_file = None", "'-s can not be selected with -S option'): cron =", "self.bypass_disk_check = True self.backup_set = None self.dump_global = False self.clear_catalog_dumps", "options.dump_databases = ['bkdb'] options.timestamp_key = True options.ddboost = True options.list_backup_files", "'bkdb,fulldb' options.timestamp_key = True with self.assertRaisesRegexp(Exception, 'multi-database backup is not", "None self.backup_dir = None self.encoding = None self.output_options = None", "'public.t3'] gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname =", "dbname = 'foo' timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertEquals(file,", "GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value = 'foo1' timestamp =", "mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables = '/tmp/foo'", "'-c option can not be selected with incremental backup'): cron", "mock logger = gplog.get_unittest_logger() class GpCronDumpTestCase(unittest.TestCase): class Options: def __init__(self):", "= '/tmp/foobar' options.include_dump_tables_file = '/mydir/incfile' gpcd = GpCronDump(options, None) dirtyfile", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_00(self, mock1, mock2): options = GpCronDumpTestCase.Options() cron", "self.assertRaisesRegexp(Exception, \"can not specify catalog schema 'information_schema' using -S option\"):", "self.report_dir = None self.timestamp_key = None self.list_backup_files = None self.quiet", "@patch('gpcrondump.validate_current_timestamp') def test_options15(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases =", "options.include_schema_file = 'foo' options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t and", "self.include_dump_tables_file = None self.exclude_dump_tables_file = None self.backup_dir = None self.encoding", "@patch('gpcrondump.validate_current_timestamp') def test_options19(self, mock, mock2): options = GpCronDumpTestCase.Options() options.clear_dumps =", "['public.t4', 'public.t5', 'public.t6'] gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty'", "= '/data/foo' gpcd = GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value", "= GpCronDumpTestCase.Options() options.exclude_dump_tables_file = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception,", "mock1, mock2): options = GpCronDumpTestCase.Options() 
options.include_email_file = \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception,", "master = Mock() master.getSegmentHostName.return_value = 'foo2' timestamp = '20130101010101' dump_dir", "gpcd.get_schema_list_file(dbname) self.assertEquals(file, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_32(self, mock1, mock2): options", "mock2): options = GpCronDumpTestCase.Options() options.dump_databases = 'bkdb,fulldb' options.incremental = False", "timestamp) files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/metro_gp_cdatabase_1_1_20130101010101' %", "zero'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options25(self, mock,", "= None options.masterDataDirectory = '/foo' options.dump_config = True gpcd =", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_include_exclude_for_dump_database00(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object1(self, mock1, mock2):", "dump_database = 'testdb1' current_exit_status = 0 time_start = '12:07:09' time_end", "= None self.dump_schema = False self.dump_databases = ['testdb'] self.bypass_disk_check =", "@patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=0) def test_validate_parse_email_File03(self, mock1, mock2, mock3):", "mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.include_dump_tables_file = '/tmp/foo'", "GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-s", "mock3): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc\" with self.assertRaisesRegexp(Exception, \"'%s'", "self.ddboost_user = None self.ddboost_config_remove = False self.ddboost_verify = False self.ddboost_remote", "self.assertRaisesRegexp(Exception, '-S option can not be selected with incremental backup'):", "options.exclude_dump_schema = 'foo' with self.assertRaisesRegexp(Exception, '-s can not be selected", "options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list1(self, mock1, mock2): options", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1:asd', 'public.aot2'], ['public.cot1', 'public.cot2:asd']]) def", "options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"\\'%s\\' file", "'public, aot2,aot, 3190'] with self.assertRaisesRegexp(Exception, 'Invalid partition entry \"public, aot2,aot,", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_21(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file =", "\"can not specify catalog schema 'information_schema' using -s option\"): GpCronDump(options,", 
"import patch, Mock from gppylib.operations.dump import MailDumpEvent from gppylib.operations.backup_utils import", "= GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "--schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_14(self,", "return_value='20121225090000') def test_get_include_exclude_for_dump_database03(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.masterDataDirectory", "options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-s can not be selected", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options4(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables_file", "options.include_schema_file = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and", "'foo1:%s/db_dumps/20130101/gp_global_1_1_20130101010101' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "True options.max_streams = 0 with self.assertRaisesRegexp(Exception, '--max-streams must be a", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_31(self, mock, mock2): options = GpCronDumpTestCase.Options()", "'Invalid timestamp key'): gpcd._get_timestamp_object(options.timestamp_key) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object3(self, mock1, mock2):", "= GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables = ['public.t4', 'public.t5', 'public.t6']", "options.include_email_file): with patch('__builtin__.open', m, create=True): cron = GpCronDump(options, None) @patch('gpcrondump.MailDumpEvent')", "= False self.history = False self.pre_vacuum = False self.post_vacuum =", "= ['information_schema'] with self.assertRaisesRegexp(Exception, \"can not specify catalog schema 'information_schema'", "@patch('gpcrondump.os.path.isfile', return_value=False) @patch('gpcrondump.GpCronDump._get_master_port') def test_validate_parse_email_File01(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "@patch('gpcrondump.validate_current_timestamp') def test_options_column_inserts_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options() options.output_options =", "-T can not be selected with --exclude-schema-file option'): cron =", "expected_output = ['public.aot1', 'public.aot2:aot'] result = cron._get_table_names_from_partition_list(partition_list) self.assertEqual(result, expected_output) @patch('gpcrondump.GpCronDump._get_master_port')", "= GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.exclude_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception,", "test_options_schema_filter_34(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo'", "@patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp') def test_options_table_filter4(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables", "dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,", "'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101000000_increments'", "patch, Mock from gppylib.operations.dump import MailDumpEvent from gppylib.operations.backup_utils import get_backup_directory,", "= GpCronDumpTestCase.Options() options.ddboost = True options.replicate = True options.max_streams =", "'/tmp/foo' with self.assertRaisesRegexp(Exception, '-t and -T can not be selected", "'multi-database backup is not supported with -K option'): GpCronDump(options, None)", "options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables = ['public.t4', 'public.t5', 'public.t6'] gpcd =", "co_partition_list = ['public, cot1, 2190', 'public, cot2, 3190'] heap_partition_list =", "self.batch_default = DEFAULT_NUM_WORKERS self.include_dump_tables = None self.exclude_dump_tables = None self.include_dump_tables_file", "init self.incremental = False self.ddboost = False self.ddboost_hosts = None", "'20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public']) @patch('gpcrondump.get_user_table_list_for_schema',", "= True try: with self.assertRaisesRegexp(Exception, 'list filter tables option requires", "with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'): cron", "['public.cot1', 'public.cot2']]) def test_verify_tablenames_00(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options()", "m, create=True): cron = GpCronDump(options, None) @patch('gpcrondump.MailDumpEvent') @patch('gpcrondump.GpCronDump._get_master_port') def test_send_email00(self,", "gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' %", "= gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file'", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_27(self, mock, mock2):", "return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database06(self, mock1, mock2, mock3, mock4): options =", "= 'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--exclude-schema-file can not", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1', 'public.aot2'], 
['public.cot1', 'public.cot2']]) def test_verify_tablenames_00(self,", "= \"/tmp/abc.yaml\" m = mock.MagicMock() with self.assertRaisesRegexp(Exception, \"\\'%s\\' file is", "'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list,", "= None options.list_backup_files = False gpcd = GpCronDump(options, None) self.assertEqual(gpcd.dump_prefix,", "GpCronDump(options, None) dbname = 'foo' timestamp = '20141016010101' file =", "mock3): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can", "with self.assertRaisesRegexp(Exception, '-S option can not be selected with incremental", "-S option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options1(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables", "= None self.clear_dumps = False self.cleanup_date = None self.cleanup_total =", "= True with self.assertRaisesRegexp(Exception, 'exclude table list can not be", "mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.incremental", "GpCronDump(options, None) @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port') def test_options13(self, mock, mock2,", "cron._get_table_names_from_partition_list(partition_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter1(self, mock, mock2): options = GpCronDumpTestCase.Options()", "= GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' write_lines_to_file('/tmp/foo', ['public']) gpcd = GpCronDump(options,", "dbname = 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port')", "options.incremental = True with self.assertRaisesRegexp(Exception, 'include table list can not", "mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'public' gpcd =", "-s option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_17(self,", "can not be selected with --table-file option'): cron = GpCronDump(options,", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_13(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file =", "None self.exclude_dump_tables = None self.include_dump_tables_file = None self.exclude_dump_tables_file = None", "'--table-file and --exclude-table-file can not be selected with --exclude-schema-file option'):", "= Mock() master.getSegmentHostName.return_value = 'foo2' timestamp = '20130101010101' dump_dir =", "\"'%s' is not '.yaml' file. 
File containing email details should", "mock_segs, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' %", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options10(self, mock,", "expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_global_1_1_20130101010101' %", "= False self.ddboost_verify = False self.ddboost_remote = None self.ddboost_ping =", "= [] options.incremental = True with self.assertRaisesRegexp(Exception, 'Must supply -x", "= imp.load_source('gpcrondump', gpcrondump_path) import unittest2 as unittest from datetime import", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options23(self, mock, mock2): options =", "False self.ddboost_verify = False self.ddboost_remote = None self.ddboost_ping = None", "co_partition_list, heap_partition_list) #Should not raise an exception @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list',", "= GpCronDump(options, None) partition_list = ['public, aot1, 2190', 'public, aot2,aot,", "options.timestamp_key = None options.local_dump_prefix = 'foo' options.ddboost = False options.ddboost_verify", "should be '.yaml' file.\" % options.include_email_file): cron = GpCronDump(options, None)", "= GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can not include", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_12(self, mock,", "def test_send_email00(self, mock1, MailDumpEvent): options = GpCronDumpTestCase.Options() dump_database = 'testdb1'", "does not exist.\" % options.include_email_file): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile',", "options.dump_schema = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and", "ddboost option'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options29(self, mock, mock2):", "def test_options10(self, mock, mock2): options = GpCronDumpTestCase.Options() options.local_dump_prefix = 'foo'", "= True options.dump_databases = 'bkdb,fulldb' with self.assertRaisesRegexp(Exception, 'multi-database backup is", "os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_35(self, mock1, mock2, mock3):", "options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010' 
gpcd = GpCronDump(options, None)", "test_options13(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.incremental = True", "datetime)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list1(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "= get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir,", "'foo' timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "= gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_33(self, mock1, mock2): options", "def test_options25(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost = False", "options.dump_global = True options.masterDataDirectory = '/foo' gpcd = GpCronDump(options, None)", "GpCronDumpTestCase.Options() options.incremental = True options.dump_databases = ['bkdb'] #If this is", "GpCronDumpTestCase.Options() options.incremental = True options.dump_databases = 'bkdb,fulldb' with self.assertRaisesRegexp(Exception, 'multi-database", "selected with --exclude-table-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_gpcrondump_init0(self, mock1, mock2): options", "= True options.list_backup_files = True with self.assertRaisesRegexp(Exception, 'list backup files", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_19(self, mock, mock2):", "'/tmp/foo' write_lines_to_file('/tmp/foo', ['public']) gpcd = GpCronDump(options, None) dbname = 'foo'", "GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.incremental = True options.masterDataDirectory = '/data/foo'", "m = mock.MagicMock() with patch('__builtin__.open', m, create=True): cron = GpCronDump(options,", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_schema_file =", "= GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" m = mock.MagicMock() with patch('__builtin__.open',", "@patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': None, 'SUBJECT': \"backup", "mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.exclude_dump_tables =", "options.timestamp_key = '20130101010101' options.incremental = True options.masterDataDirectory = '/data/foo' gpcd", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_3(self, mock,", "options.include_email_file = \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"\\'%s\\' file does not 
exist.\"", "options.clear_dumps = True options.incremental = True with self.assertRaisesRegexp(Exception, '-c option", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' %", "['bkdb'] #If this is successful then it should not raise", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_15(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.exclude_dump_tables_file =", "= GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception,", "options.ddboost_config_remove = False options.ddboost_user = False options.ddboost_host = False options.max_streams", "self.assertEquals(inc, None) self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1',", "with self.assertRaisesRegexp(Exception, \"\\'%s\\' file does not exist.\" % options.include_email_file): cron", "['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation'", "= GpCronDumpTestCase.Options() options.incremental = True options.dump_databases = 'bkdb,fulldb' with self.assertRaisesRegexp(Exception,", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables_file =", "with self.assertRaisesRegexp(Exception, 'list filter tables option requires --prefix and --incremental'):", "GpCronDumpTestCase.Options() options.output_options = ['--inserts'] options.incremental = True with self.assertRaisesRegexp(Exception, '--inserts,", "'-S can not be selected with --schema-file option'): cron =", "def test_options_schema_filter_18(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo'", "'public, heapt2!asdasd , 3190'] with self.assertRaisesRegexp(Exception, ''): cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list)", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_dump_tables =", "= GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.local_dump_prefix = 'metro' options.include_dump_tables_file = 'bar'", "def test_get_pipes_file_list2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None", "cron._send_email(dump_database, current_exit_status, time_start, time_end) #------------------------------- Mainline -------------------------------- if __name__ ==", "def test_options2(self, mock, mock2): options = GpCronDumpTestCase.Options() 
options.exclude_dump_tables = 'foo'", "mock2, mock3): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc\" with self.assertRaisesRegexp(Exception,", "def test_options_schema_filter_31(self, mock, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar'", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter3(self, mock, mock2): options", "GpCronDumpTestCase.Options() options.dump_databases = [] options.incremental = True with self.assertRaisesRegexp(Exception, 'Must", "from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file import mock logger = gplog.get_unittest_logger()", "None self.cleanup_total = None self.dump_schema = False self.dump_databases = ['testdb']", "be selected with incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "False self.compress = True self.free_space_percent = None self.clear_dumps = False", "['public.cot1', 'public.cot2:asd']]) def test_verify_tablenames_00_bad(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options()", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter6(self, mock, mock2): options = GpCronDumpTestCase.Options()", "'/tmp/foobar' options.backup_dir = '/foo1' gpcd = GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1')", "GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_column_inserts_with_incremental(self, mock, mock2): options", "= 'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S can not", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_18(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema =", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_2(self, mock, mock2, mock3):", "['public, aot1, 2190', 'public, aot2, 3190'] co_partition_list = ['public, cot1,", "mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None gpcd = GpCronDump(options,", "None gpcd = GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options14(self, mock, mock2): options", "self.ddboost_verify = False self.ddboost_remote = None self.ddboost_ping = None self.ddboost_backupdir", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_16(self, mock, mock2):", "self.assertRaisesRegexp(Exception, '--replicate and --max-streams cannot be used without --ddboost'): cron", "import datetime from gppylib import gplog from gpcrondump import GpCronDump", "with self.assertRaisesRegexp(Exception, 
'--schema-file option can not be selected with incremental", "'RRP_MPE2_DCA_1', 'DBNAME': None, 'SUBJECT': \"backup completed for Database 'testdb100'\"}]}) def", "def test_get_include_exclude_for_dump_database01(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory", "GpCronDump from gppylib.operations.utils import DEFAULT_NUM_WORKERS from mock import patch, Mock", "file is empty.\" % options.include_email_file): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile',", "not raise an exception @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1:asd',", "-S option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_27(self,", "test_options_schema_filter_5(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_schema_file", "test_get_timestamp_object1(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' gpcd", "cannot be selected with incremental backup'): cron = GpCronDump(options, None)", "'-T can not be selected with --table-file option'): cron =", "GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables = ['public.t4', 'public.t5', 'public.t6'] gpcd", "dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database02(self, mock1,", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_23(self, mock, mock2): options = GpCronDumpTestCase.Options()", "specify catalog schema 'information_schema' using -S option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "with --replicate'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options22(self,", "= 20 with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with", "exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file')", "options.exclude_dump_tables = ['public.t4', 'public.t5', 'public.t6'] gpcd = GpCronDump(options, None) dirtyfile", "self.assertRaisesRegexp(Exception, '-S can not be selected with --schema-file option'): cron", "= None self.netbackup_block_size = None self.netbackup_keyword = None @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema')", "'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list4(self, mock1, mock2):", "'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'include table file can", "options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables_file = '/tmp/foo' with", "co_partition_list, heap_partition_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_inserts_with_incremental(self, mock, mock2): options =", "2190', 'public, heapt2!asdasd , 3190'] with self.assertRaisesRegexp(Exception, ''): cron._verify_tablenames(ao_partition_list, co_partition_list,", "dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs,", "cron = GpCronDump(options, None) @patch('gpcrondump.MailDumpEvent') @patch('gpcrondump.GpCronDump._get_master_port') def test_send_email00(self, mock1, MailDumpEvent):", "mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.exclude_dump_tables_file =", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_14(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file", "% options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' %", "'foo' timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) if os.path.exists('/tmp/foo'):", "= None self.max_streams = None self.netbackup_service_host = None self.netbackup_policy =", "--prefix and --incremental'): cron = GpCronDump(options, None) finally: options.list_filter_tables =", "@patch('gpcrondump.validate_current_timestamp') def test_options20(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases =", "'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port')", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_6(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_inserts_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options() options.output_options", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_2(self, mock, mock2, mock3): options", "None self.dump_config = False self.history = False self.pre_vacuum = False", "= '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) if 
os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port')", "= 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'include table file", "['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_global_1_1_20130101010101' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',", "options.max_streams = 20 with self.assertRaisesRegexp(Exception, '--max-streams must be specified along", "True options.replicate = True options.max_streams = 0 with self.assertRaisesRegexp(Exception, '--max-streams", "mock1, mock2): options = GpCronDumpTestCase.Options() cron = GpCronDump(options, None) partition_list", "--exclude-table-file can not be selected with --exclude-schema-file option'): cron =", "['public, aot1!asd, 2190', 'public, aot2, 3190'] co_partition_list = ['public, cot1,", "True options.replicate = True options.max_streams = None with self.assertRaisesRegexp(Exception, '--max-streams", "None) self.assertEquals(cron.full_dump_timestamp, '20121225090000') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options12(self, mock, mock2): options", "options.max_streams = 20 with self.assertRaisesRegexp(Exception, '--replicate and --max-streams cannot be", "self.quiet = False self.verbose = False self.local_dump_prefix = '' self.list_filter_tables", "options.incremental = True gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty'", "'-T can not be selected with --exclude-table-file option'): cron =", "cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) #Should not raise an exception @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "% options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' %", "= True with self.assertRaisesRegexp(Exception, 'include table file can not be", "options.exclude_dump_schema = 'foo' options.include_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t and", "= 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file", "an exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options16(self, mock, mock2):", "file is not formatted properly.\" % options.include_email_file): with patch('__builtin__.open', m,", "test_get_timestamp_object3(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None gpcd", "@patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_35(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options()", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_15(self, mock, mock2):", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % 
options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory,", "options.incremental = True with self.assertRaisesRegexp(Exception, 'exclude table file can not", "GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file", "be selected with --schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "'20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp) files_file_list = gpcd._get_files_file_list(master,", "GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir = '/foo1' gpcd = GpCronDump(options,", "'20130101010101' options.local_dump_prefix = 'metro' options.include_dump_tables_file = 'bar' options.masterDataDirectory = '/data/foo'", "= GpCronDumpTestCase.Options() options.dump_databases = 'bkdb' options.incremental = False #If this", "return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=0) def test_validate_parse_email_File03(self, mock1, mock2, mock3): options", "'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list),", "self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):", "mock2, mock3): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' options.incremental =", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_6(self, mock, mock2): options", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_22(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file", "for Database 'testdb100'\"}]}) def test_validate_parse_email_File00(self, mock1, mock2, mock3, mock4): options", "supply -K option when listing backup files'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "test_options_schema_filter_30(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo'", "@patch('gpcrondump.validate_current_timestamp') def test_options3(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables_file =", "a number greater than zero'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", ", 3190'] with self.assertRaisesRegexp(Exception, ''): cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "test_options_schema_filter_20(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_dump_tables", "= True options.masterDataDirectory = '/foo' gpcd = GpCronDump(options, None) master", "options.timestamp_key = None options.dump_global = True options.masterDataDirectory = '/foo' gpcd", "False self.verbose = False self.local_dump_prefix = '' self.list_filter_tables 
= None", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def test_options11(self, mock, mock2, mock3): options", "test_options_schema_filter_10(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_schema_file", "= GpCronDumpTestCase.Options() options.timestamp_key = '20130101010' gpcd = GpCronDump(options, None) with", "mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo'", "details should be '.yaml' file.\" % options.include_email_file): cron = GpCronDump(options,", "= None self.netbackup_service_host = None self.netbackup_policy = None self.netbackup_schedule =", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_10(self, mock, mock2): options", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_26(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema", "gpcd = GpCronDump(options, None) self.assertEqual(gpcd.dump_prefix, 'foo_') @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize',", "name> with incremental option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "self.assertTrue(file.startswith('/tmp/schema_list')) if os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_35(self,", "'foo_') @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1',", "+ 1 timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir,", "'--exclude-schema-file option can not be selected with incremental backup'): cron", "Mock() master.getSegmentHostName.return_value = 'foo2' timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory,", "options.include_email_file): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111)", "'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list2(self, mock1,", "--max-streams cannot be used without --ddboost'): cron = GpCronDump(options, None)", "options.include_dump_tables_file = 'foo' options.exclude_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '--table-file can", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options4(self, mock,", 
"@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database02(self, mock1, mock2, mock3,", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_oids_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options()", "GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options17(self, mock, mock2):", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables =", "result = cron._get_table_names_from_partition_list(partition_list) self.assertEqual(result, expected_output) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_01(self, mock1,", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables_file =", "-K option when listing backup files'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "self.exclude_schema_file = None self.exclude_dump_schema = None self.dump_stats = None ##", "= '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can not include catalog schema 'information_schema'", "cron = GpCronDump(options, None) finally: options.list_filter_tables = False @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options21(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost", "options.include_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '-T can not be selected", "'information_schema' using -s option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_28(self,", "= True gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname", "options.local_dump_prefix = 'metro' options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options, None)", "= GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception,", "= None self.quiet = False self.verbose = False self.local_dump_prefix =", "@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port') def test_options13(self, mock, mock2, mock3): options", "= GpCronDump(options, None) with self.assertRaisesRegexp(Exception, 'Invalid timestamp key'): gpcd._get_timestamp_object(options.timestamp_key) @patch('gpcrondump.GpCronDump._get_master_port')", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_24(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema", "master.getSegmentHostName.return_value = 'foo1' timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, None,", "options.dump_databases = 'bkdb,fulldb' with 
self.assertRaisesRegexp(Exception, 'multi-database backup is not supported", "schema file '/tmp/foo'\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema'])", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101000000_increments' % options.masterDataDirectory]", "import gplog from gpcrondump import GpCronDump from gppylib.operations.utils import DEFAULT_NUM_WORKERS", "--incremental'): cron = GpCronDump(options, None) finally: options.list_filter_tables = False @patch('gpcrondump.GpCronDump._get_master_port')", "mock2): options = GpCronDumpTestCase.Options() options.ddboost = True options.replicate = False", "patch('__builtin__.open', m, create=True): cron = GpCronDump(options, None) @patch('gpcrondump.MailDumpEvent') @patch('gpcrondump.GpCronDump._get_master_port') def", "None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options17(self, mock, mock2): options", "['bkdb'] options.timestamp_key = True options.ddboost = True options.list_backup_files = True", "'/tmp/foo' inc = gpcd.generate_include_table_list_from_schema_file(dbname, schema_file) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options1(self,", "sorted(expected_files_list)) @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gppylib.operations.backup_utils.get_latest_full_dump_timestamp', return_value='20130101000000') def test_get_files_file_list_with_filter(self, mock1, mock2, mock3):", "be selected with --table-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value = 'foo2' mock_segs =", "= True options.replicate = False options.max_streams = 20 with self.assertRaisesRegexp(Exception,", "with patch('__builtin__.open', m, create=True): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True)", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options18(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema", "= None self.list_backup_files = None self.quiet = False self.verbose =", "--exclude-schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_12(self,", "= '/tmp/foo' write_lines_to_file('/tmp/foo', ['public']) gpcd = GpCronDump(options, None) dbname =", "None options.local_dump_prefix = 'foo' options.ddboost = False options.ddboost_verify = False", "test_get_table_names_from_partition_list_00(self, mock1, mock2): options = GpCronDumpTestCase.Options() cron = GpCronDump(options, None)", "= False self.verbose = False self.local_dump_prefix = '' self.list_filter_tables =", "with self.assertRaisesRegexp(Exception, '--exclude-schema-file can not be selected with --schema-file option'):", "'include table file can not be 
selected with incremental backup'):", "= False options.list_filter_tables = True try: with self.assertRaisesRegexp(Exception, 'list filter", "mock, mock2): options = GpCronDumpTestCase.Options() options.local_dump_prefix = 'foo' options.incremental =", "sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_gpcrondump_init0(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "'-s can not be selected with --schema-file option'): cron =", "= 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_11(self, mock, mock2):", "GpCronDumpTestCase.Options() cron = GpCronDump(options, None) partition_list = ['public, aot1, 2190',", "'bkdb,fulldb' options.incremental = False #If this is successful then it", "= '20130101010101' gpcd = GpCronDump(options, None) timestamp = gpcd._get_timestamp_object(options.timestamp_key) self.assertEquals(timestamp,", "selected with -T option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_18(self, mock, mock2): options = GpCronDumpTestCase.Options()", "= GpCronDumpTestCase.Options() options.timestamp_key = None options.dump_global = True options.masterDataDirectory =", "class Options: def __init__(self): self.masterDataDirectory = \"\" self.interactive = False", "options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with", "= gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def", "3190'] heap_partition_list = ['public.heapt1', 'public.heapt2'] cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) #Should not", "options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_schema_file = '/tmp/foo' with", "= False options.ddboost_verify = False options.ddboost_config_remove = False options.ddboost_user =", "True with self.assertRaisesRegexp(Exception, '-s option can not be selected with", "= ['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,", "def test_options21(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost = True", "with --table-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "3190'] co_partition_list = ['public, cot1, 2190', 'public, cot2\\nasd, 3190'] heap_partition_list", "options = 
GpCronDumpTestCase.Options() options.incremental = True options.dump_databases = ['bkdb'] #If", "mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_dump_tables = '/tmp/foo'", "mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\"", "mock2, mock3): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "cot1, 2190', 'public, cot2, 3190'] heap_partition_list = ['public.heapt1', 'public.heapt2'] cron._verify_tablenames(ao_partition_list,", "as unittest from datetime import datetime from gppylib import gplog", "\"abc\" with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than", "False @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def test_options11(self, mock, mock2, mock3):", "backup files'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options27(self, mock, mock2):", "with self.assertRaisesRegexp(Exception, \"can not include catalog schema 'information_schema' in schema", "return_value=['public', 'information_schema']) def test_options_schema_filter_30(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options()", "dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory]", "def test_verify_tablenames_00_bad(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() cron =", "create=True): cron = GpCronDump(options, None) @patch('gpcrondump.MailDumpEvent') @patch('gpcrondump.GpCronDump._get_master_port') def test_send_email00(self, mock1,", "can not be selected with --exclude-schema-file option'): cron = GpCronDump(options,", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_14(self, mock,", "= GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir = None gpcd =", "mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--inserts'] options.incremental = True", "gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_33(self, mock1, mock2): options =", "['testdb'] self.bypass_disk_check = True self.backup_set = None self.dump_global = False", "20 with self.assertRaisesRegexp(Exception, '--replicate and --max-streams cannot be used without", "options.incremental = True with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be", "@patch('gpcrondump.validate_current_timestamp') def test_options25(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost =", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.incremental =", "= False self.post_vacuum = False self.rollback = False self.compress =", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_5(self, mock, mock2): options", 
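    # The filter tests below share one pattern: build an Options stub, set
    # only the flags under test, patch out environment-dependent pieces of
    # GpCronDump (master port lookup, timestamp validation, file reads), and
    # assert that an invalid schema/table filter combination raises with the
    # expected message.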
"self.free_space_percent = None self.clear_dumps = False self.cleanup_date = None self.cleanup_total", "mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables_file = '/tmp/foo'", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options22(self, mock, mock2): options = GpCronDumpTestCase.Options()", "= ['public, cot1, 2190', 'public, cot2\\nasd, 3190'] heap_partition_list = ['public,", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_filter' % options.masterDataDirectory]", "= '/tmp/foobar' gpcd = GpCronDump(options, None) dbname = 'foo' timestamp", "completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File05(self, mock1, mock2, mock3, mock4):", "None options.masterDataDirectory = '/foo' gpcd = GpCronDump(options, None) master =", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_oids_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options() options.output_options", "def test_validate_parse_email_File00(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.include_email_file", "time_start = '12:07:09' time_end = '12:08:18' cron = GpCronDump(options, None)", "can not be selected with -T option'): cron = GpCronDump(options,", "= True options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options, None) master", "= GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "seg.getSegmentDataDirectory.return_value = '/bar' seg.getSegmentHostName.return_value = 'foo1' seg.getSegmentDbId.return_value = id +", "gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz'", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_35(self, mock1, mock2, mock3): options =", "options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can not include catalog schema", "'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101'", "test_send_email00(self, mock1, MailDumpEvent): options = GpCronDumpTestCase.Options() dump_database = 'testdb1' current_exit_status", "email details should be '.yaml' file.\" % options.include_email_file): cron =", "= \"/tmp/abc\" with self.assertRaisesRegexp(Exception, \"'%s' is not '.yaml' file. 
File", "--schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_23(self,", "= None self.replicate = None self.max_streams = None self.netbackup_service_host =", "= GpCronDumpTestCase.Options() gpcd = GpCronDump(options, None) dbname = 'foo' schema_file", "GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) def test_validate_parse_email_File02(self, mock1,", "'/tmp/foobar' options.backup_dir = None gpcd = GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar')", "'SUBJECT': \"backup completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File00(self, mock1, mock2,", "None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) def test_validate_parse_email_File02(self, mock1, mock2,", "GpCronDumpTestCase.Options() options.dump_databases = 'bkdb,fulldb' options.incremental = False #If this is", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_column_inserts_with_incremental(self, mock,", "1, 1)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object2(self, mock1, mock2): options =", "--schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_22(self,", "@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') @patch('gpcrondump.GpCronDump._get_master_port') def test_get_files_file_list_with_prefix(self, mock1, mock2, mock3): options =", "options.include_email_file = \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"'%s' file is empty.\" %", "= '20130101010101' options.incremental = True options.masterDataDirectory = '/data/foo' gpcd =", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_13(self, mock, mock2): options = GpCronDumpTestCase.Options()", "can not be selected with -S option'): cron = GpCronDump(options,", "= ['public, cot1, 2190', 'public, cot2, 3190'] heap_partition_list = ['public.heapt1',", "mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.exclude_dump_tables = 'foo'", "= GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname = 'foo' (inc,", "= 'foo1' timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir,", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2']) def test_get_include_exclude_for_dump_database01(self, mock1,", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2']) def 
test_get_include_exclude_for_dump_database04(self, mock1, mock2,", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter4(self, mock, mock2): options", "@patch('gpcrondump.os.path.getsize', return_value=0) def test_validate_parse_email_File03(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options()", "self.assertRaisesRegexp(Exception, \"can not specify catalog schema 'information_schema' using -s option\"):", "sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list1(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "aot2, 3190'] co_partition_list = ['public, cot1, 2190', 'public, cot2\\nasd, 3190']", "seg.getSegmentHostName.return_value = 'foo1' seg.getSegmentDbId.return_value = id + 1 timestamp =", "with self.assertRaisesRegexp(Exception, '-T can not be selected with --table-file option'):", "= GpCronDump(options, None) partition_list = ['public, aot1, 2190', 'public, aot2:aot,", "[{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': 'testdb100', 'SUBJECT': \"backup completed for Database 'testdb100'\"}]})", "GpCronDumpTestCase.Options() gpcd = GpCronDump(options, None) dbname = 'foo' schema_file =", "with self.assertRaisesRegexp(Exception, '-s can not be selected with -S option'):", "self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'): cron", "return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': 'testdb100', 'SUBJECT': \"backup completed for Database", "mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.include_dump_tables_file = 'foo'", "= GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" m = mock.MagicMock() with self.assertRaisesRegexp(Exception,", "'/tmp/foobar' options.exclude_dump_tables = ['public.t4', 'public.t5', 'public.t6'] gpcd = GpCronDump(options, None)", "time_start, time_end) #------------------------------- Mainline -------------------------------- if __name__ == '__main__': unittest.main()", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = ['bkdb'] options.timestamp_key =", "% options.include_email_file): with patch('__builtin__.open', m, create=True): cron = GpCronDump(options, None)", "and --exclude-table-file can not be selected with -S option'): cron", "= '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_33(self,", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter5(self, mock, mock2):", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_6(self, mock, mock2): options =", "'foo' options.ddboost = False options.ddboost_verify = False options.ddboost_config_remove = False", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options21(self, mock, mock2): options =", "[Mock(), Mock()] for id, seg in enumerate(mock_segs): seg.getSegmentDataDirectory.return_value = '/bar'", "options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' 
options.exclude_dump_tables_file = '/tmp/foo' with", "True with self.assertRaisesRegexp(Exception, 'list backup files not supported with ddboost", "'/tmp/dirty' dbname = 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file'))", "None self.quiet = False self.verbose = False self.local_dump_prefix = ''", "options.exclude_dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and", "with ddboost option'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options29(self, mock,", "mock2, mock3): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.incremental =", "= 'foo' options.exclude_dump_schema = 'foo' with self.assertRaisesRegexp(Exception, '-s can not", "None self.replicate = None self.max_streams = None self.netbackup_service_host = None", "'-t and -T can not be selected with --exclude-schema-file option'):", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_column_inserts_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options() options.output_options", "gpcd = GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options18(self,", "@patch('gpcrondump.GpCronDump._get_master_port') def test_validate_parse_email_File01(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.include_email_file =", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_27(self, mock, mock2): options = GpCronDumpTestCase.Options()", "options.include_dump_tables = 'foo' options.include_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '-t can", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_28(self, mock, mock2): options = GpCronDumpTestCase.Options()", "not be selected with -T option'): cron = GpCronDump(options, None)", "options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables_file = '/odir/exfile' gpcd = GpCronDump(options, None)", "mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' gpcd = GpCronDump(options,", "'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list3(self, mock1, mock2): options", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_4(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema =", "datetime(2013, 1, 1, 1, 1, 1)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object2(self,", "timestamp) pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz'", "GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"'%s' file is empty.\"", "GpCronDump(options, None) 
self.assertEqual(gpcd.dump_prefix, 'foo_') @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load',", "mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.exclude_dump_tables = '/tmp/foo'", "self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --schema-file", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_20(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file =", "test_options_schema_filter_32(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = ['public'] gpcd", "= GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can not exclude", "@patch('gpcrondump.validate_current_timestamp') def test_options10(self, mock, mock2): options = GpCronDumpTestCase.Options() options.local_dump_prefix =", "mock.MagicMock() with self.assertRaisesRegexp(Exception, \"\\'%s\\' file is not formatted properly.\" %", "is not '.yaml' file. File containing email details should be", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database06(self, mock1, mock2, mock3,", "= True with self.assertRaisesRegexp(Exception, '-s option can not be selected", "be selected with -S option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file')", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter3(self, mock,", "test_get_files_file_list_with_filter(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101'", "@patch('gpcrondump.validate_current_timestamp') def test_options12(self, mock, mock2): options = GpCronDumpTestCase.Options() options.incremental =", "GpCronDumpTestCase.Options() cron = GpCronDump(options, None) ao_partition_list = ['public, aot1, 2190',", "@patch('gpcrondump.validate_current_timestamp') def test_options_table_filter2(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables =", "'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_gpcrondump_init0(self,", "options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' gpcd = GpCronDump(options, None)", "gppylib import gplog from gpcrondump import GpCronDump from 
gppylib.operations.utils import", "= gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertEquals(inc, None) self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file',", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_19(self, mock,", "= GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception,", "= 0 with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater", "@patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list1(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key =", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_27(self, mock, mock2): options =", "with self.assertRaisesRegexp(Exception, 'Must supply -K option when listing backup files'):", "= None @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_1(self, mock, mock2, mock3):", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_1(self, mock, mock2, mock3): options =", "def test_options_schema_filter_25(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo'", "options.exclude_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '-t can not be selected", "False #If this is successful then it should not raise", "selected with incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema')", "True options.netbackup_service_host = \"mdw\" options.netbackup_policy = \"test_policy\" options.netbackup_schedule = \"test_schedule\"", "options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables = '/tmp/foo' with", "schema file '/tmp/foo'\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_31(self, mock,", "mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_schema = 'foo'", "'RRP_MPE2_DCA_1', 'DBNAME': 'testdb100', 'SUBJECT': \"backup completed for Database 'testdb100'\"}]}) def", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_8(self, mock, mock2):", "None self.netbackup_block_size = None self.netbackup_keyword = None @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp')", "catalog schema 'information_schema' using -s option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port')", "options.incremental = True with self.assertRaisesRegexp(Exception, 'exclude table list can not", "files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/metro_gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,", "GpCronDumpTestCase.Options() options.ddboost = True options.replicate = True options.max_streams = \"abc\"", "= ['--column-inserts'] options.incremental = True with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_4(self, mock, mock2): options = GpCronDumpTestCase.Options()", "-s option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_25(self,", "options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') @patch('gpcrondump.GpCronDump._get_master_port') def test_get_files_file_list_with_prefix(self, mock1,", "write_lines_to_file import mock logger = gplog.get_unittest_logger() class GpCronDumpTestCase(unittest.TestCase): class Options:", "dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp) files_file_list = gpcd._get_files_file_list(master, dump_dir,", "options = GpCronDumpTestCase.Options() options.ddboost = True options.replicate = False options.max_streams", "GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory = '/foo' options.dump_config = True", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_33(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file =", "mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can", "exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertEquals(inc, None) self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter2(self, mock,", "'/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options16(self, mock, mock2): options =", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def test_options_schema_filter_29(self, mock, mock2, mock3):", "dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) files_file_list = gpcd._get_files_file_list(master, dump_dir,", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % 
options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list2(self,", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_oids_with_incremental(self, mock, mock2): options =", "test_options4(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables_file = 'foo' options.incremental", "def test_options_schema_filter_4(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo'", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.incremental =", "options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S can not be selected", "= ['bkdb'] options.timestamp_key = True options.ddboost = True options.list_backup_files =", "= GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' gpcd = GpCronDump(options, None) timestamp", "= '/tmp/foobar' gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname", "= GpCronDumpTestCase.Options() options.dump_schema = ['information_schema'] with self.assertRaisesRegexp(Exception, \"can not specify", "= GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' gpcd = GpCronDump(options, None) dbname", "options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.exclude_dump_tables = '/tmp/foo' with", "= '/tmp/dirty' dbname = 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)", "True options.replicate = True options.max_streams = \"abc\" with self.assertRaisesRegexp(Exception, '--max-streams", "'20130101010101' gpcd = GpCronDump(options, None) timestamp = gpcd._get_timestamp_object(options.timestamp_key) self.assertEquals(timestamp, datetime(2013,", "options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_schema_file = '/tmp/foo' with", "'--schema-file option can not be selected with incremental backup'): cron", "with self.assertRaisesRegexp(Exception, \"'%s' file is empty.\" % options.include_email_file): cron =", "= 'testdb1' current_exit_status = 0 time_start = '12:07:09' time_end =", "@patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_00(self, mock1, mock2): options = GpCronDumpTestCase.Options() cron =", "'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments'", "GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'include", "test_gpcrondump_init0(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None options.local_dump_prefix", "import imp gpcrondump_path = os.path.abspath('gpcrondump') gpcrondump = imp.load_source('gpcrondump', gpcrondump_path) import", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_17(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema =", "def test_options_table_filter5(self, mock, mock2): options = GpCronDumpTestCase.Options() 
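    # The tests below exercise the rest of the option surface and the backup
    # file-list helpers: incremental and ddboost/NetBackup validation,
    # timestamp-key parsing, and the per-host file and pipe lists built for
    # a backup set. Cluster segments are stand-ins created with Mock().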
    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options1(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_dump_tables = 'foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, 'include table list can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options2(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_tables = 'foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, 'exclude table list can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options10(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.local_dump_prefix = 'foo'
        options.incremental = False
        options.list_filter_tables = True
        try:
            with self.assertRaisesRegexp(Exception, 'list filter tables option requires --prefix and --incremental'):
                cron = GpCronDump(options, None)
        finally:
            options.list_filter_tables = False

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options12(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.incremental = True
        options.dump_databases = []
        with self.assertRaisesRegexp(Exception, 'Must supply -x <database name> with incremental option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.GpCronDump._get_master_port')
    def test_options13(self, mock, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.incremental = True
        options.dump_databases = 'bkdb,fulldb'
        with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options17(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.masterDataDirectory = '/tmp/foobar'
        options.backup_dir = None
        gpcd = GpCronDump(options, None)
        self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_column_inserts_with_incremental(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.output_options = ['--column-inserts']
        options.incremental = True
        with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options21(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = True
        options.replicate = True
        options.max_streams = None
        with self.assertRaisesRegexp(Exception, '--max-streams must be specified'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options22(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = True
        options.replicate = True
        options.max_streams = "abc"
        with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options25(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = False
        options.replicate = False
        options.max_streams = 20
        with self.assertRaisesRegexp(Exception, '--replicate and --max-streams cannot be used without --ddboost'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options26(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.list_backup_files = True
        options.timestamp_key = None
        with self.assertRaisesRegexp(Exception, 'Must supply -K option when listing backup files'):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options28(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_databases = ['bkdb']
        options.timestamp_key = True
        options.ddboost = True
        options.list_backup_files = True
        with self.assertRaisesRegexp(Exception, 'list backup files not supported with ddboost option'):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options29(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = True
        options.netbackup_service_host = "mdw"
        options.netbackup_policy = "test_policy"
        options.netbackup_schedule = "test_schedule"
        with self.assertRaisesRegexp(Exception, '--ddboost is not'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_timestamp_object1(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = '20130101010101'
        gpcd = GpCronDump(options, None)
        timestamp = gpcd._get_timestamp_object(options.timestamp_key)
        self.assertEquals(timestamp, datetime(2013, 1, 1, 1, 1, 1))

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_timestamp_object2(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = '20130101010'
        gpcd = GpCronDump(options, None)
        with self.assertRaisesRegexp(Exception, 'Invalid timestamp key'):
            gpcd._get_timestamp_object(options.timestamp_key)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_gpcrondump_init0(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = None
        options.local_dump_prefix = 'foo'
        options.ddboost = False
        options.ddboost_verify = False
        options.ddboost_config_remove = False
        options.ddboost_user = False
        options.ddboost_host = False
        options.max_streams = None
        options.list_backup_files = False
        gpcd = GpCronDump(options, None)
        self.assertEqual(gpcd.dump_prefix, 'foo_')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_files_file_list1(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = '20130101010101'
        gpcd = GpCronDump(options, None)
        master = Mock()
        master.getSegmentHostName.return_value = 'foo1'
        timestamp = '20130101010101'
        dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp)
        files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
        expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
                               'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
                               'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
                               'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
                               'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,
                               'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory]
        self.assertEqual(files_file_list, expected_files_list)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_pipes_file_list1(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = '20130101010101'
        gpcd = GpCronDump(options, None)
        master = Mock()
        master.getSegmentHostName.return_value = 'foo2'
        mock_segs = []
        timestamp = '20130101010101'
        dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
        pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
        expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
                               'foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory]
        self.assertEqual(pipes_file_list, expected_files_list)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_table_names_from_partition_list_01(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        cron = GpCronDump(options, None)
        partition_list = ['public, aot1, 2190', 'public, aot2,aot, 3190']
        with self.assertRaisesRegexp(Exception, 'Invalid partition entry "public, aot2,aot, 3190"'):
            cron._get_table_names_from_partition_list(partition_list)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1', 'public.aot2'], ['public.cot1', 'public.cot2:asd']])
    def test_verify_tablenames_00_bad(self, mock1, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        cron = GpCronDump(options, None)
        ao_partition_list = ['public, aot1!asd, 2190', 'public, aot2, 3190']
        co_partition_list = ['public, cot1, 2190', 'public, cot2\nasd, 3190']
        heap_partition_list = ['public, heapt1, 2190', 'public, heapt2!asdasd , 3190']
        with self.assertRaisesRegexp(Exception, ''):
            cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file')
    @patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2'])
    def test_get_include_exclude_for_dump_database01(self, mock1, mock2, mock3, mock4):
        options = GpCronDumpTestCase.Options()
        options.masterDataDirectory = '/tmp/foobar'
        options.include_dump_tables = ['public.t1', 'public.t2', 'public.t3']
        gpcd = GpCronDump(options, None)
        dirtyfile = '/tmp/dirty'
        dbname = 'foo'
        (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
        self.assertTrue(inc.startswith('/tmp/include_dump_tables_file'))

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000')
    def test_get_include_exclude_for_dump_database03(self, mock1, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.masterDataDirectory = '/tmp/foobar'
        options.incremental = True
        gpcd = GpCronDump(options, None)
        dirtyfile = '/tmp/dirty'
        dbname = 'foo'
        (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
        self.assertEquals(inc, '/tmp/dirty')
        self.assertEquals(exc, None)

    @patch('gpcrondump.os.path.isfile', return_value=False)
    @patch('gpcrondump.GpCronDump._get_master_port')
    def test_validate_parse_email_File01(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_email_file = "/tmp/abc.yaml"
        with self.assertRaisesRegexp(Exception, "\'%s\' file does not exist." % options.include_email_file):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.os.path.isfile', return_value=True)
    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.os.path.getsize', return_value=0)
    def test_validate_parse_email_File03(self, mock1, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.include_email_file = "/tmp/abc.yaml"
        with self.assertRaisesRegexp(Exception, "'%s' file is empty." % options.include_email_file):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.os.path.isfile', return_value=True)
    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.os.path.getsize', return_value=111)
    @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'NAME': 'testdb100', 'SUBJECT': "backup completed for Database 'testdb100'"}]})
    def test_validate_parse_email_File05(self, mock1, mock2, mock3, mock4):
        options = GpCronDumpTestCase.Options()
        options.include_email_file = "/tmp/abc.yaml"
        m = mock.MagicMock()
        with self.assertRaisesRegexp(Exception, "\'%s\' file is not formatted properly." % options.include_email_file):
            with patch('__builtin__.open', m, create=True):
                cron = GpCronDump(options, None)

    @patch('gpcrondump.MailDumpEvent')
    @patch('gpcrondump.GpCronDump._get_master_port')
    def test_send_email00(self, mock1, MailDumpEvent):
        options = GpCronDumpTestCase.Options()
        dump_database = 'testdb1'
        current_exit_status = 0
        time_start = '12:07:09'
        time_end = '12:08:18'
        cron = GpCronDump(options, None)
        cron._send_email(dump_database, current_exit_status, time_start, time_end)
test_options_schema_filter_10(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file", "= gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,", "with -s option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "imp.load_source('gpcrondump', gpcrondump_path) import unittest2 as unittest from datetime import datetime", "None) @patch('gpcrondump.os.path.isfile', return_value=False) @patch('gpcrondump.GpCronDump._get_master_port') def test_validate_parse_email_File01(self, mock1, mock2): options =", "= False self.batch_default = DEFAULT_NUM_WORKERS self.include_dump_tables = None self.exclude_dump_tables =", "self.ddboost_ping = None self.ddboost_backupdir = None self.replicate = None self.max_streams", "'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list,", "gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertEquals(inc, '/tmp/dirty') self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file')", "MailDumpEvent from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file import mock logger =", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_18(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema", "-S option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_19(self,", "not exclude catalog schema 'information_schema' in schema file '/tmp/foo'\"): GpCronDump(options,", "= GpCronDumpTestCase.Options() options.list_backup_files = True options.timestamp_key = None with self.assertRaisesRegexp(Exception,", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables_file =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_3(self, mock, mock2):", "GpCronDumpTestCase.Options() options.exclude_dump_schema = 'public' gpcd = GpCronDump(options, None) dbname =", "'public.t2', 'public.t3'] gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname", "def test_options_schema_filter_8(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo'", "test_options_table_filter3(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.exclude_dump_tables_file", "@patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_1(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_schema_file", "must be a number greater than zero'): cron = GpCronDump(options,", "= True options.ddboost = True options.netbackup_service_host = \"mdw\" options.netbackup_policy =", "'20121225090000') 
@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options12(self, mock, mock2): options = GpCronDumpTestCase.Options()", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_00(self, mock1, mock2): options", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_10(self, mock,", "None) dbname = 'foo' timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname)", "options = GpCronDumpTestCase.Options() options.output_options = ['--column-inserts'] options.incremental = True with", "\"can not exclude catalog schema 'information_schema' in schema file '/tmp/foo'\"):", "2190', 'public, aot2:aot, 3190'] expected_output = ['public.aot1', 'public.aot2:aot'] result =", "incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_oids_with_incremental(self,", "@patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list3(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key =", "['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar',", "= GpCronDumpTestCase.Options() options.dump_databases = ['bkdb'] options.timestamp_key = True options.ddboost =", "file = gpcd.get_schema_list_file(dbname) self.assertEquals(file, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_32(self, mock1,", "Database 'testdb100'\"}]}) def test_validate_parse_email_File05(self, mock1, mock2, mock3, mock4): options =", "test_options_schema_filter_26(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables", "def test_get_include_exclude_for_dump_database06(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter1(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options15(self, mock, mock2): options = GpCronDumpTestCase.Options()", "exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertEquals(inc, '/tmp/dirty') self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "os.path.abspath('gpcrondump') gpcrondump = imp.load_source('gpcrondump', gpcrondump_path) import unittest2 as unittest from", "def test_options_table_filter6(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables_file = 'foo'", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options20(self, mock, mock2): options = GpCronDumpTestCase.Options() 
options.dump_databases", "options.incremental = True with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not be", "'foo' with self.assertRaisesRegexp(Exception, '--table-file can not be selected with --exclude-table-file", "mock4): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" m = mock.MagicMock()", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options28(self, mock, mock2): options =", "False options.ddboost_host = False options.max_streams = None options.list_backup_files = False", "import MailDumpEvent from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file import mock logger", "True options.list_backup_files = True with self.assertRaisesRegexp(Exception, 'list backup files not", "GpCronDump(options, None) dbname = 'foo' schema_file = '/tmp/foo' inc =", "test_verify_tablenames_00(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() cron = GpCronDump(options,", "options = GpCronDumpTestCase.Options() cron = GpCronDump(options, None) ao_partition_list = ['public,", "= '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp) files_file_list =", "options.incremental = True with self.assertRaisesRegexp(Exception, 'include table file can not", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_7(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema", "--schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_7(self,", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_25(self, mock, mock2): options =", "def test_options_schema_filter_36(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() gpcd", "options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.incremental = True options.masterDataDirectory", "GpCronDumpTestCase.Options() cron = GpCronDump(options, None) ao_partition_list = ['public, aot1!asd, 2190',", "None) master = Mock() master.getSegmentHostName.return_value = 'foo2' timestamp = '20130101010101'", "test_options17(self, mock, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir", "options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,", "'list backup files not supported with ddboost option'): GpCronDump(options, None)", "options.exclude_schema_file = 'foo' options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t and", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_2(self, mock, mock2, mock3): options =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options23(self, mock, mock2):", "must be 
specified along with --replicate'): cron = GpCronDump(options, None)", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options4(self, mock, mock2): options", "exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect =", "#Should not raise an exception @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect =", "GpCronDumpTestCase.Options() options.include_dump_tables_file = 'foo' options.exclude_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '--table-file", "@patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1:asd', 'public.aot2'], ['public.cot1', 'public.cot2:asd']]) def test_verify_tablenames_00_bad(self, mock1,", "exist.\" % options.include_email_file): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port')", "mock_segs = [] timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir,", "cannot be used without --ddboost'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "None) timestamp = gpcd._get_timestamp_object(options.timestamp_key) self.assertEquals(timestamp, datetime(2013, 1, 1, 1, 1,", "GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t", "= None self.netbackup_schedule = None self.netbackup_block_size = None self.netbackup_keyword =", "be selected with -T option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gppylib.operations.backup_utils.get_latest_full_dump_timestamp', return_value='20130101000000') def test_get_files_file_list_with_filter(self, mock1, mock2, mock3): options", "= '/tmp/foobar' options.exclude_dump_tables = ['public.t4', 'public.t5', 'public.t6'] gpcd = GpCronDump(options,", "timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/metro_gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file'", "self.ddboost_remote = None self.ddboost_ping = None self.ddboost_backupdir = None self.replicate", "GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' gpcd = GpCronDump(options, None) timestamp =", "zero'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options24(self, mock,", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_14(self, mock, mock2): options = GpCronDumpTestCase.Options()", "supported with NetBackup'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def 
test_get_include_exclude_for_dump_database00(self, mock1,", "Database 'testdb100'\"}]}) def test_validate_parse_email_File04(self, mock1, mock2, mock3, mock4): options =", "test_options_schema_filter_24(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables", "aot2,aot, 3190'] with self.assertRaisesRegexp(Exception, 'Invalid partition entry \"public, aot2,aot, 3190\"'):", "@patch('gpcrondump.get_lines_from_file', return_value=['public']) @patch('gpcrondump.get_user_table_list_for_schema', return_value=['public', 'table1', 'public', 'table2']) def test_options_schema_filter_36(self, mock1,", "GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_4(self, mock, mock2): options", "and --exclude-table-file can not be selected with --exclude-schema-file option'): cron", "gpcd.generate_include_table_list_from_schema_file(dbname, schema_file) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options1(self, mock, mock2): options", "= True options.replicate = True options.max_streams = None with self.assertRaisesRegexp(Exception,", "gpcrondump_path = os.path.abspath('gpcrondump') gpcrondump = imp.load_source('gpcrondump', gpcrondump_path) import unittest2 as", "GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file", "mock2, mock3): options = GpCronDumpTestCase.Options() options.incremental = True options.dump_databases =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options19(self, mock, mock2):", "GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception, '--exclude-schema-file", "'20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) files_file_list = gpcd._get_files_file_list(master,", "self.include_email_file = None self.email_details = None self.include_schema_file = None self.exclude_schema_file", "self.masterDataDirectory = \"\" self.interactive = False self.clear_dumps_only = False self.post_script", "True with self.assertRaisesRegexp(Exception, '-S option can not be selected with", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_17(self, mock, mock2): options =", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_9(self, mock,", "mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None options.local_dump_prefix =", "mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' options.incremental", "is not supported with -K option'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "= GpCronDump(options, None) master = Mock() 
master.getSegmentHostName.return_value = 'foo2' mock_segs", "return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'NAME': 'testdb100',", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.exclude_dump_tables_file =", "test_options_schema_filter_19(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.exclude_dump_tables", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options10(self, mock, mock2): options = GpCronDumpTestCase.Options() options.local_dump_prefix", "False self.pre_vacuum = False self.post_vacuum = False self.rollback = False", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_6(self, mock,", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options20(self, mock, mock2): options =", "with self.assertRaisesRegexp(Exception, 'exclude table list can not be selected with", "None, 'SUBJECT': \"backup completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File05(self, mock1,", "= GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options17(self, mock,", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_14(self, mock, mock2):", "options.backup_dir = None gpcd = GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar') @patch('gpcrondump.GpCronDump._get_master_port')", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_11(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file", "return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'NAME': 'testdb100', 'SUBJECT': \"backup completed for Database", "return_value=['public', 'information_schema']) def test_options_schema_filter_29(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options()", "mock1, mock2): options = GpCronDumpTestCase.Options() options.list_backup_files = True options.timestamp_key =", "None self.netbackup_service_host = None self.netbackup_policy = None self.netbackup_schedule = None", "mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_schema_file = '/tmp/foo'", "= True with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected", "= None self.exclude_dump_tables = None self.include_dump_tables_file = None self.exclude_dump_tables_file =", "None self.ddboost_user = None self.ddboost_config_remove = False self.ddboost_verify = False", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options26(self, mock1, mock2): options 
=", "'exclude table list can not be selected with incremental backup'):", "'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101'", "and -T can not be selected with -S option'): cron", "incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port')", "'-t and -T can not be selected with -s option'):", "mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables", "sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list4(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_16(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema", "= GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables_file = '/odir/exfile' gpcd =", "= 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "create=True): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=False) @patch('gpcrondump.GpCronDump._get_master_port') def test_validate_parse_email_File01(self,", "test_options16(self, mock, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir", "def test_option_schema_filter_1(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_schema_file =", "gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public']) @patch('gpcrondump.get_user_table_list_for_schema', return_value=['public', 'table1', 'public',", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010' gpcd =", "options.incremental = True options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options, None)", "= gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertEquals(inc, '/tmp/dirty') self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file',", "mock2, mock3): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception,", "'/tmp/foobar') @patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp') def test_options18(self, mock, mock2): options = GpCronDumpTestCase.Options()", "@patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'NAME':", "mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables_file", "None) master = Mock() master.getSegmentHostName.return_value = 'foo1' mock_segs = [Mock(),", "mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_schema_file = '/tmp/foo'", "(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file')", "aot1, 2190', 'public, aot2, 3190'] co_partition_list = ['public, cot1, 2190',", "options = GpCronDumpTestCase.Options() options.dump_databases = [] options.incremental = True with", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options16(self, mock, mock2): options = GpCronDumpTestCase.Options()", "'testdb100'\"}]}) def test_validate_parse_email_File04(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options()", "file can not be selected with incremental backup'): cron =", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_34(self, mock1, mock2, mock3): options", "= 'foo' with self.assertRaisesRegexp(Exception, '--table-file can not be selected with", "self.dump_databases = ['testdb'] self.bypass_disk_check = True self.backup_set = None self.dump_global", "def test_get_timestamp_object1(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101'", "test_options_schema_filter_12(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.exclude_dump_tables_file", "'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt'", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_25(self, mock, mock2): options = GpCronDumpTestCase.Options()", "test_options_schema_filter_14(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.include_dump_tables_file", "include catalog schema 'information_schema' in schema file '/tmp/foo'\"): GpCronDump(options, None)", "= True with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not be selected", "'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-s can not be", "timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) 
@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "'foo' options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S can not be", "selected with -s option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "test_get_include_exclude_for_dump_database02(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory =", "options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.incremental = True options.local_dump_prefix", "False options.ddboost_verify = False options.ddboost_config_remove = False options.ddboost_user = False", "with --exclude-schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "2190', 'public, aot2, 3190'] co_partition_list = ['public, cot1, 2190', 'public,", "= GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory = '/foo' gpcd =", "self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list1(self, mock1, mock2): options =", "= GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "an exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options15(self, mock, mock2):", "with incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "def test_validate_parse_email_File02(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_email_file =", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_28(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema =", "@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2']) def test_get_include_exclude_for_dump_database04(self, mock1, mock2, mock3,", "test_validate_parse_email_File04(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.include_email_file =", "= '/tmp/foo' with self.assertRaisesRegexp(Exception, '-s can not be selected with", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_5(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_00(self, mock1, mock2): options =", "test_get_files_file_list2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory", "options.exclude_dump_schema = 'public' gpcd = GpCronDump(options, None) dbname = 'foo'", "= GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables_file = '/mydir/incfile' gpcd =", "successful then it should not raise an exception GpCronDump(options, None)", "'foo' 
options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can", "not '.yaml' file. File containing email details should be '.yaml'", "test_validate_parse_email_File01(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" with", "@patch('gpcrondump.validate_current_timestamp') def test_options14(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases =", "@patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database02(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options()", "master.getSegmentHostName.return_value = 'foo1' timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir,", "% options.include_email_file): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize',", "cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=False) @patch('gpcrondump.GpCronDump._get_master_port') def test_validate_parse_email_File01(self, mock1,", "None self.ddboost_config_remove = False self.ddboost_verify = False self.ddboost_remote = None", "options = GpCronDumpTestCase.Options() options.include_dump_tables_file = 'foo' options.exclude_dump_tables_file = 'foo' with", "\"/tmp/abc\" with self.assertRaisesRegexp(Exception, \"'%s' is not '.yaml' file. File containing", "can not be selected with incremental backup'): cron = GpCronDump(options,", "= GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "mock2): options = GpCronDumpTestCase.Options() options.dump_databases = [] options.incremental = True", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_filter' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.validate_current_timestamp')", "'exclude table file can not be selected with incremental backup'):", "'/foo1' gpcd = GpCronDump(options, None) self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables = '/tmp/foo'", "mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.include_dump_tables =", "mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables_file = '/mydir/incfile'", "self.ddboost_config_remove = False self.ddboost_verify = False self.ddboost_remote = None self.ddboost_ping", "'/tmp/foo' with self.assertRaisesRegexp(Exception, '-s can not be selected with --exclude-schema-file", "= \"\" self.interactive = False self.clear_dumps_only = False self.post_script =", "mock1, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' write_lines_to_file('/tmp/foo', ['public'])", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options3(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables_file", 
"options.dump_databases = 'bkdb,fulldb' options.incremental = False #If this is successful", "self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not be selected with incremental backup'):", "self.assertRaisesRegexp(Exception, 'Invalid timestamp key'): gpcd._get_timestamp_object(options.timestamp_key) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object3(self, mock1,", "= GpCronDump(options, None) cron._send_email(dump_database, current_exit_status, time_start, time_end) #------------------------------- Mainline --------------------------------", "options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with", "'foo' options.exclude_dump_schema = 'foo' with self.assertRaisesRegexp(Exception, '-s can not be", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options3(self, mock,", "self.compress = True self.free_space_percent = None self.clear_dumps = False self.cleanup_date", "= GpCronDumpTestCase.Options() cron = GpCronDump(options, None) partition_list = ['public, aot1,", "['--inserts'] options.incremental = True with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot", "with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not be selected with incremental", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options2(self, mock,", "then it should not raise an exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "= gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_5(self, mock, mock2): options = GpCronDumpTestCase.Options()", "= None self.ddboost_config_remove = False self.ddboost_verify = False self.ddboost_remote =", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list))", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_35(self, mock1, mock2, mock3): options", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_27(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema =", "20 with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'):", "= GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception,", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter6(self, mock,", "= GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "options = 
GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.include_dump_tables_file = '/tmp/foo' with", "None) @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port') def test_options13(self, mock, mock2, mock3):", "def test_option_schema_filter_2(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file =", "options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_schema_file = '/tmp/foo' with", "= None self.include_dump_tables_file = None self.exclude_dump_tables_file = None self.backup_dir =", "test_options_schema_filter_8(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_schema_file", "def test_get_table_names_from_partition_list_01(self, mock1, mock2): options = GpCronDumpTestCase.Options() cron = GpCronDump(options,", "['public.heapt1', 'public.heapt2'] cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) #Should not raise an exception", "options.incremental = True cron = GpCronDump(options, None) self.assertEquals(cron.full_dump_timestamp, '20121225090000') @patch('gpcrondump.GpCronDump._get_master_port')", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter3(self, mock, mock2): options =", "-T can not be selected with -S option'): cron =", "supported with -K option'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options28(self,", "gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database06(self,", "True self.backup_set = None self.dump_global = False self.clear_catalog_dumps = False", "cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=0) def", "test_options_schema_filter_15(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables_file", "timestamp = gpcd._get_timestamp_object(options.timestamp_key) self.assertTrue(isinstance(timestamp, datetime)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list1(self, mock1,", "should not raise an exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "and --max-streams cannot be used without --ddboost'): cron = GpCronDump(options,", "mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() gpcd = GpCronDump(options,", "= GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "= GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.local_dump_prefix = 'metro' options.include_dump_tables_file =", "GpCronDumpTestCase.Options() 
options.include_email_file = \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"\\'%s\\' file does not", "catalog schema 'information_schema' in schema file '/tmp/foo'\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_9(self, mock, mock2): options =", "'foo' with self.assertRaisesRegexp(Exception, '-t can not be selected with -T", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1', 'public.aot2'], ['public.cot1', 'public.cot2']]) def", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_27(self, mock, mock2): options", "MailDumpEvent): options = GpCronDumpTestCase.Options() dump_database = 'testdb1' current_exit_status = 0", "@patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object3(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key =", "GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can not exclude catalog", "GpCronDumpTestCase.Options() options.list_backup_files = True options.timestamp_key = None with self.assertRaisesRegexp(Exception, 'Must", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options26(self, mock1, mock2):", "def test_options_schema_filter_24(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo'", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_8(self, mock,", "False self.clear_dumps_only = False self.post_script = None self.dump_config = False", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options2(self, mock, mock2): options", "= 'foo1' timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir,", "options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory]", "containing email details should be '.yaml' file.\" % options.include_email_file): cron", "@patch('gpcrondump.validate_current_timestamp') def test_options21(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost =", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key", "options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') def test_get_files_file_list3(self, mock1,", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_5(self, mock,", "return_value='20130101000000') def test_get_files_file_list_with_filter(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key", "test_options_schema_filter_31(self, mock, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' gpcd", "[['public.aot1:asd', 'public.aot2'], ['public.cot1', 'public.cot2:asd']]) def test_verify_tablenames_00_bad(self, mock1, mock2, mock3): options", "test_options_table_filter1(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.include_dump_tables_file", "with self.assertRaisesRegexp(Exception, 'include table list can not be selected with", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar',", "def test_options_schema_filter_26(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo'", "[['public.aot1', 'public.aot2'], ['public.cot1', 'public.cot2']]) def test_verify_tablenames_00(self, mock1, mock2, mock3): options", "test_options_schema_filter_7(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_schema", "with self.assertRaisesRegexp(Exception, '-t can not be selected with --table-file option'):", "= None self.exclude_dump_schema = None self.dump_stats = None ## Enterprise", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_7(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema =", "@patch('gpcrondump.validate_current_timestamp') def test_options17(self, mock, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory =", "'/foo' gpcd = GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value =", "an exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options14(self, mock, mock2):", "'bar' options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options, None) master =", "from gpcrondump import GpCronDump from gppylib.operations.utils import DEFAULT_NUM_WORKERS from mock", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_25(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema =", "None self.include_schema_file = None self.exclude_schema_file = None self.exclude_dump_schema = None", "cron = GpCronDump(options, None) ao_partition_list = ['public, aot1, 2190', 'public,", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') def test_get_files_file_list3(self, mock1, mock2, mock3): options", "'metro' options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options, None) master =", "options.dump_config = True gpcd = GpCronDump(options, None) master = Mock()", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options19(self, mock,", "= '20130101010101' 
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) pipes_file_list =", "= '/tmp/foobar' options.exclude_dump_tables_file = '/odir/exfile' gpcd = GpCronDump(options, None) dirtyfile", "self.assertEquals(inc, '/tmp/dirty') self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1',", "True with self.assertRaisesRegexp(Exception, 'exclude table list can not be selected", "options.replicate = True options.max_streams = 0 with self.assertRaisesRegexp(Exception, '--max-streams must", "test_options26(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.list_backup_files = True options.timestamp_key", "def test_options20(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = []", "GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables_file = '/odir/exfile' gpcd = GpCronDump(options,", "options.backup_dir, gpcd.dump_dir, timestamp) files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list =", "patch('__builtin__.open', m, create=True): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port')", "GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t", "master = Mock() master.getSegmentHostName.return_value = 'foo2' mock_segs = [] timestamp", "None self.max_streams = None self.netbackup_service_host = None self.netbackup_policy = None", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_32(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.dump_schema =", "def test_options29(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = ['bkdb']", "3190'] with self.assertRaisesRegexp(Exception, 'Invalid partition entry \"public, aot2,aot, 3190\"'): cron._get_table_names_from_partition_list(partition_list)", "options.list_backup_files = True options.timestamp_key = None with self.assertRaisesRegexp(Exception, 'Must supply", "partition_list = ['public, aot1, 2190', 'public, aot2:aot, 3190'] expected_output =", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def test_options_schema_filter_29(self, mock, mock2, mock3): options", "expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']", "--exclude-schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_20(self,", "True options.ddboost = True options.netbackup_service_host = \"mdw\" options.netbackup_policy = \"test_policy\"", "= ['--inserts'] options.incremental = True with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids", "without --ddboost'): cron = GpCronDump(options, None) 
Every test builds its arguments from a plain test double that mirrors the full option surface of gpcrondump, one safe default per flag:

class GpCronDumpTestCase(unittest.TestCase):

    class Options:
        def __init__(self):
            self.masterDataDirectory = ""
            self.interactive = False
            self.clear_dumps_only = False
            self.post_script = None
            self.dump_config = False
            self.history = False
            self.pre_vacuum = False
            self.post_vacuum = False
            self.rollback = False
            self.compress = True
            self.free_space_percent = None
            self.clear_dumps = False
            self.cleanup_date = None
            self.cleanup_total = None
            self.dump_schema = False
            self.dump_databases = ['testdb']
            self.bypass_disk_check = True
            self.backup_set = None
            self.dump_global = False
            self.clear_catalog_dumps = False
            self.batch_default = DEFAULT_NUM_WORKERS
            self.include_dump_tables = None
            self.exclude_dump_tables = None
            self.include_dump_tables_file = None
            self.exclude_dump_tables_file = None
            self.backup_dir = None
            self.encoding = None
            self.output_options = None
            self.report_dir = None
            self.timestamp_key = None
            self.list_backup_files = False           # value inferred; the shingle truncates after the name
            self.local_dump_prefix = ''              # attribute name inferred from the prefix tests below
            self.list_filter_tables = None
            self.include_email_file = None
            self.email_details = None
            self.include_schema_file = None
            self.exclude_schema_file = None
            self.exclude_dump_schema = None
            self.dump_stats = None

            ## Enterprise init
            self.incremental = False
            self.ddboost = False
            self.ddboost_hosts = None
            self.ddboost_user = None
            self.ddboost_config_remove = False
            self.ddboost_remote = None
            self.ddboost_ping = None
            self.ddboost_backupdir = None
            self.replicate = None
            self.max_streams = None
            self.netbackup_service_host = None
            self.netbackup_policy = None
            self.netbackup_schedule = None
            self.netbackup_block_size = None
            self.netbackup_keyword = None            # value truncated in the shingles
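The Options inner class is a plain test double: every flag the option parser would produce gets a default, and each test flips only the fields it probes. A minimal self-contained sketch of the same pattern; the Tool class and its validation rule are hypothetical, with the error message echoing one attested above:

import unittest

class Tool(object):
    """Hypothetical stand-in for GpCronDump's constructor-time validation."""
    def __init__(self, options):
        if options.replicate and options.max_streams is None:
            raise Exception('--max-streams must be specified along with --replicate')
        self.options = options

class ToolTestCase(unittest.TestCase):
    class Options(object):
        def __init__(self):
            # One safe default per flag; tests override only what they probe.
            self.replicate = False
            self.max_streams = None

    def test_replicate_requires_max_streams(self):
        options = self.Options()
        options.replicate = True
        with self.assertRaisesRegexp(Exception, '--max-streams must be specified'):
            Tool(options)

if __name__ == '__main__':
    unittest.main()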
The bulk of the suite drives that constructor-time validation. Each test flips one or two flags on the double and asserts the exact error text, with the master port lookup and current-timestamp check patched out. Fully recoverable cases:

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000')
    def test_options11(self, mock, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.incremental = True
        cron = GpCronDump(options, None)
        self.assertEquals(cron.full_dump_timestamp, '20121225090000')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options17(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.masterDataDirectory = '/tmp/foobar'
        options.backup_dir = None
        gpcd = GpCronDump(options, None)
        self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options26(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.list_backup_files = True
        options.timestamp_key = None
        with self.assertRaisesRegexp(Exception, 'Must supply -K option when listing backup files'):
            GpCronDump(options, None)

The sibling test_options16 sets backup_dir = '/foo1' and expects getBackupDirectoryRoot() to return '/foo1'. The remaining cases in this family assert, among others: '-t and -T can not be selected with -s option', '--table-file and --exclude-table-file can not be selected with --schema-file option' (and its --exclude-schema-file variant), 'include table file can not be selected with incremental backup', 'multi-database backup is not supported with incremental backup', 'Must supply -x <database name> with incremental option', 'list filter tables option requires --prefix and --incremental', and "can not include catalog schema 'information_schema' in schema file '/tmp/foo'".
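Most of these validations are pairwise exclusivity between CLI flags. A compact, hypothetical way to express such rules outside gpcrondump; the rule table and validate() are illustrative, with messages echoing the tests:

# Hypothetical validator: each rule is (flag_a, flag_b, error message).
EXCLUSIVE = [
    ('include_dump_tables', 'dump_schema',
     '-t and -T can not be selected with -s option'),
    ('exclude_dump_tables_file', 'incremental',
     'exclude table file can not be selected with incremental backup'),
]

def validate(opts):
    for a, b, msg in EXCLUSIVE:
        if getattr(opts, a, None) and getattr(opts, b, None):
            raise Exception(msg)

class Opts(object):
    pass

o = Opts()
o.include_dump_tables = 'foo'
o.dump_schema = 'foo'
try:
    validate(o)
except Exception as e:
    print(e)  # -t and -T can not be selected with -s option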
"test_options_column_inserts_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--column-inserts'] options.incremental", "self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public']) @patch('gpcrondump.get_user_table_list_for_schema', return_value=['public', 'table1', 'public', 'table2'])", "mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables_file", "and --exclude-table-file can not be selected with --schema-file option'): cron", "'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'exclude table file can", "def test_options26(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.list_backup_files = True", "= False options.ddboost_user = False options.ddboost_host = False options.max_streams =", "return_value=['public.t1', 'public.t2']) def test_get_include_exclude_for_dump_database01(self, mock1, mock2, mock3, mock4): options =", "return_value=['public', 'table1', 'public', 'table2']) def test_options_schema_filter_36(self, mock1, mock2, mock3, mock4):", "with self.assertRaisesRegexp(Exception, 'Must supply -x <database name> with incremental option'):", "'public' gpcd = GpCronDump(options, None) dbname = 'foo' timestamp =", "from datetime import datetime from gppylib import gplog from gpcrondump", "GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, '-s", "= True self.free_space_percent = None self.clear_dumps = False self.cleanup_date =", "heap_partition_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_inserts_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options()", "= None with self.assertRaisesRegexp(Exception, 'Must supply -K option when listing", "1)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object2(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) pipes_file_list", "not be selected with --table-file option'): cron = GpCronDump(options, None)", "options.include_dump_tables = ['public.t1', 'public.t2', 'public.t3'] gpcd = GpCronDump(options, None) dirtyfile", "mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost = True options.replicate =", "options = GpCronDumpTestCase.Options() cron = GpCronDump(options, None) partition_list = ['public,", "dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database06(self, mock1,", "options.dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and", "listing backup files'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options27(self, mock,", 
"['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation'", "specified along with --replicate'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list',", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter6(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables_file", "= gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public']) @patch('gpcrondump.get_user_table_list_for_schema', return_value=['public', 'table1',", "gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname = 'foo'", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list3(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key", "None) partition_list = ['public, aot1, 2190', 'public, aot2,aot, 3190'] with", "\"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"'%s' file is empty.\" % options.include_email_file): cron", "False options.list_filter_tables = True try: with self.assertRaisesRegexp(Exception, 'list filter tables", "None) cron._send_email(dump_database, current_exit_status, time_start, time_end) #------------------------------- Mainline -------------------------------- if __name__", "def test_get_pipes_file_list4(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None", "'foo' timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertEquals(file, None) @patch('gpcrondump.GpCronDump._get_master_port')", "\"can not include catalog schema 'information_schema' in schema file '/tmp/foo'\"):", "options = GpCronDumpTestCase.Options() gpcd = GpCronDump(options, None) dbname = 'foo'", "dbname = 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port')", "GpCronDump(options, None) @patch('gpcrondump.MailDumpEvent') @patch('gpcrondump.GpCronDump._get_master_port') def test_send_email00(self, mock1, MailDumpEvent): options =", "gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1', 'public.aot2'],", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_oids_with_incremental(self, mock, mock2): options", "= Mock() 
master.getSegmentHostName.return_value = 'foo1' mock_segs = [Mock(), Mock()] for", "get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list", "mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory = '/foo'", "'/tmp/dirty') self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2'])", "None) master = Mock() master.getSegmentHostName.return_value = 'foo2' mock_segs = []", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_12(self, mock, mock2): options", "mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables_file = 'foo' options.incremental = True", "with self.assertRaisesRegexp(Exception, '-S can not be selected with --schema-file option'):", "@patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def test_options_schema_filter_30(self, mock, mock2, mock3): options =", "= GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.include_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options10(self, mock, mock2): options = GpCronDumpTestCase.Options()", "test_options10(self, mock, mock2): options = GpCronDumpTestCase.Options() options.local_dump_prefix = 'foo' options.incremental", "class GpCronDumpTestCase(unittest.TestCase): class Options: def __init__(self): self.masterDataDirectory = \"\" self.interactive", "None self.ddboost_ping = None self.ddboost_backupdir = None self.replicate = None", "if os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_35(self, mock1,", "def test_options_schema_filter_3(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo'", "self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def test_get_include_exclude_for_dump_database03(self, mock1, mock2, mock3):", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = 'bkdb,fulldb' options.timestamp_key =", "os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_35(self, mock1, mock2,", "ao_partition_list = ['public, aot1!asd, 2190', 'public, aot2, 3190'] co_partition_list =", "aot1, 2190', 'public, aot2:aot, 3190'] expected_output = ['public.aot1', 'public.aot2:aot'] result", "mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables_file =", "% 
options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list1(self, mock1, mock2):", "self.include_schema_file = None self.exclude_schema_file = None self.exclude_dump_schema = None self.dump_stats", "def test_options_schema_filter_10(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo'", "dbname) self.assertEquals(inc, None) self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file',", "expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' %", "-T can not be selected with -s option'): cron =", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options23(self, mock, mock2): options = GpCronDumpTestCase.Options()", "% options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list)", "not exist.\" % options.include_email_file): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True)", "'.yaml' file. File containing email details should be '.yaml' file.\"", "GpCronDumpTestCase.Options() options.output_options = ['--column-inserts'] options.incremental = True with self.assertRaisesRegexp(Exception, '--inserts,", "= GpCronDumpTestCase.Options() options.incremental = True options.dump_databases = ['bkdb'] #If this", "options.timestamp_key = True options.ddboost = True options.list_backup_files = True with", "return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': None, 'SUBJECT': \"backup completed", "NetBackup'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_include_exclude_for_dump_database00(self, mock1, mock2): options", "@patch('gpcrondump.GpCronDump._get_master_port') def test_get_files_file_list_with_prefix(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': 'testdb100', 'SUBJECT':", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options27(self, mock, mock2): options = GpCronDumpTestCase.Options()", "self.assertRaisesRegexp(Exception, \"\\'%s\\' file is not formatted properly.\" % options.include_email_file): with", "= 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'exclude table list", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_5(self, mock, mock2):", "= 'foo2' timestamp = 
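The entries the parser accepts have the shape 'schema, table, size'; a table name may contain a colon, but an extra comma makes the entry ambiguous. A hypothetical re-implementation matching the behaviour the two tests assert (not gpcrondump's own code):

def table_names_from_partition_list(partition_list):
    """Turn 'schema, table, size' entries into schema.table names.

    An entry that splits into more than three comma-separated fields is
    ambiguous (the table name would contain a comma) and is rejected,
    mirroring the 'Invalid partition entry' test above.
    """
    names = []
    for entry in partition_list:
        fields = [f.strip() for f in entry.split(',')]
        if len(fields) != 3:
            raise Exception('Invalid partition entry "%s"' % entry)
        schema, table, _size = fields
        names.append('%s.%s' % (schema, table))
    return names

print(table_names_from_partition_list(['public, aot1, 2190', 'public, aot2:aot, 3190']))
# -> ['public.aot1', 'public.aot2:aot']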
The third family checks the file manifests gpcrondump ships to each host. A master Mock plus a list of segment Mocks stand in for the cluster, and expected names are composed from host, data directory, dump day, and timestamp (the pairing of this body with the _1 name follows the shingle overlaps):

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_pipes_file_list1(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = None
        options.masterDataDirectory = '/foo'
        gpcd = GpCronDump(options, None)
        master = Mock()
        master.getSegmentHostName.return_value = 'foo1'
        mock_segs = [Mock(), Mock()]
        for id, seg in enumerate(mock_segs):
            seg.getSegmentDataDirectory.return_value = '/bar'
            seg.getSegmentHostName.return_value = 'foo1'
            seg.getSegmentDbId.return_value = id + 1
        timestamp = '20130101010101'
        dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
        pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
        expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
                               'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory,
                               'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',
                               'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']
        self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))

The sibling cases vary one knob each: dump_global adds gp_global_1_1_<ts>, dump_config adds gp_master_config_files_<ts>.tar plus per-segment gp_segment_config_files_0_<dbid>_<ts>.tar, a local_dump_prefix of 'metro' prepends metro_ to every name, and the _get_files_file_list variants add gp_cdatabase_1_1_<ts>, the *_ao_state_file, *_co_state_file, and *_last_operation files, the .rpt report, and gp_dump_status files, plus a *_filter file when an include filter is in play and get_latest_full_dump_timestamp is patched to '20130101000000'.
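These tests build a Greenplum array out of bare Mocks: one master plus per-segment mocks whose accessors return canned host names, data directories, and dbids. The same technique in isolation; the segment accessor names follow the fragments above, while the path template is illustrative:

from mock import Mock

master = Mock()
master.getSegmentHostName.return_value = 'foo1'

mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
    # Each mocked segment answers the three accessors the code under
    # test consults, with no real cluster behind it.
    seg.getSegmentDataDirectory.return_value = '/bar'
    seg.getSegmentHostName.return_value = 'foo1'
    seg.getSegmentDbId.return_value = id + 1

timestamp = '20130101010101'
# One dump pipe per segment: host:datadir/db_dumps/<day>/gp_dump_<content>_<dbid>_<ts>.gz
pipes = ['%s:%s/db_dumps/%s/gp_dump_0_%d_%s.gz'
         % (seg.getSegmentHostName.return_value,
            seg.getSegmentDataDirectory.return_value,
            timestamp[:8], seg.getSegmentDbId.return_value, timestamp)
         for seg in mock_segs]
print(pipes)
# ['foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',
#  'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']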
DDBoost, NetBackup, and replication flags get their own exclusivity checks, the -K timestamp key is parsed strictly, and a local dump prefix is normalized with a trailing underscore. Recovered cases (the 28/29 numbering follows the order the shingles chain in):

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options28(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_databases = ['bkdb']
        options.timestamp_key = True
        options.ddboost = True
        options.list_backup_files = True
        with self.assertRaisesRegexp(Exception, 'list backup files not supported with ddboost option'):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options29(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_databases = ['bkdb']
        options.timestamp_key = True
        options.ddboost = True
        options.netbackup_service_host = "mdw"
        options.netbackup_policy = "test_policy"
        options.netbackup_schedule = "test_schedule"
        with self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_timestamp_object2(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = '20130101010'
        gpcd = GpCronDump(options, None)
        with self.assertRaisesRegexp(Exception, 'Invalid timestamp key'):
            gpcd._get_timestamp_object(options.timestamp_key)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_gpcrondump_init0(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = None
        options.local_dump_prefix = 'foo'
        options.ddboost = False
        options.ddboost_verify = False
        options.ddboost_config_remove = False
        options.ddboost_user = False
        options.ddboost_host = False
        options.max_streams = None
        options.list_backup_files = False
        gpcd = GpCronDump(options, None)
        self.assertEqual(gpcd.dump_prefix, 'foo_')

Related cases assert '--replicate and --max-streams cannot be used without --ddboost', a '--max-streams must be a number greater than' zero complaint for max_streams of 0 or "abc", and '--inserts, --column-inserts, --oids' output options being rejected under --incremental. test_get_timestamp_object1 parses '20130101010101' into a datetime, and test_get_timestamp_object3 passes a None key and still appears to expect a datetime back.
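A -K timestamp key is a 14-digit YYYYMMDDHHMMSS string; the truncated key in test_get_timestamp_object2 must be rejected. A hypothetical equivalent of that parse-or-raise step, not gpcrondump's implementation:

from datetime import datetime

def get_timestamp_object(timestamp_key):
    """Parse a YYYYMMDDHHMMSS key, raising on malformed input."""
    try:
        return datetime.strptime(timestamp_key, '%Y%m%d%H%M%S')
    except (TypeError, ValueError):
        raise Exception('Invalid timestamp key')

print(get_timestamp_object('20130101010101'))  # -> 2013-01-01 01:01:01
try:
    get_timestamp_object('20130101010')        # truncated key, as in the test
except Exception as e:
    print(e)                                   # Invalid timestamp key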
GpCronDumpTestCase.Options() options.incremental =", "= gpcd.get_schema_list_file(dbname) self.assertEquals(file, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_32(self, mock1, mock2):", "test_options29(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = ['bkdb'] options.timestamp_key", "'foo' options.include_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '-t can not be", "mock_segs, dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' %", "GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' write_lines_to_file('/tmp/foo', ['public']) gpcd = GpCronDump(options, None)", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=0) def test_validate_parse_email_File03(self, mock1, mock2, mock3): options =", "def test_get_timestamp_object3(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_23(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema =", "'information_schema']) def test_options_schema_filter_29(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file", "GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_include_exclude_for_dump_database00(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory", "= GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.exclude_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception,", "= gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def", "= GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir = '/foo1' gpcd =", "options.incremental = True with self.assertRaisesRegexp(Exception, 'Must supply -x <database name>", "def test_get_files_file_list_with_prefix(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key =", "= False self.clear_catalog_dumps = False self.batch_default = DEFAULT_NUM_WORKERS self.include_dump_tables =", "self.assertEquals(exc, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2']) def", "False self.post_vacuum = False self.rollback = False self.compress = True", "= GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception,", "GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) 
@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM':", "@patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'NAME': 'testdb100', 'SUBJECT': \"backup", "test_get_timestamp_object2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010' gpcd", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_dump_tables_file =", "mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" m", "options = GpCronDumpTestCase.Options() dump_database = 'testdb1' current_exit_status = 0 time_start", "mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables =", "options.masterDataDirectory = '/foo' gpcd = GpCronDump(options, None) master = Mock()", "in schema file '/tmp/foo'\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_31(self,", "def test_get_pipes_file_list1(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None", "test_options_schema_filter_3(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.incremental", "'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_gpcrondump_init0(self, mock1,", "selected with -S option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "test_options14(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = 'bkdb' options.incremental", "option when listing backup files'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_28(self, mock, mock2): options =", "dirtyfile = '/tmp/dirty' dbname = 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile,", "True with self.assertRaisesRegexp(Exception, 'exclude table file can not be selected", "options = GpCronDumpTestCase.Options() options.ddboost = False options.replicate = False options.max_streams", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_16(self, mock, mock2): options", "options.include_schema_file = '/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception, '--schema-file option", "= GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.include_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception,", "= gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/metro_gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_ao_state_file'", "test_get_table_names_from_partition_list_01(self, mock1, mock2): options = GpCronDumpTestCase.Options() cron = GpCronDump(options, None)", "gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database02(self,", "'-s option can not be selected with incremental backup'): cron", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def test_options_schema_filter_30(self, mock,", "def test_options_schema_filter_35(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_dump_schema =", "options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.local_dump_prefix = 'metro' options.include_dump_tables_file", "@patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': 'testdb100', 'SUBJECT': \"backup", "pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' %", "with self.assertRaisesRegexp(Exception, 'exclude table file can not be selected with", "greater than zero'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables_file = '/mydir/incfile' gpcd = GpCronDump(options,", "'public.t5', 'public.t6'] gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname", "= True options.max_streams = \"abc\" with self.assertRaisesRegexp(Exception, '--max-streams must be", "not specify catalog schema 'information_schema' using -S option\"): GpCronDump(options, None)", "= None gpcd = GpCronDump(options, None) timestamp = gpcd._get_timestamp_object(options.timestamp_key) self.assertTrue(isinstance(timestamp,", "'20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) if os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "--exclude-schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_6(self,", "test_options3(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables_file = 'foo' options.incremental", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def test_options_schema_filter_30(self, mock, mock2,", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object1(self, mock1,", "@patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2']) def test_get_include_exclude_for_dump_database04(self, mock1,", "= GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.exclude_dump_tables = 'foo' with self.assertRaisesRegexp(Exception,", "id, seg in enumerate(mock_segs): seg.getSegmentDataDirectory.return_value = '/bar' seg.getSegmentHostName.return_value = 'foo1'", "@patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list4(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key =", "None options.dump_global = True options.masterDataDirectory = '/foo' gpcd = GpCronDump(options,", "= None self.cleanup_total = None self.dump_schema = False self.dump_databases =", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.exclude_dump_tables =", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' %", "if os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_34(self, mock1,", "options.incremental = True with self.assertRaisesRegexp(Exception, '--schema-file option can not be", "True options.incremental = True with self.assertRaisesRegexp(Exception, '-c option can not", "GpCronDump(options, None) partition_list = ['public, aot1, 2190', 'public, aot2,aot, 3190']", "mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_schema_file = '/tmp/foo'", "def test_options_table_filter2(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo'", "= GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "@patch('gpcrondump.validate_current_timestamp') def test_options_table_filter5(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables =", "= False self.post_script = None self.dump_config = False self.history =", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options15(self, mock, mock2): options =", "gpcd._get_timestamp_object(options.timestamp_key) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object3(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "test_options25(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost = False options.replicate", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_7(self, mock, mock2):", "= None self.netbackup_policy = None self.netbackup_schedule = None self.netbackup_block_size =", "option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_28(self, mock, mock2): options", "3190'] co_partition_list = ['public, cot1, 2190', 'public, cot2, 3190'] heap_partition_list", "mock, mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--column-inserts'] options.incremental =", "self.assertRaisesRegexp(Exception, '-t can not be selected with --exclude-table-file option'): cron", "unittest from datetime import datetime from gppylib import gplog from", "GpCronDumpTestCase.Options() options.local_dump_prefix = 'foo' options.incremental = False options.list_filter_tables = True", "'public.t6'] gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname =", "mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.incremental = True", "'/tmp/foobar' options.exclude_dump_tables_file = '/odir/exfile' gpcd = GpCronDump(options, None) dirtyfile =", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options10(self, mock, mock2): options", "'foo1' timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp)", "with self.assertRaisesRegexp(Exception, \"can not specify catalog schema 'information_schema' using -S", "options.include_dump_tables_file = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'include table", "options.replicate = False options.max_streams = 20 with self.assertRaisesRegexp(Exception, '--replicate and", "@patch('gpcrondump.validate_current_timestamp') def test_options23(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost =", "options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_schema_file = '/tmp/foo' with", "= GpCronDumpTestCase.Options() options.output_options = ['--oids'] options.incremental = True with self.assertRaisesRegexp(Exception,", "timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar'", "def test_gpcrondump_init0(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None", "options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables_file = '/mydir/incfile' gpcd", "mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.exclude_dump_tables =", "'.yaml' file.\" % options.include_email_file): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True)", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_6(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema =", "@patch('gpcrondump.validate_current_timestamp') def test_options27(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases =", "False self.batch_default = DEFAULT_NUM_WORKERS self.include_dump_tables = None self.exclude_dump_tables = None", "self.assertRaisesRegexp(Exception, 'Must supply -x <database name> with incremental option'): cron", "self.local_dump_prefix = '' self.list_filter_tables = None self.include_email_file = None self.email_details", "True gpcd = GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value =", 
"gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' %", "enumerate(mock_segs): seg.getSegmentDataDirectory.return_value = '/bar' seg.getSegmentHostName.return_value = 'foo1' seg.getSegmentDbId.return_value = id", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options12(self, mock, mock2): options = GpCronDumpTestCase.Options() options.incremental", "@patch('gpcrondump.validate_current_timestamp') def test_gpcrondump_init0(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key =", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_22(self, mock, mock2): options", "gplog from gpcrondump import GpCronDump from gppylib.operations.utils import DEFAULT_NUM_WORKERS from", "self.rollback = False self.compress = True self.free_space_percent = None self.clear_dumps", "self.assertRaisesRegexp(Exception, '-S can not be selected with --exclude-schema-file option'): cron", "mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' options.incremental = True", "with NetBackup'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_include_exclude_for_dump_database00(self, mock1, mock2):", "@patch('gpcrondump.validate_current_timestamp') def test_options29(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases =", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_27(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema", "options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_schema = 'foo' with", "'DBNAME': None, 'SUBJECT': \"backup completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File05(self,", "mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables_file =", "mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables_file = 'foo' options.incremental =", "= GpCronDumpTestCase.Options() options.ddboost = False options.replicate = False options.max_streams =", "self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with incremental backup'): cron", "= '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S can not be selected with", "'/tmp/foo' with self.assertRaisesRegexp(Exception, '-S can not be selected with --schema-file", "= GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.include_dump_tables = ['public.t1', 'public.t2', 'public.t3']", "options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir = '/foo1' gpcd", "@patch('gpcrondump.GpCronDump._get_master_port') def test_send_email00(self, mock1, MailDumpEvent): options = GpCronDumpTestCase.Options() dump_database =", "def test_options_schema_filter_9(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo'", "test_options_schema_filter_23(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables", "options = 
GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables = '/tmp/foo' with", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_10(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file =", "self.backup_dir = None self.encoding = None self.output_options = None self.report_dir", "= None self.encoding = None self.output_options = None self.report_dir =", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2']) def test_get_include_exclude_for_dump_database01(self, mock1, mock2,", "= True options.local_dump_prefix = 'metro' options.masterDataDirectory = '/data/foo' gpcd =", "gpcd.dump_dir, timestamp) files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101'", "def test_options_schema_filter_20(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo'", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_7(self, mock, mock2): options", "mock.MagicMock() with patch('__builtin__.open', m, create=True): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile',", "self.timestamp_key = None self.list_backup_files = None self.quiet = False self.verbose", "@patch('gpcrondump.validate_current_timestamp') def test_options18(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema =", "options.list_filter_tables = False @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def test_options11(self, mock,", "def test_options12(self, mock, mock2): options = GpCronDumpTestCase.Options() options.incremental = True", "'--replicate and --max-streams cannot be used without --ddboost'): cron =", "options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.incremental = True with", "with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'):", "False self.clear_catalog_dumps = False self.batch_default = DEFAULT_NUM_WORKERS self.include_dump_tables = None", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_filter' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list))", "return_value=['public.t1', 'public.t2']) def test_get_include_exclude_for_dump_database04(self, mock1, mock2, mock3, mock4): options =", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') @patch('gpcrondump.GpCronDump._get_master_port') def test_get_files_file_list_with_prefix(self, mock1, mock2, mock3): options", "'information_schema']) def test_options_schema_filter_30(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_schema_file", "= GpCronDump(options, None) @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000') @patch('gpcrondump.validate_current_timestamp') 
@patch('gpcrondump.GpCronDump._get_master_port') def test_options13(self, mock,", "\"test_policy\" options.netbackup_schedule = \"test_schedule\" with self.assertRaisesRegexp(Exception, '--ddboost is not supported", "mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--oids'] options.incremental = True", "datetime from gppylib import gplog from gpcrondump import GpCronDump from", "options.masterDataDirectory = '/tmp/foobar' gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty'", "self.assertRaisesRegexp(Exception, '-s can not be selected with -S option'): cron", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_15(self, mock,", "options.timestamp_key = None gpcd = GpCronDump(options, None) timestamp = gpcd._get_timestamp_object(options.timestamp_key)", "'DBNAME': 'testdb100', 'SUBJECT': \"backup completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File00(self,", "\"'%s' file is empty.\" % options.include_email_file): cron = GpCronDump(options, None)", "ao_partition_list = ['public, aot1, 2190', 'public, aot2, 3190'] co_partition_list =", "-s option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_16(self,", "self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options17(self, mock, mock2): options =", "None self.exclude_schema_file = None self.exclude_dump_schema = None self.dump_stats = None", "mock, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' gpcd =", "table file can not be selected with incremental backup'): cron", "mock2, mock3): options = GpCronDumpTestCase.Options() options.incremental = True cron =", "mock1, mock2, mock3): options = GpCronDumpTestCase.Options() cron = GpCronDump(options, None)", "used without --ddboost'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "self.clear_dumps_only = False self.post_script = None self.dump_config = False self.history", "mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.incremental =", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_25(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema", "= True with self.assertRaisesRegexp(Exception, 'list backup files not supported with", "options.timestamp_key = None options.masterDataDirectory = '/foo' options.dump_config = True gpcd", "'/tmp/foobar' options.include_dump_tables_file = '/mydir/incfile' gpcd = GpCronDump(options, None) dirtyfile =", "with self.assertRaisesRegexp(Exception, 'Invalid timestamp key'): gpcd._get_timestamp_object(options.timestamp_key) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object3(self,", "None) master = Mock() master.getSegmentHostName.return_value = 'foo1' timestamp = '20130101010101'", "= mock.MagicMock() with self.assertRaisesRegexp(Exception, \"\\'%s\\' file is not formatted properly.\"", "\"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"\\'%s\\' file does not exist.\" % 
options.include_email_file):", "options.masterDataDirectory = '/tmp/foobar' gpcd = GpCronDump(options, None) dbname = 'foo'", "= True with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with", "file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public']) @patch('gpcrondump.get_user_table_list_for_schema', return_value=['public',", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list1(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key", "GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" m = mock.MagicMock() with self.assertRaisesRegexp(Exception, \"\\'%s\\'", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': None, 'SUBJECT':", "options.exclude_schema_file = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and", "test_get_include_exclude_for_dump_database01(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory =", "test_options_schema_filter_6(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_schema_file", "options = GpCronDumpTestCase.Options() options.incremental = True options.dump_databases = 'bkdb,fulldb' with", "gpcd = GpCronDump(options, None) dbname = 'foo' timestamp = '20141016010101'", "= '/foo' gpcd = GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value", "mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' write_lines_to_file('/tmp/foo', ['public'])", "from gppylib import gplog from gpcrondump import GpCronDump from gppylib.operations.utils", "catalog schema 'information_schema' using -S option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "True with self.assertRaisesRegexp(Exception, 'include table file can not be selected", "--exclude-table-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter3(self,", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options28(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases", "return_value=False) @patch('gpcrondump.GpCronDump._get_master_port') def test_validate_parse_email_File01(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.include_email_file", "incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options20(self,", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_22(self, mock, mock2): options =", "dbname = 'foo' timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list'))", "= '/tmp/foobar' options.include_dump_tables = ['public.t1', 
'public.t2', 'public.t3'] gpcd = GpCronDump(options,", "aot2:aot, 3190'] expected_output = ['public.aot1', 'public.aot2:aot'] result = cron._get_table_names_from_partition_list(partition_list) self.assertEqual(result,", "def test_validate_parse_email_File03(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_email_file =", "'table2']) def test_options_schema_filter_36(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options()", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_11(self, mock, mock2): options =", "'public.cot2']]) def test_verify_tablenames_00(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() cron", "options.dump_schema = ['information_schema'] with self.assertRaisesRegexp(Exception, \"can not specify catalog schema", "using -S option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema'])", "True options.local_dump_prefix = 'metro' options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options,", "self.exclude_dump_tables = None self.include_dump_tables_file = None self.exclude_dump_tables_file = None self.backup_dir", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' %", "master.getSegmentHostName.return_value = 'foo2' mock_segs = [] timestamp = '20130101010101' dump_dir", "options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" m = mock.MagicMock() with", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options27(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases", "= 'foo' timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) if", "for id, seg in enumerate(mock_segs): seg.getSegmentDataDirectory.return_value = '/bar' seg.getSegmentHostName.return_value =", "mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' write_lines_to_file('/tmp/foo', ['public']) gpcd", "\"\" self.interactive = False self.clear_dumps_only = False self.post_script = None", "return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) def test_validate_parse_email_File02(self, mock1, mock2, mock3): options", "schema 'information_schema' using -S option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file',", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_20(self, mock,", "with self.assertRaisesRegexp(Exception, '--table-file can not be selected with --exclude-table-file option'):", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options26(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "DEFAULT_NUM_WORKERS self.include_dump_tables = None 
self.exclude_dump_tables = None self.include_dump_tables_file = None", "= GpCronDump(options, None) finally: options.list_filter_tables = False @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp',", "= '20130101010' gpcd = GpCronDump(options, None) with self.assertRaisesRegexp(Exception, 'Invalid timestamp", "mock3): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.local_dump_prefix = 'metro'", "'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_filter' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') @patch('gpcrondump.GpCronDump._get_master_port') def", "% options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "= GpCronDumpTestCase.Options() options.dump_databases = 'bkdb,fulldb' options.timestamp_key = True with self.assertRaisesRegexp(Exception,", "@patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) def test_validate_parse_email_File02(self, mock1, mock2, mock3):", "= gpcd._get_timestamp_object(options.timestamp_key) self.assertTrue(isinstance(timestamp, datetime)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list1(self, mock1, mock2):", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_13(self, mock, mock2): options", "mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.local_dump_prefix =", "'foo' options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t and -T can", "= None with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = ['information_schema'] with self.assertRaisesRegexp(Exception,", "options.exclude_dump_tables = 'foo' options.exclude_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '-T can", "mock, mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--oids'] options.incremental =", "['bkdb'] options.timestamp_key = True options.ddboost = True options.netbackup_service_host = \"mdw\"", "GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.incremental = True gpcd = GpCronDump(options,", "mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.exclude_dump_tables = '/tmp/foo'", "get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def test_options_schema_filter_29(self, mock,", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options15(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases", "mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.include_dump_tables_file =", "'' self.list_filter_tables = None self.include_email_file = None self.email_details = None", "'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') def", "GpCronDumpTestCase.Options() options.timestamp_key = None gpcd = GpCronDump(options, None) timestamp =", "self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') def test_get_files_file_list3(self, mock1, mock2,", "True with self.assertRaisesRegexp(Exception, 'Must supply -x <database name> with incremental", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_20(self, mock, mock2):", "test_options22(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost = True options.replicate", "= 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertEquals(inc, None) self.assertEquals(exc,", "'-t can not be selected with -T option'): cron =", "return_value=111) def test_validate_parse_email_File02(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_email_file", "empty.\" % options.include_email_file): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port')", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_27(self, mock,", "options.dump_schema = ['public'] gpcd = GpCronDump(options, None) dbname = 'foo'", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options26(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.list_backup_files", "options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.exclude_dump_tables = '/tmp/foo' with", "None) ao_partition_list = ['public, aot1, 2190', 'public, aot2, 3190'] co_partition_list", "with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with", "= '/mydir/incfile' gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_25(self, mock, mock2):", "= GpCronDumpTestCase.Options() options.output_options = ['--column-inserts'] options.incremental = True with self.assertRaisesRegexp(Exception,", "incremental backup'): cron = GpCronDump(options, None) 
@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options3(self,", "self.assertRaisesRegexp(Exception, 'Invalid partition entry \"public, aot2,aot, 3190\"'): cron._get_table_names_from_partition_list(partition_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "False options.ddboost_config_remove = False options.ddboost_user = False options.ddboost_host = False", "options.incremental = False #If this is successful then it should", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_23(self, mock, mock2): options", "'-t and -T can not be selected with --schema-file option'):", "% options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' %", "= '/tmp/foobar' options.incremental = True gpcd = GpCronDump(options, None) dirtyfile", "= 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "cot2, 3190'] heap_partition_list = ['public.heapt1', 'public.heapt2'] cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) #Should", "def test_options_oids_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--oids']", "= [['public.aot1:asd', 'public.aot2'], ['public.cot1', 'public.cot2:asd']]) def test_verify_tablenames_00_bad(self, mock1, mock2, mock3):", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_9(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema =", "timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_global_1_1_20130101010101'", "options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t and -T can not", "GpCronDump(options, None) self.assertEquals(cron.full_dump_timestamp, '20121225090000') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options12(self, mock, mock2):", "= '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) files_file_list =", "= GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "--exclude-table-file can not be selected with -S option'): cron =", "mock, mock2): options = GpCronDumpTestCase.Options() options.incremental = True options.dump_databases =", "= GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory = '/foo' options.dump_config =", "['public'] gpcd = GpCronDump(options, None) dbname = 'foo' timestamp =", "'/odir/exfile' gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname =", "def test_options_schema_filter_34(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file =", 
"'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list1(self, mock1,", "not supported with NetBackup'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_include_exclude_for_dump_database00(self,", "= ['public, aot1, 2190', 'public, aot2, 3190'] co_partition_list = ['public,", "GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--exclude-schema-file", "def test_options_schema_filter_21(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo'", "incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def", "False options.ddboost_user = False options.ddboost_host = False options.max_streams = None", "options.dump_schema = 'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-s can", "GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname = 'foo' (inc, exc)", "'foo1' mock_segs = [Mock(), Mock()] for id, seg in enumerate(mock_segs):", "mock2): options = GpCronDumpTestCase.Options() options.clear_dumps = True options.incremental = True", "options = GpCronDumpTestCase.Options() options.exclude_dump_tables_file = 'foo' options.incremental = True with", "options.ddboost = True options.replicate = True options.max_streams = 0 with", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_19(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file =", "True with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with", "with incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000') @patch('gpcrondump.validate_current_timestamp')", "self.assertRaisesRegexp(Exception, '-c option can not be selected with incremental backup'):", "= 'foo' options.include_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '-T can not", "options.incremental = True options.local_dump_prefix = 'metro' options.masterDataDirectory = '/data/foo' gpcd", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options19(self, mock, mock2): options = GpCronDumpTestCase.Options()", "'-S option can not be selected with incremental backup'): cron", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_schema_file =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options2(self, mock, mock2):", "mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.include_dump_tables_file = 'foo'", "imp gpcrondump_path = os.path.abspath('gpcrondump') gpcrondump = imp.load_source('gpcrondump', gpcrondump_path) import unittest2", "= GpCronDumpTestCase.Options() options.include_dump_tables_file = 'foo' options.exclude_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception,", 
"options.exclude_dump_schema = 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, '-S option", "selected with --schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--column-inserts'] options.incremental = True", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_23(self, mock, mock2): options =", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' %", "test_options_schema_filter_25(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_dump_tables", "options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "@patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2']) def test_get_include_exclude_for_dump_database01(self, mock1, mock2, mock3, mock4): options", "self.ddboost = False self.ddboost_hosts = None self.ddboost_user = None self.ddboost_config_remove", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = 'bkdb,fulldb' options.incremental =", "= GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value = 'foo1' timestamp", "['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))", "def test_options_schema_filter_32(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = ['public']", "'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S can not be", "mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables_file = '/tmp/foo'", "exclude catalog schema 'information_schema' in schema file '/tmp/foo'\"): GpCronDump(options, None)", "def test_get_files_file_list_with_filter(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key =", "with self.assertRaisesRegexp(Exception, 'list backup files not supported with ddboost option'):", "mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables_file = '/odir/exfile'", "self.post_vacuum = False self.rollback = False self.compress = True self.free_space_percent", "options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--exclude-schema-file can not be selected", "def __init__(self): self.masterDataDirectory = \"\" self.interactive = False 
#!/usr/bin/env python

import os
import imp

gpcrondump_path = os.path.abspath('gpcrondump')
gpcrondump = imp.load_source('gpcrondump', gpcrondump_path)

import unittest2 as unittest
from datetime import datetime
from gppylib import gplog
from gpcrondump import GpCronDump
from gppylib.operations.utils import DEFAULT_NUM_WORKERS
from mock import patch, Mock
from gppylib.operations.dump import MailDumpEvent
from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file
import mock

logger = gplog.get_unittest_logger()

class GpCronDumpTestCase(unittest.TestCase):

    class Options:
        def __init__(self):
            self.masterDataDirectory = ""
            self.interactive = False
            self.clear_dumps_only = False
            self.post_script = None
            self.dump_config = False
            self.history = False
            self.pre_vacuum = False
            self.post_vacuum = False
            self.rollback = False
            self.compress = True
            self.free_space_percent = None
            self.clear_dumps = False
            self.cleanup_date = None
            self.cleanup_total = None
            self.dump_schema = False
            self.dump_databases = ['testdb']
            self.bypass_disk_check = True
            self.backup_set = None
            self.dump_global = False
            self.clear_catalog_dumps = False
            self.batch_default = DEFAULT_NUM_WORKERS
            self.include_dump_tables = None
            self.exclude_dump_tables = None
            self.include_dump_tables_file = None
            self.exclude_dump_tables_file = None
            self.backup_dir = None
            self.encoding = None
            self.output_options = None
            self.report_dir = None
            self.timestamp_key = None
            self.list_backup_files = None
            self.quiet = False
            self.verbose = False
            self.local_dump_prefix = ''
            self.list_filter_tables = None
            self.include_email_file = None
            self.email_details = None
            self.include_schema_file = None
            self.exclude_schema_file = None
            self.exclude_dump_schema = None
            self.dump_stats = None

            ## Enterprise init
            self.incremental = False
            self.ddboost = False
            self.ddboost_hosts = None
            self.ddboost_user = None
            self.ddboost_config_remove = False
            self.ddboost_verify = False
            self.ddboost_remote = None
            self.ddboost_ping = None
            self.ddboost_backupdir = None
            self.replicate = None
            self.max_streams = None
            self.netbackup_service_host = None
            self.netbackup_policy = None
            self.netbackup_schedule = None
            self.netbackup_block_size = None
            self.netbackup_keyword = None
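    # --- Illustrative sketch, not part of the original suite ----------------
    # Every validation test below follows the same recipe: patch out the
    # master-port lookup and timestamp validation, build a default Options
    # object, flip the one or two options under test, and assert on the text
    # of the Exception raised by GpCronDump's constructor.  The fixture value
    # 'public.badpair' is hypothetical; the method is named example_* rather
    # than test_* so the runner skips it.
    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def example_option_conflict_sketch(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_dump_tables = 'public.badpair'
        options.exclude_dump_tables = 'public.badpair'
        # assertRaisesRegexp performs a regex search, so a distinctive prefix
        # of the expected message is enough to pin down the failure mode.
        with self.assertRaisesRegexp(Exception, '-t can not be selected with -T option'):
            GpCronDump(options, None)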
    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.GpCronDump.validate_dump_schema')
    @patch('gpcrondump.validate_current_timestamp')
    def test_option_schema_filter_1(self, mock1, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.dump_schema = 'foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, '-s option can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.GpCronDump.validate_dump_schema')
    @patch('gpcrondump.validate_current_timestamp')
    def test_option_schema_filter_2(self, mock, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.exclude_schema_file = '/tmp/foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_3(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_schema = 'foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, '-S option can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_4(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_schema_file = '/tmp/foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, '--schema-file option can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_5(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_schema = 'foo'
        options.include_schema_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '-s can not be selected with --schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_6(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_schema = 'foo'
        options.exclude_schema_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '-s can not be selected with --exclude-schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_7(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_schema = 'foo'
        options.include_dump_tables = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -s option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_8(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_schema = 'foo'
        options.exclude_schema_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '-S can not be selected with --exclude-schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_10(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_schema = 'foo'
        options.exclude_dump_tables = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -S option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_11(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_schema_file = 'foo'
        options.include_dump_tables = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --exclude-schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_12(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_schema_file = 'foo'
        options.exclude_dump_tables = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --exclude-schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_13(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_schema_file = 'foo'
        options.include_dump_tables = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_16(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_schema = 'foo'
        options.exclude_dump_tables_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -s option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_17(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_schema = 'foo'
        options.include_dump_tables_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -S option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_18(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_schema = 'foo'
        options.exclude_dump_tables_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -S option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_19(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_schema_file = 'foo'
        options.include_dump_tables_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --exclude-schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_20(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_schema_file = 'foo'
        options.exclude_dump_tables_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --exclude-schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_21(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_schema_file = 'foo'
        options.include_dump_tables_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_22(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_schema_file = 'foo'
        options.exclude_dump_tables_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_23(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_schema_file = 'foo'
        options.include_schema_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--exclude-schema-file can not be selected with --schema-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_27(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_schema = ['information_schema']
        with self.assertRaisesRegexp(Exception, "can not specify catalog schema 'information_schema' using -s option"):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_28(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_schema = ['information_schema']
        with self.assertRaisesRegexp(Exception, "can not exclude catalog schema 'information_schema' using -S option"):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema'])
    def test_options_schema_filter_29(self, mock, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.exclude_schema_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, "can not exclude catalog schema 'information_schema' in schema file '/tmp/foo'"):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema'])
    def test_options_schema_filter_30(self, mock, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.include_schema_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, "can not include catalog schema 'information_schema' in schema file '/tmp/foo'"):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_31(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.masterDataDirectory = '/tmp/foobar'
        gpcd = GpCronDump(options, None)
        dbname = 'foo'
        timestamp = '20141016010101'
        file = gpcd.get_schema_list_file(dbname)
        self.assertEquals(file, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_32(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_schema = ['public']
        gpcd = GpCronDump(options, None)
        dbname = 'foo'
        timestamp = '20141016010101'
        file = gpcd.get_schema_list_file(dbname)
        self.assertTrue(file.startswith('/tmp/schema_list'))

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_schema_filter_33(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_schema_file = '/tmp/foo'
        write_lines_to_file('/tmp/foo', ['public'])
        gpcd = GpCronDump(options, None)
        dbname = 'foo'
        timestamp = '20141016010101'
        file = gpcd.get_schema_list_file(dbname)
        self.assertTrue(file.startswith('/tmp/schema_list'))
        if os.path.exists('/tmp/foo'):
            os.remove('/tmp/foo')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public'])
    def test_options_schema_filter_34(self, mock1, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.exclude_schema_file = '/tmp/foo'
        write_lines_to_file('/tmp/foo', ['public'])
        gpcd = GpCronDump(options, None)
        dbname = 'foo'
        timestamp = '20141016010101'
        file = gpcd.get_schema_list_file(dbname)
        self.assertTrue(file.startswith('/tmp/schema_list'))
        if os.path.exists('/tmp/foo'):
            os.remove('/tmp/foo')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public'])
    def test_options_schema_filter_35(self, mock1, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_schema = 'public'
        gpcd = GpCronDump(options, None)
        dbname = 'foo'
        timestamp = '20141016010101'
        file = gpcd.get_schema_list_file(dbname)
        self.assertTrue(file.startswith('/tmp/schema_list'))

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file')
    @patch('gpcrondump.get_lines_from_file', return_value=['public', 'table2'])
    def test_options_schema_filter_36(self, mock1, mock2, mock3, mock4):
        options = GpCronDumpTestCase.Options()
        gpcd = GpCronDump(options, None)
        dbname = 'foo'
        schema_file = '/tmp/foo'
        inc = gpcd.generate_include_table_list_from_schema_file(dbname, schema_file)
        self.assertTrue(inc.startswith('/tmp/include_dump_tables_file'))
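    # --- Sketch, not from the original suite --------------------------------
    # The schema-list tests above share one fixture pattern:
    # write_lines_to_file() drops a throwaway schema file under /tmp,
    # GpCronDump materialises its working copy under a '/tmp/schema_list'
    # prefix, and the assertions only check that prefix because the suffix
    # is a per-run unique token.  Skeleton of the pattern:
    #
    #     write_lines_to_file('/tmp/foo', ['public'])   # fixture in
    #     ...exercise GpCronDump...
    #     if os.path.exists('/tmp/foo'):
    #         os.remove('/tmp/foo')                     # fixture out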
    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options1(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.masterDataDirectory = '/tmp/foobar'
        options.backup_dir = None
        gpcd = GpCronDump(options, None)
        self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options2(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.masterDataDirectory = '/tmp/foobar'
        options.backup_dir = '/foo1'
        gpcd = GpCronDump(options, None)
        self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options3(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_dump_tables = 'foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, 'include table list can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options4(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_tables_file = 'foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, 'exclude table file can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options5(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_dump_tables_file = 'foo'
        options.incremental = True
        with self.assertRaisesRegexp(Exception, 'include table file can not be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_table_filter1(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_dump_tables = 'foo'
        options.exclude_dump_tables = 'foo'
        with self.assertRaisesRegexp(Exception, '-t can not be selected with -T option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_table_filter2(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_dump_tables = 'foo'
        options.include_dump_tables_file = 'foo'
        with self.assertRaisesRegexp(Exception, '-t can not be selected with --table-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_table_filter3(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_tables = 'foo'
        options.exclude_dump_tables_file = 'foo'
        with self.assertRaisesRegexp(Exception, '-T can not be selected with --exclude-table-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_table_filter4(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.exclude_dump_tables = 'foo'
        options.include_dump_tables_file = 'foo'
        with self.assertRaisesRegexp(Exception, '-T can not be selected with --table-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_table_filter5(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_dump_tables = 'foo'
        options.exclude_dump_tables_file = 'foo'
        with self.assertRaisesRegexp(Exception, '-t can not be selected with --exclude-table-file option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_table_filter6(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.include_dump_tables_file = '/tmp/foo'
        options.exclude_dump_tables_file = '/tmp/foo'
        with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected together'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_inserts_with_incremental(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.output_options = ['--inserts']
        options.incremental = True
        with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_column_inserts_with_incremental(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.output_options = ['--column-inserts']
        options.incremental = True
        with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options_oids_with_incremental(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.output_options = ['--oids']
        options.incremental = True
        with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options10(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.local_dump_prefix = 'foo'
        options.incremental = False
        options.list_filter_tables = True
        try:
            with self.assertRaisesRegexp(Exception, 'list filter tables option requires --prefix and --incremental'):
                cron = GpCronDump(options, None)
        finally:
            options.list_filter_tables = False

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000')
    def test_options11(self, mock, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.incremental = True
        cron = GpCronDump(options, None)
        self.assertEquals(cron.full_dump_timestamp, '20121225090000')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options12(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.incremental = True
        options.dump_databases = 'bkdb,fulldb'
        with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with incremental backup'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000')
    @patch('gpcrondump.validate_current_timestamp')
    @patch('gpcrondump.GpCronDump._get_master_port')
    def test_options13(self, mock, mock2, mock3):
        options = GpCronDumpTestCase.Options()
        options.incremental = True
        options.dump_databases = ['bkdb']
        #If this is successful then it should not raise an exception
        GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options14(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_databases = 'bkdb'
        options.incremental = False
        #If this is successful then it should not raise an exception
        GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options15(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_databases = 'bkdb,fulldb'
        options.incremental = False
        #If this is successful then it should not raise an exception
        GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options18(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_databases = []
        options.incremental = True
        with self.assertRaisesRegexp(Exception, 'Must supply -x <database name> with incremental option'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options19(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = None
        options.local_dump_prefix = 'foo'
        options.ddboost = False
        options.ddboost_verify = False
        options.ddboost_config_remove = False
        options.ddboost_user = False
        options.ddboost_host = False
        options.max_streams = None
        options.list_backup_files = False
        gpcd = GpCronDump(options, None)
        self.assertEqual(gpcd.dump_prefix, 'foo_')

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options21(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = True
        options.replicate = True
        options.max_streams = None
        with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options22(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = True
        options.replicate = True
        options.max_streams = 0
        with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options23(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = True
        options.replicate = True
        options.max_streams = "abc"
        with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options24(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = False
        options.replicate = False
        options.max_streams = 20
        with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'):
            cron = GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options25(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.ddboost = True
        options.list_backup_files = True
        with self.assertRaisesRegexp(Exception, 'list backup files not supported with ddboost option'):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options26(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_databases = ['bkdb']
        options.timestamp_key = True
        options.ddboost = True
        options.netbackup_service_host = "mdw"
        options.netbackup_policy = "test_policy"
        options.netbackup_schedule = "test_schedule"
        with self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options27(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.dump_databases = 'bkdb,fulldb'
        options.timestamp_key = True
        with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with -K option'):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_options28(self, mock, mock2):
        options = GpCronDumpTestCase.Options()
        options.list_backup_files = True
        options.timestamp_key = None
        with self.assertRaisesRegexp(Exception, 'Must supply -K option when listing backup files'):
            GpCronDump(options, None)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_timestamp_object1(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = '20130101010101'
        gpcd = GpCronDump(options, None)
        timestamp = gpcd._get_timestamp_object(options.timestamp_key)
        self.assertEquals(timestamp, datetime(2013, 1, 1, 1, 1, 1))

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_timestamp_object2(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = '20130101010'
        gpcd = GpCronDump(options, None)
        with self.assertRaisesRegexp(Exception, 'Invalid timestamp key'):
            gpcd._get_timestamp_object(options.timestamp_key)

    @patch('gpcrondump.GpCronDump._get_master_port')
    @patch('gpcrondump.validate_current_timestamp')
    def test_get_timestamp_object3(self, mock1, mock2):
        options = GpCronDumpTestCase.Options()
        options.timestamp_key = None
        gpcd = GpCronDump(options, None)
        timestamp = gpcd._get_timestamp_object(options.timestamp_key)
        self.assertTrue(isinstance(timestamp, datetime))
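    # --- Sketch, not from the original suite --------------------------------
    # The -K timestamp key asserted on above is a plain 14-digit
    # YYYYMMDDHHMMSS string, which is why the 11-digit '20130101010' is
    # rejected as an 'Invalid timestamp key' while '20130101010101' parses
    # cleanly.  A minimal standalone check using only the stdlib:
    #
    #     from datetime import datetime
    #     datetime.strptime('20130101010101', '%Y%m%d%H%M%S')
    #     # -> datetime.datetime(2013, 1, 1, 1, 1, 1)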
"self.assertEquals(file, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_32(self, mock1, mock2): options =", "datetime import datetime from gppylib import gplog from gpcrondump import", "options.timestamp_key = True options.ddboost = True options.netbackup_service_host = \"mdw\" options.netbackup_policy", "self.netbackup_schedule = None self.netbackup_block_size = None self.netbackup_keyword = None @patch('gpcrondump.GpCronDump._get_master_port')", "= 0 time_start = '12:07:09' time_end = '12:08:18' cron =", "partition entry \"public, aot2,aot, 3190\"'): cron._get_table_names_from_partition_list(partition_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter1(self,", "= False self.cleanup_date = None self.cleanup_total = None self.dump_schema =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter3(self, mock, mock2):", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port') def test_options13(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options()", "with self.assertRaisesRegexp(Exception, '-s can not be selected with --schema-file option'):", "test_options_oids_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--oids'] options.incremental", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter4(self, mock, mock2): options = GpCronDumpTestCase.Options()", "False options.max_streams = 20 with self.assertRaisesRegexp(Exception, '--max-streams must be specified", "self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'): cron =", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_8(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema =", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_22(self, mock, mock2): options = GpCronDumpTestCase.Options()", "GpCronDumpTestCase.Options() options.timestamp_key = None options.local_dump_prefix = 'foo' options.ddboost = False", "= False #If this is successful then it should not", "test_validate_parse_email_File03(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\"", "options.netbackup_schedule = \"test_schedule\" with self.assertRaisesRegexp(Exception, '--ddboost is not supported with", "@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'NAME': 'testdb100', 'SUBJECT': \"backup completed for", "def test_options_schema_filter_6(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo'", "gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) if os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def", "cron = GpCronDump(options, None) self.assertEquals(cron.full_dump_timestamp, '20121225090000') @patch('gpcrondump.GpCronDump._get_master_port') 
@patch('gpcrondump.validate_current_timestamp') def test_options12(self,", "= GpCronDumpTestCase.Options() cron = GpCronDump(options, None) ao_partition_list = ['public, aot1,", "incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_column_inserts_with_incremental(self,", "% options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory] self.assertEqual(pipes_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_global_1_1_20130101010101' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port')", "timestamp = gpcd._get_timestamp_object(options.timestamp_key) self.assertEquals(timestamp, datetime(2013, 1, 1, 1, 1, 1))", "dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory,", "self.assertRaisesRegexp(Exception, 'list backup files not supported with ddboost option'): GpCronDump(options,", "sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list3(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "time_end = '12:08:18' cron = GpCronDump(options, None) cron._send_email(dump_database, current_exit_status, time_start,", "= GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "options.max_streams = 0 with self.assertRaisesRegexp(Exception, '--max-streams must be a number", "self.dump_schema = False self.dump_databases = ['testdb'] self.bypass_disk_check = True self.backup_set", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_20(self, mock, mock2): options =", "= \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"'%s' file is empty.\" % options.include_email_file):", "@patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_2(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object1(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key", "mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.exclude_dump_tables_file =", "= 'metro' options.include_dump_tables_file = 'bar' options.masterDataDirectory = '/data/foo' gpcd =", "options.timestamp_key = '20130101010101' gpcd = GpCronDump(options, None) timestamp = gpcd._get_timestamp_object(options.timestamp_key)", "return_value='20130101000000') @patch('gpcrondump.GpCronDump._get_master_port') def test_get_files_file_list_with_prefix(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options()", "mock2): options = GpCronDumpTestCase.Options() options.dump_databases = ['bkdb'] 
options.timestamp_key = True", "options.dump_databases = [] options.incremental = True with self.assertRaisesRegexp(Exception, 'Must supply", "False self.local_dump_prefix = '' self.list_filter_tables = None self.include_email_file = None", "= None options.masterDataDirectory = '/foo' gpcd = GpCronDump(options, None) master", "options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.exclude_dump_tables = 'foo' with", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101000000_increments' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list))", "from gppylib.operations.dump import MailDumpEvent from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file import", "mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar'", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_9(self, mock, mock2): options", "= 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'exclude table file", "test_get_pipes_file_list4(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory", "True options.dump_databases = 'bkdb,fulldb' with self.assertRaisesRegexp(Exception, 'multi-database backup is not", "os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public']) def test_options_schema_filter_34(self, mock1, mock2,", "options.exclude_dump_tables_file = '/odir/exfile' gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty'", "GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file", "(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect", "'foo' options.exclude_dump_tables = 'foo' with self.assertRaisesRegexp(Exception, '-t can not be", "dbname = 'foo' schema_file = '/tmp/foo' inc = gpcd.generate_include_table_list_from_schema_file(dbname, schema_file)", "incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options4(self,", "'--max-streams must be a number greater than zero'): cron =", "self.assertRaisesRegexp(Exception, \"'%s' file is empty.\" % options.include_email_file): cron = GpCronDump(options,", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_15(self, mock, mock2): options", "'--max-streams must be specified along with --replicate'): cron = GpCronDump(options,", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter5(self, mock, mock2): options", "GpCronDumpTestCase.Options() options.masterDataDirectory = 
'/tmp/foobar' options.include_dump_tables = ['public.t1', 'public.t2', 'public.t3'] gpcd", "\"backup completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File00(self, mock1, mock2, mock3,", "mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_schema_file = '/tmp/foo' options.incremental", "GpCronDumpTestCase.Options() options.dump_schema = ['public'] gpcd = GpCronDump(options, None) dbname =", "def test_options14(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = 'bkdb'", "= gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def test_get_include_exclude_for_dump_database03(self,", "= 'foo' timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port')", "['public.t1', 'public.t2', 'public.t3'] gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty'", "= False self.local_dump_prefix = '' self.list_filter_tables = None self.include_email_file =", "self.assertRaisesRegexp(Exception, ''): cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_inserts_with_incremental(self, mock,", "GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory = '/foo' gpcd = GpCronDump(options,", "GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can not include catalog", "= 'foo' options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S can not", "--ddboost'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options26(self, mock1,", "with --exclude-table-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "1 timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)", "options.include_dump_tables_file = '/mydir/incfile' gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty'", "GpCronDumpTestCase.Options() options.ddboost = True options.replicate = True options.max_streams = 0", "expected_output) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_01(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "master.getSegmentHostName.return_value = 'foo1' mock_segs = [Mock(), Mock()] for id, seg", "= ['public.t1', 'public.t2', 'public.t3'] gpcd = GpCronDump(options, None) dirtyfile =", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_18(self, mock, mock2): options", "with self.assertRaisesRegexp(Exception, '-S can not be selected with --exclude-schema-file option'):", "options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_schema_file = '/tmp/foo' with", "cron = GpCronDump(options, None) 
@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_6(self, mock, mock2):", "= GpCronDumpTestCase.Options() options.timestamp_key = None gpcd = GpCronDump(options, None) timestamp", "'foo' options.incremental = True with self.assertRaisesRegexp(Exception, '-s option can not", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_global_1_1_20130101010101' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "= ['bkdb'] options.timestamp_key = True options.ddboost = True options.netbackup_service_host =", "with self.assertRaisesRegexp(Exception, 'Invalid partition entry \"public, aot2,aot, 3190\"'): cron._get_table_names_from_partition_list(partition_list) @patch('gpcrondump.GpCronDump._get_master_port')", "'public.t2']) def test_get_include_exclude_for_dump_database01(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options()", "= 'metro' options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options, None) master", "= '/foo' options.dump_config = True gpcd = GpCronDump(options, None) master", "= GpCronDumpTestCase.Options() options.ddboost = True options.replicate = False options.max_streams =", "mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.incremental = True", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_20(self, mock, mock2): options", "self.netbackup_block_size = None self.netbackup_keyword = None @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def", "gppylib.operations.utils import DEFAULT_NUM_WORKERS from mock import patch, Mock from gppylib.operations.dump", "options.masterDataDirectory = '/tmp/foobar' options.backup_dir = '/foo1' gpcd = GpCronDump(options, None)", "with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with incremental backup'):", "GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.include_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t", "co_partition_list = ['public, cot1, 2190', 'public, cot2\\nasd, 3190'] heap_partition_list =", "timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file'", "def test_options_schema_filter_30(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_schema_file =", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_31(self, mock, mock2): options =", "with incremental option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp) files_file_list", "None) 
@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options2(self, mock, mock2): options = GpCronDumpTestCase.Options()", "options.ddboost = True options.list_backup_files = True with self.assertRaisesRegexp(Exception, 'list backup", "option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter5(self, mock,", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object3(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key", "timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) files_file_list", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_19(self, mock, mock2): options =", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_column_inserts_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options()", "def test_options_schema_filter_23(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo'", "options.ddboost_user = False options.ddboost_host = False options.max_streams = None options.list_backup_files", "GpCronDumpTestCase.Options() options.incremental = True cron = GpCronDump(options, None) self.assertEquals(cron.full_dump_timestamp, '20121225090000')", "--exclude-table-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter4(self,", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def", "= 'bar' options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options, None) master", "self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "aot1!asd, 2190', 'public, aot2, 3190'] co_partition_list = ['public, cot1, 2190',", "self.assertRaisesRegexp(Exception, '-T can not be selected with --exclude-table-file option'): cron", "self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options1(self, mock, mock2): options = GpCronDumpTestCase.Options()", "supported with incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000')", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options25(self, mock, mock2): options", "-s option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_24(self,", "= True options.max_streams = None with self.assertRaisesRegexp(Exception, '--max-streams must be", "= 'foo' options.exclude_dump_tables = 'foo' with self.assertRaisesRegexp(Exception, '-t can not", "= '/tmp/foo' with 
self.assertRaisesRegexp(Exception, \"can not exclude catalog schema 'information_schema'", "self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list4(self, mock1, mock2): options =", "mock2): options = GpCronDumpTestCase.Options() options.list_backup_files = True options.timestamp_key = None", "'/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception, '--schema-file option can not", "@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': 'testdb100', 'SUBJECT': \"backup completed for", "'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--exclude-schema-file can not be", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_11(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file =", "def test_get_table_names_from_partition_list_00(self, mock1, mock2): options = GpCronDumpTestCase.Options() cron = GpCronDump(options,", "selected with --table-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "this is successful then it should not raise an exception", "'foo2' timestamp = '20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_24(self, mock, mock2): options = GpCronDumpTestCase.Options()", "\"\\'%s\\' file is not formatted properly.\" % options.include_email_file): with patch('__builtin__.open',", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_25(self, mock, mock2): options", "options.ddboost = True options.netbackup_service_host = \"mdw\" options.netbackup_policy = \"test_policy\" options.netbackup_schedule", "\"test_schedule\" with self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'): GpCronDump(options,", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options20(self, mock, mock2): options", "from gppylib.operations.utils import DEFAULT_NUM_WORKERS from mock import patch, Mock from", "= GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) def test_validate_parse_email_File02(self,", "expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' %", "'public, cot2\\nasd, 3190'] heap_partition_list = ['public, heapt1, 2190', 'public, heapt2!asdasd", "= True cron = GpCronDump(options, None) self.assertEquals(cron.full_dump_timestamp, '20121225090000') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "options.masterDataDirectory = '/data/foo' gpcd = GpCronDump(options, None) master = Mock()", "options.include_email_file): with patch('__builtin__.open', m, create=True): cron = 
GpCronDump(options, None) @patch('gpcrondump.os.path.isfile',", "be used without --ddboost'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1:asd', 'public.aot2'], ['public.cot1', 'public.cot2:asd']]) def test_verify_tablenames_00_bad(self,", "'/data/foo' gpcd = GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value =", "def test_options_schema_filter_33(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo'", "options = GpCronDumpTestCase.Options() options.timestamp_key = None options.local_dump_prefix = 'foo' options.ddboost", "test_options19(self, mock, mock2): options = GpCronDumpTestCase.Options() options.clear_dumps = True options.incremental", "formatted properly.\" % options.include_email_file): with patch('__builtin__.open', m, create=True): cron =", "'--table-file and --exclude-table-file can not be selected with --schema-file option'):", "gpcrondump import GpCronDump from gppylib.operations.utils import DEFAULT_NUM_WORKERS from mock import", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def test_options_schema_filter_30(self, mock, mock2, mock3): options", "options.exclude_dump_schema = 'foo' options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t and", "GpCronDump(options, None) ao_partition_list = ['public, aot1!asd, 2190', 'public, aot2, 3190']", "--exclude-schema-file option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_13(self,", "'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101000000_increments' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list),", "def test_get_include_exclude_for_dump_database00(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar'", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def test_get_include_exclude_for_dump_database03(self, mock1, mock2, mock3): options", "timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) if os.path.exists('/tmp/foo'): os.remove('/tmp/foo')", "exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options16(self, mock, mock2): options", "= '20130101010101' options.local_dump_prefix = 'metro' options.include_dump_tables_file = 'bar' options.masterDataDirectory =", "None self.netbackup_keyword = None @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_1(self, mock,", "test_options_schema_filter_35(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'public'", "\"/tmp/abc.yaml\" m = mock.MagicMock() 
with self.assertRaisesRegexp(Exception, \"\\'%s\\' file is not", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory]", "= 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'include table list", "GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value = 'foo2' timestamp =", "@patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_2(self, mock, mock2, mock3): options = GpCronDumpTestCase.Options()", "mock2): options = GpCronDumpTestCase.Options() options.local_dump_prefix = 'foo' options.incremental = False", "True with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with -K", "self.assertRaisesRegexp(Exception, '-s can not be selected with --exclude-schema-file option'): cron", "'/foo' options.dump_config = True gpcd = GpCronDump(options, None) master =", "not specify catalog schema 'information_schema' using -s option\"): GpCronDump(options, None)", "@patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object1(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key =", "options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list2(self, mock1, mock2): options", "file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) if os.path.exists('/tmp/foo'): os.remove('/tmp/foo') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_include_schema_list_from_exclude_schema',", "patch('__builtin__.open', m, create=True): cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=False) @patch('gpcrondump.GpCronDump._get_master_port')", "'testdb100'\"}]}) def test_validate_parse_email_File05(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options()", "options = GpCronDumpTestCase.Options() options.exclude_dump_schema = ['information_schema'] with self.assertRaisesRegexp(Exception, \"can not", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options26(self, mock1, mock2): options", "0 time_start = '12:07:09' time_end = '12:08:18' cron = GpCronDump(options,", "GpCronDump(options, None) partition_list = ['public, aot1, 2190', 'public, aot2:aot, 3190']", "GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-S", "True options.timestamp_key = None with self.assertRaisesRegexp(Exception, 'Must supply -K option", "'table1', 'public', 'table2']) def test_options_schema_filter_36(self, mock1, mock2, mock3, mock4): options", "= ['public, aot1, 2190', 'public, aot2,aot, 3190'] with self.assertRaisesRegexp(Exception, 'Invalid", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_pipes_file_list4(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key", "with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids 
cannot be selected with incremental", "@patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema']) def test_options_schema_filter_29(self, mock, mock2, mock3): options =", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options28(self, mock, mock2): options = GpCronDumpTestCase.Options()", "mock3, mock4): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" m =", "-S option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_26(self,", "= GpCronDump(options, None) timestamp = gpcd._get_timestamp_object(options.timestamp_key) self.assertTrue(isinstance(timestamp, datetime)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' write_lines_to_file('/tmp/foo', ['public']) gpcd = GpCronDump(options, None)", "= GpCronDumpTestCase.Options() options.timestamp_key = '20130101010101' options.incremental = True options.masterDataDirectory =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter2(self, mock, mock2):", "= gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file'", "test_options18(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.incremental", "'foo' options.incremental = True with self.assertRaisesRegexp(Exception, 'include table list can", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options23(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_filter' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000')", "mock_segs = [Mock(), Mock()] for id, seg in enumerate(mock_segs): seg.getSegmentDataDirectory.return_value", "options.local_dump_prefix = 'foo' options.ddboost = False options.ddboost_verify = False options.ddboost_config_remove", "options = GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory = '/foo' options.dump_config", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options24(self, mock, mock2): options", "['public, aot1, 2190', 'public, aot2,aot, 3190'] with self.assertRaisesRegexp(Exception, 'Invalid partition", "'public, aot2, 3190'] co_partition_list = ['public, cot1, 2190', 'public, cot2\\nasd,", "not be selected with incremental backup'): cron = GpCronDump(options, None)", "options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can not exclude catalog schema", "False self.cleanup_date = None self.cleanup_total = None self.dump_schema = False", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options14(self, mock, mock2): options = 
GpCronDumpTestCase.Options()", "@patch('gpcrondump.validate_current_timestamp') def test_options_table_filter6(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables_file =", "def test_options_table_filter4(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo'", "options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gppylib.operations.backup_utils.get_latest_full_dump_timestamp', return_value='20130101000000') def test_get_files_file_list_with_filter(self, mock1,", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_14(self, mock, mock2): options", "= GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc\" with self.assertRaisesRegexp(Exception, \"'%s' is not", "'information_schema' in schema file '/tmp/foo'\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file',", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options4(self, mock, mock2): options =", "not be selected with --exclude-schema-file option'): cron = GpCronDump(options, None)", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_00(self, mock1, mock2):", "options.include_schema_file = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and", "options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir = None gpcd", "mock3, mock4): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.exclude_dump_tables =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options25(self, mock, mock2):", "options.exclude_schema_file = '/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception, '--exclude-schema-file option", "'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp) expected_files_list = ['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter4(self, mock, mock2):", "GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file", "mock3): options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' options.incremental = True", "self.assertRaisesRegexp(Exception, 'exclude table file can not be selected with incremental", "\"\\'%s\\' file does not exist.\" % 
options.include_email_file): cron = GpCronDump(options,", "None self.include_dump_tables_file = None self.exclude_dump_tables_file = None self.backup_dir = None", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.exclude_schema_file =", "using -s option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_28(self, mock,", "timestamp = '20141016010101' file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file',", "= False @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def test_options11(self, mock, mock2,", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options21(self, mock, mock2): options = GpCronDumpTestCase.Options()", "options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter5(self, mock, mock2): options =", "can not be selected with --schema-file option'): cron = GpCronDump(options,", "timestamp) pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp) expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz'", "test_validate_parse_email_File00(self, mock1, mock2, mock3, mock4): options = GpCronDumpTestCase.Options() options.include_email_file =", "def test_get_files_file_list3(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key =", "def test_options19(self, mock, mock2): options = GpCronDumpTestCase.Options() options.clear_dumps = True", "self.assertRaisesRegexp(Exception, '-t can not be selected with --table-file option'): cron", "= 'foo' options.exclude_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-t and -T", "None ## Enterprise init self.incremental = False self.ddboost = False", "file = gpcd.get_schema_list_file(dbname) self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_33(self, mock1, mock2):", "test_options_table_filter5(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.exclude_dump_tables", "self.assertTrue(file.startswith('/tmp/schema_list')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_33(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_2(self,", "@patch('gpcrondump.validate_current_timestamp') def test_options1(self, mock, mock2): options = GpCronDumpTestCase.Options() 
options.include_dump_tables =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_24(self, mock, mock2):", "GpCronDumpTestCase.Options() options.timestamp_key = '20130101010' gpcd = GpCronDump(options, None) with self.assertRaisesRegexp(Exception,", "with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with -K option'):", "mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.incremental = True", "options.include_dump_tables = 'foo' options.exclude_dump_tables = 'foo' with self.assertRaisesRegexp(Exception, '-t can", "mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema = ['information_schema'] with self.assertRaisesRegexp(Exception,", "options = GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, \"can not", "= GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "get_backup_directory, write_lines_to_file import mock logger = gplog.get_unittest_logger() class GpCronDumpTestCase(unittest.TestCase): class", "options.masterDataDirectory = '/tmp/foobar' options.backup_dir = None gpcd = GpCronDump(options, None)", "options.max_streams = None with self.assertRaisesRegexp(Exception, '--max-streams must be specified along", "self.assertEquals(cron.full_dump_timestamp, '20121225090000') @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options12(self, mock, mock2): options =", "mock, mock2): options = GpCronDumpTestCase.Options() options.output_options = ['--inserts'] options.incremental =", "= GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_master_port') def", "= False options.max_streams = None options.list_backup_files = False gpcd =", "None) partition_list = ['public, aot1, 2190', 'public, aot2:aot, 3190'] expected_output", "self.dump_config = False self.history = False self.pre_vacuum = False self.post_vacuum", "'foo' options.exclude_schema_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '-s can not be", "return_value='20130101000000') def test_get_files_file_list3(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.timestamp_key", "= GpCronDump(options, None) master = Mock() master.getSegmentHostName.return_value = 'foo2' timestamp", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter6(self, mock, mock2):", "and -T can not be selected with --exclude-schema-file option'): cron", "= True options.netbackup_service_host = \"mdw\" options.netbackup_policy = \"test_policy\" options.netbackup_schedule =", "mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None options.dump_global =", "options.ddboost = True options.replicate = True options.max_streams = \"abc\" with", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options4(self, mock, mock2): options = 
GpCronDumpTestCase.Options()", "cron = GpCronDump(options, None) ao_partition_list = ['public, aot1!asd, 2190', 'public,", "supply -x <database name> with incremental option'): cron = GpCronDump(options,", "= ['public.heapt1', 'public.heapt2'] cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) #Should not raise an", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.incremental =", "def test_options3(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables_file = 'foo'", "'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_filter' % options.masterDataDirectory] self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp',", "options = GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.exclude_dump_tables_file = 'foo' with", "options = GpCronDumpTestCase.Options() options.timestamp_key = None options.dump_global = True options.masterDataDirectory", "self.include_dump_tables = None self.exclude_dump_tables = None self.include_dump_tables_file = None self.exclude_dump_tables_file", "with self.assertRaisesRegexp(Exception, '-s option can not be selected with incremental", "options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options14(self, mock, mock2): options =", "self.assertRaisesRegexp(Exception, '-s option can not be selected with incremental backup'):", "'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_gpcrondump_init0(self, mock1, mock2):", "test_options_schema_filter_22(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_schema_file = 'foo' options.include_dump_tables", "options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_dump_tables = '/tmp/foo' with", "= GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_dump_tables = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "incremental option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options21(self,", "specify catalog schema 'information_schema' using -s option\"): GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port')", "= None self.timestamp_key = None self.list_backup_files = None self.quiet =", "test_options27(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_databases = 'bkdb,fulldb' options.timestamp_key", "'foo' with self.assertRaisesRegexp(Exception, '-t can not be selected with --exclude-table-file", "'--inserts, --column-inserts, --oids cannot be selected with incremental backup'): cron", "= GpCronDumpTestCase.Options() options.include_dump_tables = 'foo' options.include_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception,", "self.clear_catalog_dumps = False self.batch_default = DEFAULT_NUM_WORKERS 
self.include_dump_tables = None self.exclude_dump_tables", "1, 1, 1, 1, 1)) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object2(self, mock1,", "'public.cot2:asd']]) def test_verify_tablenames_00_bad(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options() cron", "'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file',", "'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt'", "None self.list_backup_files = None self.quiet = False self.verbose = False", "GpCronDumpTestCase.Options() options.dump_schema = 'foo' options.include_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file", "mock2): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\" with self.assertRaisesRegexp(Exception, \"\\'%s\\'", "= None self.output_options = None self.report_dir = None self.timestamp_key =", "= 20 with self.assertRaisesRegexp(Exception, '--replicate and --max-streams cannot be used", "[{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': None, 'SUBJECT': \"backup completed for Database 'testdb100'\"}]})", "current_exit_status = 0 time_start = '12:07:09' time_end = '12:08:18' cron", "with self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'): GpCronDump(options, None)", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101000000_increments' %", "test_options23(self, mock, mock2): options = GpCronDumpTestCase.Options() options.ddboost = True options.replicate", "test_get_pipes_file_list2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None options.masterDataDirectory", "mock1, mock2, mock3): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc\" with", "= None self.dump_stats = None ## Enterprise init self.incremental =", "2190', 'public, cot2, 3190'] heap_partition_list = ['public.heapt1', 'public.heapt2'] cron._verify_tablenames(ao_partition_list, co_partition_list,", "Mock from gppylib.operations.dump import MailDumpEvent from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file", "for Database 'testdb100'\"}]}) def test_validate_parse_email_File04(self, mock1, mock2, mock3, mock4): options", "= GpCronDump(options, None) self.assertEqual(gpcd.dump_prefix, 'foo_') @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111)", "= GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.exclude_dump_tables_file = '/tmp/foo' with self.assertRaisesRegexp(Exception,", "@patch('gpcrondump.validate_current_timestamp') def test_options26(self, mock1, mock2): options = GpCronDumpTestCase.Options() 
options.list_backup_files =", "incremental backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_00(self,", "options.include_email_file = \"/tmp/abc.yaml\" m = mock.MagicMock() with patch('__builtin__.open', m, create=True):", "GpCronDumpTestCase.Options() dump_database = 'testdb1' current_exit_status = 0 time_start = '12:07:09'", "True with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not be selected with", "mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema = 'foo' options.include_schema_file =", "options.list_filter_tables = True try: with self.assertRaisesRegexp(Exception, 'list filter tables option", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file') @patch('gpcrondump.get_lines_from_file') def test_get_include_exclude_for_dump_database02(self, mock1, mock2, mock3, mock4):", "= gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1',", "= 'foo' options.incremental = True with self.assertRaisesRegexp(Exception, '-s option can", "True gpcd = GpCronDump(options, None) dirtyfile = '/tmp/dirty' dbname =", "option can not be selected with incremental backup'): cron =", "None self.clear_dumps = False self.cleanup_date = None self.cleanup_total = None", "'public.aot2:aot'] result = cron._get_table_names_from_partition_list(partition_list) self.assertEqual(result, expected_output) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_table_names_from_partition_list_01(self,", "'foo' with self.assertRaisesRegexp(Exception, '-T can not be selected with --exclude-table-file", "--oids cannot be selected with incremental backup'): cron = GpCronDump(options,", "= False self.clear_dumps_only = False self.post_script = None self.dump_config =", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options3(self, mock, mock2):", "gppylib.operations.dump import MailDumpEvent from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file import mock", "def test_options_schema_filter_12(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_schema_file = 'foo'", "= GpCronDumpTestCase.Options() options.incremental = True cron = GpCronDump(options, None) self.assertEquals(cron.full_dump_timestamp,", "exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertTrue(inc.startswith('/tmp/include_dump_tables_file')) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000') def", "'-S can not be selected with --exclude-schema-file option'): cron =", "'/tmp/foo' with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected", "backup'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options20(self, mock,", "'foo' 
options.incremental = False options.list_filter_tables = True try: with self.assertRaisesRegexp(Exception,", "= None self.report_dir = None self.timestamp_key = None self.list_backup_files =", "'testdb1' current_exit_status = 0 time_start = '12:07:09' time_end = '12:08:18'", "= GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_timestamp_object1(self, mock1, mock2): options", "'-t can not be selected with --exclude-table-file option'): cron =", "options = GpCronDumpTestCase.Options() options.clear_dumps = True options.incremental = True with", "def test_get_files_file_list2(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.timestamp_key = None", "'testdb100', 'SUBJECT': \"backup completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File04(self, mock1,", "True options.dump_databases = ['bkdb'] #If this is successful then it", "@patch('gpcrondump.validate_current_timestamp') def test_options16(self, mock, mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory =", "= None self.email_details = None self.include_schema_file = None self.exclude_schema_file =", "= '/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can", "@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter2(self, mock, mock2): options = GpCronDumpTestCase.Options() options.include_dump_tables", "write_lines_to_file('/tmp/foo', ['public']) gpcd = GpCronDump(options, None) dbname = 'foo' timestamp", "GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir = None gpcd = GpCronDump(options,", "@patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_lines_from_file', return_value=['public']) @patch('gpcrondump.get_user_table_list_for_schema', return_value=['public', 'table1', 'public', 'table2']) def test_options_schema_filter_36(self,", "self.assertRaisesRegexp(Exception, 'exclude table list can not be selected with incremental", "self.assertEqual(sorted(files_file_list), sorted(expected_files_list)) @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') @patch('gpcrondump.GpCronDump._get_master_port') def test_get_files_file_list_with_prefix(self, mock1, mock2,", "dbname = 'foo' (inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname) self.assertEquals(inc, None)", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_16(self, mock, mock2): options = GpCronDumpTestCase.Options() options.dump_schema =", "options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory] self.assertEqual(files_file_list, expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') @patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000')", "cron = GpCronDump(options, None) @patch('gpcrondump.os.path.isfile', return_value=True) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.os.path.getsize', return_value=111) @patch('gpcrondump.yaml.load',", "expected_files_list) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_get_files_file_list2(self, mock1, mock2): options = GpCronDumpTestCase.Options()", "None) 
@patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_20(self, mock, mock2): options = GpCronDumpTestCase.Options()", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_18(self, mock, mock2):", "number greater than zero'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp')", "'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']", "'20130101010' gpcd = GpCronDump(options, None) with self.assertRaisesRegexp(Exception, 'Invalid timestamp key'):", "= True with self.assertRaisesRegexp(Exception, '--schema-file option can not be selected", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments' %", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,", "@patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_26(self, mock, mock2): options = GpCronDumpTestCase.Options() options.exclude_dump_schema =", "% options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' % options.masterDataDirectory, 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar', 'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz', 'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz'] self.assertEqual(sorted(pipes_file_list),", "expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory, 'foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory] self.assertEqual(pipes_file_list, expected_files_list)", "= True self.backup_set = None self.dump_global = False self.clear_catalog_dumps =", "'20130101010101' dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp) pipes_file_list = gpcd._get_pipes_file_list(master,", "raise an exception GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options14(self, mock,", "be a number greater than zero'): cron = GpCronDump(options, None)", "mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' options.backup_dir = '/foo1'", "self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --schema-file", "options.timestamp_key = True with 
self.assertRaisesRegexp(Exception, 'multi-database backup is not supported", "cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options20(self, mock, mock2):", "options = GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.include_dump_tables_file = 'foo' with", "def test_validate_parse_email_File01(self, mock1, mock2): options = GpCronDumpTestCase.Options() options.include_email_file = \"/tmp/abc.yaml\"", "None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_table_filter5(self, mock, mock2): options = GpCronDumpTestCase.Options()", "'public, aot2:aot, 3190'] expected_output = ['public.aot1', 'public.aot2:aot'] result = cron._get_table_names_from_partition_list(partition_list)", "'--table-file and --exclude-table-file can not be selected with -s option'):", "= ['bkdb'] #If this is successful then it should not", "options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' % options.masterDataDirectory, 'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory,", "mock2): options = GpCronDumpTestCase.Options() options.masterDataDirectory = '/tmp/foobar' gpcd = GpCronDump(options,", "try: with self.assertRaisesRegexp(Exception, 'list filter tables option requires --prefix and", "@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000') def test_get_files_file_list3(self, mock1, mock2, mock3): options = GpCronDumpTestCase.Options()", "@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': None, 'SUBJECT': \"backup completed for", "-S option'): cron = GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options_schema_filter_18(self,", "GpCronDumpTestCase.Options() options.exclude_dump_tables = 'foo' options.exclude_dump_tables_file = 'foo' with self.assertRaisesRegexp(Exception, '-T", "GpCronDump(options, None) @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.validate_current_timestamp') def test_options10(self, mock, mock2): options =", "= ['--oids'] options.incremental = True with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids", "GpCronDumpTestCase.Options() options.dump_databases = 'bkdb' options.incremental = False #If this is", "Mock() master.getSegmentHostName.return_value = 'foo1' mock_segs = [Mock(), Mock()] for id,", "@patch('gpcrondump.validate_current_timestamp') def test_options_inserts_with_incremental(self, mock, mock2): options = GpCronDumpTestCase.Options() options.output_options =", "None @patch('gpcrondump.GpCronDump._get_master_port') @patch('gpcrondump.GpCronDump.validate_dump_schema') @patch('gpcrondump.validate_current_timestamp') def test_option_schema_filter_1(self, mock, mock2, mock3): options", "GpCronDumpTestCase.Options() options.include_schema_file = '/tmp/foo' options.incremental = True with self.assertRaisesRegexp(Exception, '--schema-file", "\"backup completed for Database 'testdb100'\"}]}) def test_validate_parse_email_File05(self, mock1, mock2, mock3,", "options.masterDataDirectory = '/foo' options.dump_config = True gpcd = GpCronDump(options, None)", "with self.assertRaisesRegexp(Exception, ''): 
…", "<remainder of this word-shingle dump elided: overlapping 10-word n-grams of gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py (class GpCronDumpTestCase). The shingles cover the Options fixture defaults; conflict checks among -t/-T, -s/-S, --table-file/--exclude-table-file, --schema-file/--exclude-schema-file and incremental mode; ddboost/NetBackup and --list-backup-files validation; include_email_file YAML checks; and the _get_files_file_list/_get_pipes_file_list and _verify_tablenames expectations. The shingles are shuffled, so the original line order is not recoverable; a spliced excerpt follows below.>" ]
[ "<word-shingle dump collapsed: overlapping 10-word n-grams of an Ambari unit test for the HIVE service check (class TestServiceCheck, driving 2.0.6/services/HIVE/package/scripts/service_check.py). The shingles contain the Apache 2.0 license header, the imports, and the full test_service_check_default and test_service_check_secured methods; the file is reconstructed below.>" ]
''' import", "import MagicMock, call, patch from stacks.utils.RMFTestCase import * import datetime,", "self.assertResourceCalled('File', '/tmp/hcatSmoke.sh', content = StaticFile('hcatSmoke.sh'), mode = 0755, ) self.assertResourceCalled('Execute',", "Software Foundation (ASF) under one or more contributor license agreements.", "class TestServiceCheck(RMFTestCase): @patch(\"sys.exit\") def test_service_check_default(self, sys_exit_mock): self.executeScript(\"2.0.6/services/HIVE/package/scripts/service_check.py\", classname=\"HiveServiceCheck\", command=\"service_check\", config_file=\"default.json\"", "StaticFile('hcatSmoke.sh'), mode = 0755, ) self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa;", "path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'], tries = 3, user", "License. ''' import os from mock.mock import MagicMock, call, patch", "distributed with this work for additional information regarding copyright ownership.", "= {'PATH' : os.environ['PATH'] + os.pathsep + \"/usr/lib/hive/bin\"}, try_sleep =" ]
#!/usr/bin/env python

# Copyright (c) 2017 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies the use of linker flags in environment variables.

In this test, gyp and build both run in the same local environment.
"""

import TestGyp

import re
import subprocess
import sys

FORMATS = ('make', 'ninja')

if sys.platform.startswith('linux'):
  test = TestGyp.TestGyp(formats=FORMATS)

  CHDIR = 'ldflags-from-environment'
  with TestGyp.LocalEnv({'LDFLAGS': '-Wl,--dynamic-linker=/target',
                         'LDFLAGS_host': '-Wl,--dynamic-linker=/host',
                         'GYP_CROSSCOMPILE': '1'}):
    test.run_gyp('test.gyp', chdir=CHDIR)
    test.build('test.gyp', chdir=CHDIR)

  def GetDynamicLinker(p):
    p = test.built_file_path(p, chdir=CHDIR)
    r = re.compile(r'\[Requesting program interpreter: ([^\]]+)\]')
    proc = subprocess.Popen(['readelf', '-l', p], stdout=subprocess.PIPE)
    o = proc.communicate()[0].decode('utf-8')
    assert not proc.returncode
    return r.search(o).group(1)

  if GetDynamicLinker('ldflags') != '/target':
    test.fail_test()

  if GetDynamicLinker('ldflags_host') != '/host':
    test.fail_test()

  test.pass_test()
# -*- coding: utf-8 -*-

from .context import sample


def test_thoughts():
    assert(sample.hmm() is None)
######################################################################
# LeetCode Problem Number : 145
# Difficulty Level : Medium
# URL : https://leetcode.com/problems/binary-tree-postorder-traversal/
######################################################################
from binary_search_tree.tree_node import TreeNode


class BinaryTree:
    # runtime --> 77.59%, memory --> 50.59%
    def postOrderRecursive(self, root: TreeNode) -> [int]:
        if not root:
            return []
        res = []
        """
        post - order traversal
            visit left sub - tree
            visit right sub - tree
            visit node
        """
        res += self.postOrderRecursive(root.left)
        res += self.postOrderRecursive(root.right)
        res.append(root.val)
        """ return visited node + child nodes """
        return res

    def postOrderIterative(self, root: TreeNode) -> [int]:
        if not root:
            return []
        ret = []
        """
        on visiting a node, push 2 copies to the stack.
        use 1st copy to process the child nodes
        use 2nd copy to insert into result
        """
        st = [root] * 2
        while st:
            cur = st.pop()
            """
            if current node is the last node in the stack, then visit its
            child nodes
            if current node is not the last node in the stack, then current
            node is the 2nd copy. Insert node into result list
            """
            if st and st[-1] is cur:
                """
                insert right child node followed by left.
                this ensures processing is done from left to right.
                """
                if cur.right:
                    st += [cur.right] * 2
                if cur.left:
                    st += [cur.left] * 2
            else:
                ret.append(cur.val)
        return ret

    # runtime --> 54.35%, memory --> 5.09%
    def postOrderIterativeReverse(self, root: TreeNode) -> [int]:
        if not root:
            return []
        res, stack = [], [root]
        while stack:
            cur = stack.pop()
            if cur:
                """
                visit the nodes in reverse order i.e.
                node -> right child node -> left child node
                similar to right-first pre-order traversal
                """
                res.append(cur.val)
                stack.append(cur.left)
                stack.append(cur.right)
        """ reversed result will give post-order traversal """
        return res[::-1]
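# Usage sketch: a minimal check that the three traversals agree, assuming
# TreeNode(val) initializes .left and .right to None (matching how the
# methods above access nodes).
if __name__ == '__main__':
    #   1
    #  / \
    # 2   3   -> post-order is [2, 3, 1]
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    bt = BinaryTree()
    assert bt.postOrderRecursive(root) == [2, 3, 1]
    assert bt.postOrderIterative(root) == [2, 3, 1]
    assert bt.postOrderIterativeReverse(root) == [2, 3, 1]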
[ "contained in *page*.\"\"\" return self._dokuwiki.send('wiki.listLinks', page) def backlinks(self, page): \"\"\"Returns", "by connecting to the XMLRPC server.\"\"\" # Initialize XMLRPC client.", "minor change \"\"\" return self._dokuwiki.send('dokuwiki.appendPage', page, content, options) def html(self,", "if b64decode else data.data if dirpath is None: return data", "% url) if cookieAuth == False: self.proxy = ServerProxy(url, **kwargs)", "data, ow=overwrite) def delete(self, media): \"\"\"Delete *media*.\"\"\" return self._dokuwiki.send('wiki.deleteAttachment', media)", "= dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False) except (DokuWikiError, Exception) as err:", "with open(filepath, 'wb') as fhandler: fhandler.write(data) def info(self, media): \"\"\"Returns", "= media.replace('/', ':').split(':')[-1] if not os.path.exists(dirpath): os.makedirs(dirpath) filepath = os.path.join(dirpath,", "if it exists remotely. \"\"\" with open(filepath, 'rb') as fhandler:", "not start: start = True continue if start: page_content.append(line) return", "*2016-01-01*:: from datetime import datetime wiki.pages.changes(datetime(2016, 1, 1).timestamp()) \"\"\" return", "\"\"\"Property that returns *2* with the supported RPC API version.\"\"\"", "return self._dokuwiki.send('wiki.getBackLinks', page) class _Medias(object): \"\"\"This object regroup methods for", "namespace, options) def changes(self, timestamp): \"\"\"Returns the list of medias", "\"\"\"Log to the wiki using *user* and *password* credentials. It", "\"\"\"Returns all medias of the given *namespace*. Valid *options* are:", "the object by connecting to the XMLRPC server.\"\"\" # Initialize", "return (self._dokuwiki.send('wiki.getPageVersion', page, version) if version is not None else", "dataentry *name* from *data*.\"\"\" return '---- dataentry %s ----\\n%s\\n----' %", "all changes since *2016-01-01*:: from datetime import datetime wiki.pages.changes(datetime(2016, 1,", "CookiesTransport(), **kwargs) else: self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs) # Force", "current time at the remote wiki server as Unix timestamp.", "Informations of the last version is returned if *version* is", "self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite) def delete(self, media): \"\"\"Delete *media*.\"\"\" return", "x: x[0] + '=' + x[1], self._cookies.items()) connection.putheader(\"Cookie\", \"; \".join(cookies))", "x[1], self._cookies.items()) connection.putheader(\"Cookie\", \"; \".join(cookies)) def parse_response(self, response): \"\"\"parse and", "*overwrite* parameter allow to overwrite the file if it already", "\"): # filter 'expire' information if not header.startswith(\"D\"): continue cookie", "urlencode else: from xmlrpclib import ServerProxy, Binary, Fault, Transport from", "page)) def append(self, page, content, **options): \"\"\"Appends *content* text to", "indicate if the rule was correctly added. \"\"\" return self.send('plugin.acl.addAcl',", "= False for line in content.split('\\n'): if line.strip().startswith('---- dataentry'): found", "\"\"\" return self._dokuwiki.send('dokuwiki.search', string) def versions(self, page, offset=0): \"\"\"Returns the", "the 'ExpatError' exception although the change has been done. 
This", "\"\"\"Locks *page*.\"\"\" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[page], unlock=[]) if result['lockfail']: raise", "for returning all changes since *2016-01-01*:: from datetime import datetime", "return dataentry @staticmethod def gen(name, data): \"\"\"Generate dataentry *name* from", "*data*.\"\"\" return '---- dataentry %s ----\\n%s\\n----' % (name, '\\n'.join( '%s:%s'", "exception although the change has been done. This # allow", "functions. self.pages = _Pages(weakref.ref(self)()) self.medias = _Medias(weakref.ref(self)()) def send(self, command,", "as err: if err.faultCode == 121: return {} elif err.faultCode", "\"\"\"Returns a list of all links contained in *page*.\"\"\" return", "lock=[], unlock=[page]) if result['unlockfail']: raise DokuWikiError('unable to unlock page') def", "self.send('dokuwiki.getTime') @property def xmlrpc_version(self): \"\"\"Property that returns the XML RPC", "to result list \"\"\" return self._dokuwiki.send('wiki.getAttachments', namespace, options) def changes(self,", "'USER', 'PASSWORD', cookieAuth=False) except (DokuWikiError, Exception) as err: print('unable to", "None else self._dokuwiki.send('wiki.getPageHTML', page)) def set(self, page, content, **options): \"\"\"Set/replace", "or text declaration not at start of entity: line 2,", "password): \"\"\"Log to the wiki using *user* and *password* credentials.", "XML-RPC *command*. *args* and *kwargs* are the arguments and parameters", "if os.path.exists(filepath) and not overwrite: raise FileExistsError(\"[Errno 17] File exists:", "to *page*. Valid *options* are: * *sum*: (str) change summary", "\"\"\" date_offset = (datetime.now() - datetime.utcnow()) # Python < 2.7", "Binary, Fault, Transport from urllib.parse import urlencode else: from xmlrpclib", "\"\"\"This python module aims to manage `DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by", "*password* are respectively the URL, the login and the password", "versions ... This function convert *date* to a `datetime` object.", "data = base64.b64encode(_bytes) if b64encode else Binary(_bytes) self._dokuwiki.send('wiki.putAttachment', media, data,", "datetime wiki.medias.changes(datetime(2016, 1, 1).timestamp()) \"\"\" return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp) def get(self,", "store cookie\"\"\" try: for header in response.msg.get_all(\"Set-Cookie\"): cookie = header.split(\";\",", "login and the password for connecting to the wiki. *kwargs*", "or '') else: url = '%s://%s%s/lib/exe/xmlrpc.php' % ( params['proto'], params['host'],", "cookieValue finally: return Transport.parse_response(self, response) class CookiesTransport2(Transport): \"\"\"A Python2 xmlrpclib.Transport", "line_split = line.split(':') key = line_split[0].strip() value = re.sub('#.*$', '',", "def set(self, media, _bytes, overwrite=True, b64encode=False): \"\"\"Set *media* from *_bytes*.", "% (name, '\\n'.join( '%s:%s' % (attr, value) for attr, value", "DokuWiki wiki. *url*, *user* and *password* are respectively the URL,", "except ExpatError as err: if str(err) != ERR: raise DokuWikiError(err)", "page, offset) def info(self, page, version=None): \"\"\"Returns informations of *page*.", "are the arguments and parameters needed by the command. \"\"\"", "saved to a file. By default, the filename is the", "*_bytes*. 
*overwrite* parameter specify if the media must be overwrite", "media) class Dataentry(object): \"\"\"Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_.\"\"\" @staticmethod", "+ '=' + x[1], self._cookies.items()) connection.putheader(\"Cookie\", \"; \".join(cookies)) Transport.send_headers(self, connection,", "local time. \"\"\" date_offset = (datetime.now() - datetime.utcnow()) # Python", "filepath) with open(filepath, 'wb') as fhandler: fhandler.write(data) def info(self, media):", "module aims to manage `DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the", "rule was correctly removed. \"\"\" return self.send('plugin.acl.delAcl', scope, user) class", "getattr(method, elt) try: return method(*args) except Fault as err: if", "options) def changes(self, timestamp): \"\"\"Returns a list of changes since", "Transport.parse_response(self, response) class CookiesTransport2(Transport): \"\"\"A Python2 xmlrpclib.Transport subclass that retains", "*content* of *page*. Valid *options* are: * *sum*: (str) change", "login to check the connection. if not self.login(user, password): raise", "title of the wiki.\"\"\" return self.send('dokuwiki.getTitle') def login(self, user, password):", "with *filename* parameter. *overwrite* parameter allow to overwrite the file", "else: from xmlrpclib import ServerProxy, Binary, Fault, Transport from urllib", "self._dokuwiki.send('wiki.getPage', page)) def append(self, page, content, **options): \"\"\"Appends *content* text", "filepath, overwrite=True): \"\"\"Set *media* from local file *filepath*. *overwrite* parameter", "*args, **kwargs): \"\"\"Generic method for executing an XML-RPC *command*. *args*", "exists remotely. \"\"\" data = base64.b64encode(_bytes) if b64encode else Binary(_bytes)", "*name* from *data*.\"\"\" return '---- dataentry %s ----\\n%s\\n----' % (name,", "correctly removed. \"\"\" return self.send('plugin.acl.delAcl', scope, user) class _Pages(object): \"\"\"This", "an error.\"\"\" pass class CookiesTransport(Transport): \"\"\"A Python3 xmlrpc.client.Transport subclass that", "whether this is a minor change \"\"\" return self._dokuwiki.send('dokuwiki.appendPage', page,", "err: if str(err) != ERR: raise DokuWikiError(err) @property def version(self):", "the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client` **ServerProxy** parameters. The exception `DokuWikiError`", "cookieValue finally: return Transport.parse_response(self, response) class DokuWiki(object): \"\"\"Initialize a connection", "return self._dokuwiki.send('dokuwiki.appendPage', page, content, options) def html(self, page, version=None): \"\"\"Returns", "page): \"\"\"Returns the permission level of *page*.\"\"\" return self._dokuwiki.send('wiki.aclCheck', page)", "url '%s'\" % url) if cookieAuth == False: self.proxy =", "unlock(self, page): \"\"\"Unlocks *page*.\"\"\" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[], unlock=[page]) if", "CookiesTransport2(Transport): \"\"\"A Python2 xmlrpclib.Transport subclass that retains cookies.\"\"\" def __init__(self):", "from datetime import datetime wiki.pages.changes(datetime(2016, 1, 1).timestamp()) \"\"\" return self._dokuwiki.send('wiki.getRecentChanges',", "return self._dokuwiki.send('wiki.aclCheck', page) def links(self, page): \"\"\"Returns a list of", "to install it:: pip install dokuwiki Otherwise sources are in", "exists remotely. 
\"\"\" with open(filepath, 'rb') as fhandler: self._dokuwiki.send('wiki.putAttachment', media,", "self._dokuwiki.send('wiki.getPageInfo', page)) def get(self, page, version=None): \"\"\"Returns the content of", "returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime`` type and the format changes", "filename = media.replace('/', ':').split(':')[-1] if not os.path.exists(dirpath): os.makedirs(dirpath) filepath =", "'', ':'.join(line_split[1:])).strip() dataentry.setdefault(key, value) if not found: raise DokuWikiError('no dataentry", "calculate it by hand! date_offset = (date_offset.microseconds + (date_offset.seconds +", "and python3+. Installation ------------ It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so", "cookie below handler if self._cookies: cookies = map(lambda x: x[0]", "*overwrite* parameter specify if the media must be overwrite if", "API <https://www.dokuwiki.org/devel:xmlrpc>`_. It is compatible with python2.7 and python3+. Installation", "setting an empty content.\"\"\" return self.set(page, '') def lock(self, page):", "user) class _Pages(object): \"\"\"This object regroup methods for managing pages", "returned, otherwise the data is saved to a file. By", "+ '=' + x[1], self._cookies.items()) connection.putheader(\"Cookie\", \"; \".join(cookies)) def parse_response(self,", "group if *@group* syntax is used). It returns a boolean", "try: wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False) except (DokuWikiError, Exception)", "of a DokuWiki. This object is accessible from the ``pages``", "self.medias = _Medias(weakref.ref(self)()) def send(self, command, *args, **kwargs): \"\"\"Generic method", "of *page*. The HTML content of the last version of", "cookieAuth=False) except (DokuWikiError, Exception) as err: print('unable to connect: %s'", "page) class _Medias(object): \"\"\"This object regroup methods for managing medias", "err: # Sometime the first line of the XML response", "try: for header in response.msg.get_all(\"Set-Cookie\"): cookie = header.split(\";\", 1)[0] cookieKey,", "password, params['host'], params['uri'] or '') else: url = '%s://%s%s/lib/exe/xmlrpc.php' %", "= True continue if start: page_content.append(line) return '\\n'.join(page_content) if page_content", "(str) change summary * *minor*: (bool) whether this is a", "info(self, page, version=None): \"\"\"Returns informations of *page*. Informations of the", "= dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.medias.list() \"\"\" def __init__(self, dokuwiki): self._dokuwiki", "regroup methods for managing medias of a DokuWiki. This object", "the given *scope* and *user* (or group if *@group* syntax", "raise DokuWikiError(err) except ExpatError as err: if str(err) != ERR:", "15 results. 
\"\"\" return self._dokuwiki.send('dokuwiki.search', string) def versions(self, page, offset=0):", "(bool) do an md5 sum of content * *skipacl*: (bool)", "is not set the binary data is returned, otherwise the", "filename is None: filename = media.replace('/', ':').split(':')[-1] if not os.path.exists(dirpath):", "'---- dataentry %s ----\\n%s\\n----' % (name, '\\n'.join( '%s:%s' % (attr,", "not set the binary data is returned, otherwise the data", "\"\"\" return self.send('dokuwiki.getXMLRPCAPIVersion') @property def xmlrpc_supported_version(self): \"\"\"Property that returns *2*", "args.append(kwargs) method = self.proxy for elt in command.split('.'): method =", "Python2 xmlrpclib.Transport subclass that retains cookies.\"\"\" def __init__(self): Transport.__init__(self) self._cookies", "exists: '%s'\" % filepath) with open(filepath, 'wb') as fhandler: fhandler.write(data)", "DokuWikiError(err) except ExpatError as err: if str(err) != ERR: raise", "media but it can be changed with *filename* parameter. *overwrite*", "continue cookie = header.split(\";\", 1)[0] cookieKey, cookieValue = cookie.split(\"=\", 1)", "that indicate if the rule was correctly added. \"\"\" return", "weakref from xml.parsers.expat import ExpatError if sys.version_info[0] == 3: from", "params['proto'], user, password, params['host'], params['uri'] or '') else: url =", "line in content.split('\\n'): if line == '----' and not start:", "user, password) def add_acl(self, scope, user, permission): \"\"\"Add an `ACL", "when there is an error.\"\"\" pass class CookiesTransport(Transport): \"\"\"A Python3", "...) are not catched. .. code:: try: wiki = dokuwiki.DokuWiki('URL',", "**kwargs) # Force login to check the connection. if not", "\"\"\"Delete any ACL matching the given *scope* and *user* (or", "HTML content of the last version of the page is", "(int) recursion level, 0 for all * *hash*: (bool) do", "list(args) if kwargs: args.append(kwargs) method = self.proxy for elt in", "indicates if the user succesfully authenticate.\"\"\" return self.send('dokuwiki.login', user, password)", "returns a boolean that indicates if the user succesfully authenticate.\"\"\"", "the name of the media but it can be changed", "return self.send('dokuwiki.getVersion') @property def time(self): \"\"\"Property that returns the current", "the page is returned if *version* is not set. \"\"\"", "wiki using *user* and *password* credentials. It returns a boolean", "\"\"\"A Python3 xmlrpc.client.Transport subclass that retains cookies.\"\"\" def __init__(self): Transport.__init__(self)", "method = getattr(method, elt) try: return method(*args) except Fault as", "= [] start = False for line in content.split('\\n'): if", "and not start: start = True continue if start: page_content.append(line)", "wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client` **ServerProxy** parameters. The exception `DokuWikiError` is", "/ 60)) return date + timedelta(hours=date_offset) class DokuWikiError(Exception): \"\"\"Exception raised", "for managing pages of a DokuWiki. 
# -*- coding: utf-8 -*-

"""This python module aims to manage `DokuWiki <https://www.dokuwiki.org/dokuwiki>`_
wikis by using the provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_.
It is compatible with python2.7 and python3+.

Installation
------------
It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use the
``pip`` command to install it::

    pip install dokuwiki

Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_
"""

import re
import sys
import base64
import weakref
from xml.parsers.expat import ExpatError

if sys.version_info[0] == 3:
    from xmlrpc.client import ServerProxy, Binary, Fault, Transport
    from urllib.parse import urlencode
else:
    from xmlrpclib import ServerProxy, Binary, Fault, Transport
    from urllib import urlencode

from datetime import datetime, timedelta

ERR = 'XML or text declaration not at start of entity: line 2, column 0'

_URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?')


def date(date):
    """DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime`` type
    and the format changes between DokuWiki versions ... This function converts
    *date* to a `datetime` object.
    """
    date = date.value
    return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')
            if len(date) == 24
            else datetime.strptime(date, '%Y%m%dT%H:%M:%S'))


def utc2local(date):
    """DokuWiki returns dates with a +0000 timezone. This function converts
    *date* to the local time.
    """
    date_offset = (datetime.now() - datetime.utcnow())
    # Python < 2.7 doesn't have the 'total_seconds' method so calculate it
    # by hand!
    date_offset = (date_offset.microseconds +
                   (date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6
    date_offset = int(round(date_offset / 60 / 60))
    return date + timedelta(hours=date_offset)

class DokuWikiError(Exception):
    """Exception raised by this module when there is an error."""
    pass


class CookiesTransport(Transport):
    """A Python3 xmlrpc.client.Transport subclass that retains cookies."""
    def __init__(self):
        Transport.__init__(self)
        self._cookies = dict()

    def send_headers(self, connection, headers):
        if self._cookies:
            cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
            connection.putheader("Cookie", "; ".join(cookies))
        Transport.send_headers(self, connection, headers)

    def parse_response(self, response):
        """Parse and store cookies."""
        try:
            for header in response.msg.get_all("Set-Cookie"):
                cookie = header.split(";", 1)[0]
                cookieKey, cookieValue = cookie.split("=", 1)
                self._cookies[cookieKey] = cookieValue
        finally:
            return Transport.parse_response(self, response)


class CookiesTransport2(Transport):
    """A Python2 xmlrpclib.Transport subclass that retains cookies."""
    def __init__(self):
        Transport.__init__(self)
        self._cookies = dict()

    def send_request(self, connection, handler, request_body):
        Transport.send_request(self, connection, handler, request_body)
        # Set the cookie header below the handler.
        if self._cookies:
            cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
            connection.putheader("Cookie", "; ".join(cookies))

    def parse_response(self, response):
        """Parse and store cookies."""
        try:
            for header in response.getheader("set-cookie").split(", "):
                # Filter out 'expire' information.
                if not header.startswith("D"):
                    continue
                cookie = header.split(";", 1)[0]
                cookieKey, cookieValue = cookie.split("=", 1)
                self._cookies[cookieKey] = cookieValue
        finally:
            return Transport.parse_response(self, response)


class DokuWiki(object):
    """Initialize a connection to a DokuWiki wiki. *url*, *user* and
    *password* are respectively the URL, the login and the password for
    connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client`
    **ServerProxy** parameters.

    The exception `DokuWikiError` is raised if the authentication fails, but
    other exceptions (like ``gaierror`` for an invalid domain, ``ProtocolError``
    for an invalid wiki, ...) are not caught.

    .. code::

        try:
            wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False)
        except (DokuWikiError, Exception) as err:
            print('unable to connect: %s' % err)
    """

    def __init__(self, url, user, password, cookieAuth=False, **kwargs):
        """Initialize the object by connecting to the XMLRPC server."""
        # Initialize the XMLRPC client.
        try:
            params = _URL_RE.search(url).groupdict()
            if cookieAuth == False:
                url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % (
                    params['proto'], user, password,
                    params['host'], params['uri'] or '')
            else:
                url = '%s://%s%s/lib/exe/xmlrpc.php' % (
                    params['proto'], params['host'], params['uri'] or '')
        except AttributeError:
            raise DokuWikiError("invalid url '%s'" % url)

        if cookieAuth == False:
            self.proxy = ServerProxy(url, **kwargs)
        else:
            if sys.version_info[0] == 3:
                self.proxy = ServerProxy(url, CookiesTransport(), **kwargs)
            else:
                self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs)

        # Force a login to check the connection.
        if not self.login(user, password):
            raise DokuWikiError('invalid login or password!')

        # Set "namespaces" for the pages and medias functions.
        self.pages = _Pages(weakref.ref(self)())
        self.medias = _Medias(weakref.ref(self)())

    def send(self, command, *args, **kwargs):
        """Generic method for executing an XML-RPC *command*. *args* and
        *kwargs* are the arguments and parameters needed by the command.
        """
        args = list(args)
        if kwargs:
            args.append(kwargs)

        method = self.proxy
        for elt in command.split('.'):
            method = getattr(method, elt)

        try:
            return method(*args)
        except Fault as err:
            if err.faultCode == 121:
                return {}
            elif err.faultCode == 321:
                return []
            raise DokuWikiError(err)
        except ExpatError as err:
            if str(err) != ERR:
                raise DokuWikiError(err)

    @property
    def version(self):
        """Property that returns the DokuWiki version of the remote wiki."""
        return self.send('dokuwiki.getVersion')

    @property
    def time(self):
        """Property that returns the current time at the remote wiki server
        as a Unix timestamp.
        """
        return self.send('dokuwiki.getTime')

    @property
    def xmlrpc_version(self):
        """Property that returns the XML-RPC interface version of the remote
        wiki. This is DokuWiki implementation specific and independent of the
        supported standard API version returned by ``wiki.getRPCVersionSupported``.
        """
        return self.send('dokuwiki.getXMLRPCAPIVersion')

    @property
    def xmlrpc_supported_version(self):
        """Property that returns *2* with the supported RPC API version."""
        return self.send('wiki.getRPCVersionSupported')

    @property
    def title(self):
        """Property that returns the title of the wiki."""
        return self.send('dokuwiki.getTitle')

    def login(self, user, password):
        """Log in to the wiki using *user* and *password* credentials. It
        returns a boolean that indicates whether the user successfully
        authenticated."""
        return self.send('dokuwiki.login', user, password)

    def add_acl(self, scope, user, permission):
        """Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts
        the page/namespace *scope* to *user* (use the *@group* syntax for
        groups) with *permission* level. It returns a boolean that indicates
        whether the rule was correctly added.
        """
        return self.send('plugin.acl.addAcl', scope, user, permission)

    def del_acl(self, scope, user):
        """Delete any ACL matching the given *scope* and *user* (or group if
        the *@group* syntax is used). It returns a boolean that indicates
        whether the rule was correctly removed.
        """
        return self.send('plugin.acl.delAcl', scope, user)

class _Pages(object):
    """This object regroups the methods for managing the pages of a DokuWiki.
    It is accessible from the ``pages`` property of a `DokuWiki` instance::

        wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
        wiki.pages.list()
    """

    def __init__(self, dokuwiki):
        self._dokuwiki = dokuwiki

    def list(self, namespace='/', **options):
        """List all pages of the given *namespace*.

        Valid *options* are:

            * *depth*: (int) recursion level, 0 for all
            * *hash*: (bool) do an md5 sum of content
            * *skipacl*: (bool) list everything regardless of ACL
        """
        return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)

    def changes(self, timestamp):
        """Returns a list of changes since the given *timestamp*.

        For example, for returning all changes since *2016-01-01*::

            from datetime import datetime
            wiki.pages.changes(datetime(2016, 1, 1).timestamp())
        """
        return self._dokuwiki.send('wiki.getRecentChanges', timestamp)

    def search(self, string):
        """Performs a fulltext search on *string* and returns the first 15
        results.
        """
        return self._dokuwiki.send('dokuwiki.search', string)

    def versions(self, page, offset=0):
        """Returns the available versions of *page*. *offset* can be used to
        list earlier versions in the history.
        """
        return self._dokuwiki.send('wiki.getPageVersions', page, offset)

    def info(self, page, version=None):
        """Returns information about *page*. Information about the last
        version is returned if *version* is not set.
        """
        return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
                if version is not None
                else self._dokuwiki.send('wiki.getPageInfo', page))

    def get(self, page, version=None):
        """Returns the content of *page*. The content of the last version is
        returned if *version* is not set.
        """
        return (self._dokuwiki.send('wiki.getPageVersion', page, version)
                if version is not None
                else self._dokuwiki.send('wiki.getPage', page))

    def append(self, page, content, **options):
        """Appends *content* text to *page*.

        Valid *options* are:

            * *sum*: (str) change summary
            * *minor*: (bool) whether this is a minor change
        """
        return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)

    def html(self, page, version=None):
        """Returns the HTML content of *page*. The HTML content of the last
        version of the page is returned if *version* is not set.
        """
        return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
                if version is not None
                else self._dokuwiki.send('wiki.getPageHTML', page))

    def set(self, page, content, **options):
        """Set/replace the *content* of *page*.

        Valid *options* are:

            * *sum*: (str) change summary
            * *minor*: (bool) whether this is a minor change
        """
        try:
            return self._dokuwiki.send('wiki.putPage', page, content, options)
        except ExpatError as err:
            # Sometimes the first line of the XML response is blank, which
            # raises the 'ExpatError' exception although the change has been
            # done. This allows ignoring the error.
            if str(err) != ERR:
                raise DokuWikiError(err)

    def delete(self, page):
        """Delete *page* by setting an empty content."""
        return self.set(page, '')

    def lock(self, page):
        """Locks *page*."""
        result = self._dokuwiki.send('dokuwiki.setLocks',
                                     lock=[page], unlock=[])
        if result['lockfail']:
            raise DokuWikiError('unable to lock page')

    def unlock(self, page):
        """Unlocks *page*."""
        result = self._dokuwiki.send('dokuwiki.setLocks',
                                     lock=[], unlock=[page])
        if result['unlockfail']:
            raise DokuWikiError('unable to unlock page')

    def permission(self, page):
        """Returns the permission level of *page*."""
        return self._dokuwiki.send('wiki.aclCheck', page)

    def links(self, page):
        """Returns a list of all links contained in *page*."""
        return self._dokuwiki.send('wiki.listLinks', page)

    def backlinks(self, page):
        """Returns a list of all links referencing *page*."""
        return self._dokuwiki.send('wiki.getBackLinks', page)
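

# Illustrative sketch (placeholder page name; *wiki* is a connected `DokuWiki`
# instance): a typical page round-trip with the helpers above, guarded by an
# explicit lock.
def _example_page_roundtrip(wiki):
    wiki.pages.lock('sandbox:demo')
    try:
        wiki.pages.set('sandbox:demo', '====== Demo ======', sum='initial version')
        wiki.pages.append('sandbox:demo', '\nMore text.', minor=True)
        return wiki.pages.get('sandbox:demo')
    finally:
        wiki.pages.unlock('sandbox:demo')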

class _Medias(object):
    """This object regroups the methods for managing the medias of a DokuWiki.
    It is accessible from the ``medias`` property of a `DokuWiki` instance::

        wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
        wiki.medias.list()
    """

    def __init__(self, dokuwiki):
        self._dokuwiki = dokuwiki

    def list(self, namespace='/', **options):
        """Returns all medias of the given *namespace*.

        Valid *options* are:

            * *depth*: (int) recursion level, 0 for all
            * *skipacl*: (bool) skip acl checking
            * *pattern*: (str) check given pattern
            * *hash*: (bool) add hashes to result list
        """
        return self._dokuwiki.send('wiki.getAttachments', namespace, options)

    def changes(self, timestamp):
        """Returns the list of medias changed since the given *timestamp*.

        For example, for returning all changes since *2016-01-01*::

            from datetime import datetime
            wiki.medias.changes(datetime(2016, 1, 1).timestamp())
        """
        return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp)

    def get(self, media, dirpath=None, filename=None, overwrite=False,
            b64decode=False):
        """Returns the binary data of *media* or saves it to a file. If
        *dirpath* is not set, the binary data is returned, otherwise the data
        is saved to a file. By default, the filename is the name of the media,
        but it can be changed with the *filename* parameter. The *overwrite*
        parameter allows overwriting the file if it already exists locally.
        """
        import os
        data = self._dokuwiki.send('wiki.getAttachment', media)
        data = base64.b64decode(data) if b64decode else data.data
        if dirpath is None:
            return data

        if filename is None:
            filename = media.replace('/', ':').split(':')[-1]
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
        filepath = os.path.join(dirpath, filename)
        if os.path.exists(filepath) and not overwrite:
            raise FileExistsError("[Errno 17] File exists: '%s'" % filepath)

        with open(filepath, 'wb') as fhandler:
            fhandler.write(data)

    def info(self, media):
        """Returns information about *media*."""
        return self._dokuwiki.send('wiki.getAttachmentInfo', media)

    def add(self, media, filepath, overwrite=True):
        """Set *media* from the local file *filepath*. The *overwrite*
        parameter specifies whether the media must be overwritten if it
        exists remotely.
        """
        with open(filepath, 'rb') as fhandler:
            self._dokuwiki.send('wiki.putAttachment', media,
                                Binary(fhandler.read()), ow=overwrite)

    def set(self, media, _bytes, overwrite=True, b64encode=False):
        """Set *media* from *_bytes*. The *overwrite* parameter specifies
        whether the media must be overwritten if it exists remotely.
        """
        data = base64.b64encode(_bytes) if b64encode else Binary(_bytes)
        self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite)

    def delete(self, media):
        """Delete *media*."""
        return self._dokuwiki.send('wiki.deleteAttachment', media)
\"\"\" date = date.value", "def get(self, page, version=None): \"\"\"Returns the content of *page*. The", "\"\"\"Property that returns the current time at the remote wiki", "overwrite=True, b64encode=False): \"\"\"Set *media* from *_bytes*. *overwrite* parameter specify if", "\"\"\"Property that returns the XML RPC interface version of the", "pip install dokuwiki Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_ \"\"\"", "# Initialize XMLRPC client. try: params = _URL_RE.search(url).groupdict() if cookieAuth", "# Sometime the first line of the XML response is", "returned by ``wiki.getRPCVersionSupported``. \"\"\" return self.send('dokuwiki.getXMLRPCAPIVersion') @property def xmlrpc_supported_version(self): \"\"\"Property", "file if it already exists locally. \"\"\" import os data", "if version is not None else self._dokuwiki.send('wiki.getPageInfo', page)) def get(self,", "version.\"\"\" return self.send('wiki.getRPCVersionSupported') @property def title(self): \"\"\"Property that returns the", "timedelta(hours=date_offset) class DokuWikiError(Exception): \"\"\"Exception raised by this module when there", "a boolean that indicate if the rule was correctly removed.", "to check the connection. if not self.login(user, password): raise DokuWikiError('invalid", "+ (date_offset.seconds + date_offset.days * 24 * 3600) * 1e6)", "list of medias changed since given *timestamp*. For example, for", "to list earlier versions in the history. \"\"\" return self._dokuwiki.send('wiki.getPageVersions',", "version) if version is not None else self._dokuwiki.send('wiki.getPageHTML', page)) def", "of the last version of the page is returned if", "sys import base64 import weakref from xml.parsers.expat import ExpatError if", "ERR: raise DokuWikiError(err) def delete(self, page): \"\"\"Delete *page* by setting", "self.send('dokuwiki.login', user, password) def add_acl(self, scope, user, permission): \"\"\"Add an", "return an ordered dictionnay.\"\"\" if keep_order: from collections import OrderedDict", "import datetime wiki.medias.changes(datetime(2016, 1, 1).timestamp()) \"\"\" return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp) def", "available versions of *page*. *offset* can be used to list", "is not None else self._dokuwiki.send('wiki.getPageHTML', page)) def set(self, page, content,", "**options): \"\"\"Returns all medias of the given *namespace*. Valid *options*", "import base64 import weakref from xml.parsers.expat import ExpatError if sys.version_info[0]", "def permission(self, page): \"\"\"Returns the permission level of *page*.\"\"\" return", "unlock=[page]) if result['unlockfail']: raise DokuWikiError('unable to unlock page') def permission(self,", "if the rule was correctly added. \"\"\" return self.send('plugin.acl.addAcl', scope,", "is a minor change \"\"\" try: return self._dokuwiki.send('wiki.putPage', page, content,", "If *dirpath* is not set the binary data is returned,", "methods for managing medias of a DokuWiki. This object is", "but it can be changed with *filename* parameter. *overwrite* parameter", "changes between DokuWiki versions ... 
This function convert *date* to", "'') def lock(self, page): \"\"\"Locks *page*.\"\"\" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[page],", "def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False): \"\"\"Returns the binary", "self._dokuwiki.send('wiki.getPageVersions', page, offset) def info(self, page, version=None): \"\"\"Returns informations of", "send_headers(self, connection, headers): if self._cookies: cookies = map(lambda x: x[0]", "method(*args) except Fault as err: if err.faultCode == 121: return", "not None else self._dokuwiki.send('wiki.getPage', page)) def append(self, page, content, **options):", "DokuWikiError('unable to unlock page') def permission(self, page): \"\"\"Returns the permission", "self._dokuwiki.send('wiki.listLinks', page) def backlinks(self, page): \"\"\"Returns a list of all", "checking * *pattern*: (str) check given pattern * *hash*: (bool)", "send(self, command, *args, **kwargs): \"\"\"Generic method for executing an XML-RPC", "not set. \"\"\" return (self._dokuwiki.send('wiki.getPageVersion', page, version) if version is", "the binary data of *media* or save it to a", "self.proxy = ServerProxy(url, **kwargs) else: if sys.version_info[0] == 3: self.proxy", "\"\"\" return (self._dokuwiki.send('wiki.getPageVersion', page, version) if version is not None", "DokuWiki versions ... This function convert *date* to a `datetime`", "`xmlrpclib`/`xmlrpc.client` **ServerProxy** parameters. The exception `DokuWikiError` is raised if the", "or '') except AttributeError: raise DokuWikiError(\"invalid url '%s'\" % url)", "regardless of ACL \"\"\" return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options) def changes(self,", "def del_acl(self, scope, user): \"\"\"Delete any ACL matching the given", "the title of the wiki.\"\"\" return self.send('dokuwiki.getTitle') def login(self, user,", "def add_acl(self, scope, user, permission): \"\"\"Add an `ACL <https://www.dokuwiki.org/acl>`_ rule", "content of the last version is returned if *version* is", "(date_offset.microseconds + (date_offset.seconds + date_offset.days * 24 * 3600) *", "ordered dictionnay.\"\"\" if keep_order: from collections import OrderedDict dataentry =", "the first 15 results. \"\"\" return self._dokuwiki.send('dokuwiki.search', string) def versions(self,", "(datetime.now() - datetime.utcnow()) # Python < 2.7 don't have the", "wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False) except (DokuWikiError, Exception) as", "base64.b64decode(data) if b64decode else data.data if dirpath is None: return", "check the connection. if not self.login(user, password): raise DokuWikiError('invalid login", "the XML RPC interface version of the remote Wiki. This", "_Pages(object): \"\"\"This object regroup methods for managing pages of a", "page, version) if version is not None else self._dokuwiki.send('wiki.getPage', page))", "Exception) as err: print('unable to connect: %s' % err) \"\"\"", "time(self): \"\"\"Property that returns the current time at the remote", "cookie\"\"\" try: for header in response.msg.get_all(\"Set-Cookie\"): cookie = header.split(\";\", 1)[0]", "'XML or text declaration not at start of entity: line", "Transport.__init__(self) self._cookies = dict() def send_request(self, connection, handler, request_body): Transport.send_request(self,", "credentials. It returns a boolean that indicates if the user", "to the XMLRPC server.\"\"\" # Initialize XMLRPC client. try: params", "from local file *filepath*. 
*overwrite* parameter specify if the media", "start of entity: line 2, column 0' _URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?')", "overwrite=True): \"\"\"Set *media* from local file *filepath*. *overwrite* parameter specify", "self._dokuwiki.send('wiki.getAttachment', media) data = base64.b64decode(data) if b64decode else data.data if", "if err.faultCode == 121: return {} elif err.faultCode == 321:", "The content of the last version is returned if *version*", "invalid domain, ``ProtocolError`` for an invalid wiki, ...) are not", "media, data, ow=overwrite) def delete(self, media): \"\"\"Delete *media*.\"\"\" return self._dokuwiki.send('wiki.deleteAttachment',", "header in response.msg.get_all(\"Set-Cookie\"): cookie = header.split(\";\", 1)[0] cookieKey, cookieValue =", "(self._dokuwiki.send('wiki.getPageVersion', page, version) if version is not None else self._dokuwiki.send('wiki.getPage',", "file *filepath*. *overwrite* parameter specify if the media must be", "first 15 results. \"\"\" return self._dokuwiki.send('dokuwiki.search', string) def versions(self, page,", "def add(self, media, filepath, overwrite=True): \"\"\"Set *media* from local file", "else Binary(_bytes) self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite) def delete(self, media): \"\"\"Delete", "Binary(fhandler.read()), ow=overwrite) def set(self, media, _bytes, overwrite=True, b64encode=False): \"\"\"Set *media*", "catched. .. code:: try: wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False)", "\"\"\"Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts the page/namespace *scope*", "content of *page*. The HTML content of the last version", "format changes between DokuWiki versions ... This function convert *date*", "This function convert *date* to a `datetime` object. \"\"\" date", "from collections import OrderedDict dataentry = OrderedDict() else: dataentry =", "\"\"\" import re import sys import base64 import weakref from", "Fault as err: if err.faultCode == 121: return {} elif", "append(self, page, content, **options): \"\"\"Appends *content* text to *page*. Valid", "and *password* are respectively the URL, the login and the", "Fault, Transport from urllib import urlencode from datetime import datetime,", "*kwargs* are the arguments and parameters needed by the command.", "XMLRPC client. try: params = _URL_RE.search(url).groupdict() if cookieAuth == False:", "+ x[1], self._cookies.items()) connection.putheader(\"Cookie\", \"; \".join(cookies)) def parse_response(self, response): \"\"\"parse", "by hand! date_offset = (date_offset.microseconds + (date_offset.seconds + date_offset.days *", "parse_response(self, response): \"\"\"parse and store cookie\"\"\" try: for header in", "this module when there is an error.\"\"\" pass class CookiesTransport(Transport):", "not None else self._dokuwiki.send('wiki.getPageInfo', page)) def get(self, page, version=None): \"\"\"Returns", "\"\"\"Returns a list of all links referencing *page*.\"\"\" return self._dokuwiki.send('wiki.getBackLinks',", "cookie.split(\"=\", 1) self._cookies[cookieKey] = cookieValue finally: return Transport.parse_response(self, response) class", "*timestamp*. For example, for returning all changes since *2016-01-01*:: from", "with open(filepath, 'rb') as fhandler: self._dokuwiki.send('wiki.putAttachment', media, Binary(fhandler.read()), ow=overwrite) def", "string) def versions(self, page, offset=0): \"\"\"Returns the available versions of", "*permission* level. 
def date(date):
    """DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime`` type
    and the format changes between DokuWiki versions ... This function converts
    *date* to a `datetime` object.
    """
    date = date.value
    return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')
            if len(date) == 24
            else datetime.strptime(date, '%Y%m%dT%H:%M:%S'))


def utc2local(date):
    """DokuWiki returns dates with a +0000 timezone. This function converts
    *date* to the local time.
    """
    date_offset = (datetime.now() - datetime.utcnow())
    # Python < 2.7 doesn't have the 'total_seconds' method so calculate it by hand!
    date_offset = (date_offset.microseconds
                   + (date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6
    date_offset = int(round(date_offset / 60 / 60))
    return date + timedelta(hours=date_offset)


class DokuWikiError(Exception):
    """Exception raised by this module when there is an error."""
    pass
class CookiesTransport(Transport):
    """A Python3 xmlrpc.client.Transport subclass that retains cookies."""

    def __init__(self):
        Transport.__init__(self)
        self._cookies = dict()

    def send_headers(self, connection, headers):
        if self._cookies:
            cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
            connection.putheader("Cookie", "; ".join(cookies))
        Transport.send_headers(self, connection, headers)

    def parse_response(self, response):
        """parse and store cookies"""
        try:
            for header in response.msg.get_all("Set-Cookie"):
                cookie = header.split(";", 1)[0]
                cookieKey, cookieValue = cookie.split("=", 1)
                self._cookies[cookieKey] = cookieValue
        finally:
            return Transport.parse_response(self, response)


class CookiesTransport2(Transport):
    """A Python2 xmlrpclib.Transport subclass that retains cookies."""

    def __init__(self):
        Transport.__init__(self)
        self._cookies = dict()

    def send_request(self, connection, handler, request_body):
        Transport.send_request(self, connection, handler, request_body)
        # set cookie below handler
        if self._cookies:
            cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
            connection.putheader("Cookie", "; ".join(cookies))

    def parse_response(self, response):
        """parse and store cookies"""
        try:
            for header in response.getheader("set-cookie").split(", "):
                # filter 'expire' information
                if not header.startswith("D"):
                    continue
                cookie = header.split(";", 1)[0]
                cookieKey, cookieValue = cookie.split("=", 1)
                self._cookies[cookieKey] = cookieValue
        finally:
            return Transport.parse_response(self, response)
\"\"\" return (self._dokuwiki.send('wiki.getPageVersion', page, version) if version", "\"\"\" return self.send('dokuwiki.getTime') @property def xmlrpc_version(self): \"\"\"Property that returns the", "page) def links(self, page): \"\"\"Returns a list of all links", "start = True continue if start: page_content.append(line) return '\\n'.join(page_content) if", "= cookieValue finally: return Transport.parse_response(self, response) class CookiesTransport2(Transport): \"\"\"A Python2", "if not self.login(user, password): raise DokuWikiError('invalid login or password!') #", "or save it to a file. If *dirpath* is not", "b64decode else data.data if dirpath is None: return data if", "keep_order: from collections import OrderedDict dataentry = OrderedDict() else: dataentry", "it exists remotely. \"\"\" data = base64.b64encode(_bytes) if b64encode else", "\"\"\"Remove dataentry from *content*.\"\"\" page_content = [] start = False", "xmlrpc_version(self): \"\"\"Property that returns the XML RPC interface version of", "content, options) except ExpatError as err: # Sometime the first", "manage `data entries <https://www.dokuwiki.org/plugin:data>`_.\"\"\" @staticmethod def get(content, keep_order=False): \"\"\"Get dataentry", "since *2016-01-01*:: from datetime import datetime wiki.medias.changes(datetime(2016, 1, 1).timestamp()) \"\"\"", "has been done. This # allow to ignore the error.", "page): \"\"\"Returns a list of all links contained in *page*.\"\"\"", "**kwargs) else: if sys.version_info[0] == 3: self.proxy = ServerProxy(url, CookiesTransport(),", "allow to ignore the error. if str(err) != ERR: raise", "the local time. \"\"\" date_offset = (datetime.now() - datetime.utcnow()) #", "do an md5 sum of content * *skipacl*: (bool) list", "'\\n'.join( '%s:%s' % (attr, value) for attr, value in data.items()))", "*date* to a `datetime` object. \"\"\" date = date.value return", "<https://www.dokuwiki.org/dokuwiki>`_ wikis by using the provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_. It", "def changes(self, timestamp): \"\"\"Returns a list of changes since given", "changes since *2016-01-01*:: from datetime import datetime wiki.medias.changes(datetime(2016, 1, 1).timestamp())", "* 1e6) / 1e6 date_offset = int(round(date_offset / 60 /", "= base64.b64encode(_bytes) if b64encode else Binary(_bytes) self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite)", "* *skipacl*: (bool) list everything regardless of ACL \"\"\" return", "wiki.medias.changes(datetime(2016, 1, 1).timestamp()) \"\"\" return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp) def get(self, media,", "dokuwiki Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_ \"\"\" import re", "return [] raise DokuWikiError(err) except ExpatError as err: if str(err)", "cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())", "\"\"\"Returns informations of *media*.\"\"\" return self._dokuwiki.send('wiki.getAttachmentInfo', media) def add(self, media,", "self._dokuwiki.send('dokuwiki.setLocks', lock=[page], unlock=[]) if result['lockfail']: raise DokuWikiError('unable to lock page')", "Initialize XMLRPC client. try: params = _URL_RE.search(url).groupdict() if cookieAuth ==", "* 3600) * 1e6) / 1e6 date_offset = int(round(date_offset /", "if cookieAuth == False: self.proxy = ServerProxy(url, **kwargs) else: if", "def changes(self, timestamp): \"\"\"Returns the list of medias changed since", "to overwrite the file if it already exists locally. 
\"\"\"", "media, _bytes, overwrite=True, b64encode=False): \"\"\"Set *media* from *_bytes*. *overwrite* parameter", "content.split('\\n'): if line == '----' and not start: start =", "used). It returns a boolean that indicate if the rule", "set(self, page, content, **options): \"\"\"Set/replace the *content* of *page*. Valid", "the given *namespace*. Valid *options* are: * *depth*: (int) recursion", "date + timedelta(hours=date_offset) class DokuWikiError(Exception): \"\"\"Exception raised by this module", "if sys.version_info[0] == 3: self.proxy = ServerProxy(url, CookiesTransport(), **kwargs) else:", "*page*. Valid *options* are: * *sum*: (str) change summary *", "and not overwrite: raise FileExistsError(\"[Errno 17] File exists: '%s'\" %", "of *page*. The content of the last version is returned", "version) if version is not None else self._dokuwiki.send('wiki.getPageInfo', page)) def", "\"; \".join(cookies)) Transport.send_headers(self, connection, headers) def parse_response(self, response): \"\"\"parse and", "dokuwiki): self._dokuwiki = dokuwiki def list(self, namespace='/', **options): \"\"\"List all", "dataentry from *content*.\"\"\" page_content = [] start = False for", "``wiki.getRPCVersionSupported``. \"\"\" return self.send('dokuwiki.getXMLRPCAPIVersion') @property def xmlrpc_supported_version(self): \"\"\"Property that returns", "compatible with python2.7 and python3+. Installation ------------ It is on", "of *media* or save it to a file. If *dirpath*", "return method(*args) except Fault as err: if err.faultCode == 121:", "media must be overwrite if it exists remotely. \"\"\" with", "from xmlrpclib import ServerProxy, Binary, Fault, Transport from urllib import", "all * *hash*: (bool) do an md5 sum of content", "self._cookies[cookieKey] = cookieValue finally: return Transport.parse_response(self, response) class CookiesTransport2(Transport): \"\"\"A", "not catched. .. code:: try: wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD',", "*user* and *password* are respectively the URL, the login and", "is returned if *version* is not set. \"\"\" return (self._dokuwiki.send('wiki.getPageHTMLVersion',", "if it exists remotely. \"\"\" data = base64.b64encode(_bytes) if b64encode", "entries <https://www.dokuwiki.org/plugin:data>`_.\"\"\" @staticmethod def get(content, keep_order=False): \"\"\"Get dataentry from *content*.", "@property def time(self): \"\"\"Property that returns the current time at", "return self._dokuwiki.send('dokuwiki.search', string) def versions(self, page, offset=0): \"\"\"Returns the available", "``pip`` command to install it:: pip install dokuwiki Otherwise sources", "not None else self._dokuwiki.send('wiki.getPageHTML', page)) def set(self, page, content, **options):", "changes(self, timestamp): \"\"\"Returns the list of medias changed since given", "that returns the current time at the remote wiki server", "content of *page*. The content of the last version is", "import datetime, timedelta ERR = 'XML or text declaration not", "in response.getheader(\"set-cookie\").split(\", \"): # filter 'expire' information if not header.startswith(\"D\"):", "*page*. The HTML content of the last version of the", "return self._dokuwiki.send('wiki.putPage', page, content, options) except ExpatError as err: #", "*page*. *offset* can be used to list earlier versions in", "used to list earlier versions in the history. 
\"\"\" return", "permission(self, page): \"\"\"Returns the permission level of *page*.\"\"\" return self._dokuwiki.send('wiki.aclCheck',", "= _Medias(weakref.ref(self)()) def send(self, command, *args, **kwargs): \"\"\"Generic method for", "content * *skipacl*: (bool) list everything regardless of ACL \"\"\"", "set the binary data is returned, otherwise the data is", "It is compatible with python2.7 and python3+. Installation ------------ It", "data if filename is None: filename = media.replace('/', ':').split(':')[-1] if", "from *_bytes*. *overwrite* parameter specify if the media must be", "'%Y%m%dT%H:%M:%S')) def utc2local(date): \"\"\"DokuWiki returns date with a +0000 timezone.", "if not found: raise DokuWikiError('no dataentry found') return dataentry @staticmethod", "import urlencode from datetime import datetime, timedelta ERR = 'XML", "filter 'expire' information if not header.startswith(\"D\"): continue cookie = header.split(\";\",", "dokuwiki): self._dokuwiki = dokuwiki def list(self, namespace='/', **options): \"\"\"Returns all", "the *content* of *page*. Valid *options* are: * *sum*: (str)", "media must be overwrite if it exists remotely. \"\"\" data", "exception `DokuWikiError` is raised if the authentification fails but others", "return self.set(page, '') def lock(self, page): \"\"\"Locks *page*.\"\"\" result =", "\"\"\"Appends *content* text to *page*. Valid *options* are: * *sum*:", "self.send('plugin.acl.delAcl', scope, user) class _Pages(object): \"\"\"This object regroup methods for", "set cookie below handler if self._cookies: cookies = map(lambda x:", "skip acl checking * *pattern*: (str) check given pattern *", "although the change has been done. This # allow to", "% filepath) with open(filepath, 'wb') as fhandler: fhandler.write(data) def info(self,", "is raised if the authentification fails but others exceptions (like", "in command.split('.'): method = getattr(method, elt) try: return method(*args) except", "command to install it:: pip install dokuwiki Otherwise sources are", "self.set(page, '') def lock(self, page): \"\"\"Locks *page*.\"\"\" result = self._dokuwiki.send('dokuwiki.setLocks',", "minor change \"\"\" try: return self._dokuwiki.send('wiki.putPage', page, content, options) except", "'PASSWORD', cookieAuth=False) except (DokuWikiError, Exception) as err: print('unable to connect:", "page') def unlock(self, page): \"\"\"Unlocks *page*.\"\"\" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[],", "*date* to the local time. \"\"\" date_offset = (datetime.now() -", "offset=0): \"\"\"Returns the available versions of *page*. *offset* can be", "to return an ordered dictionnay.\"\"\" if keep_order: from collections import", "arguments and parameters needed by the command. \"\"\" args =", "python3+. Installation ------------ It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you", "from urllib.parse import urlencode else: from xmlrpclib import ServerProxy, Binary,", "1).timestamp()) \"\"\" return self._dokuwiki.send('wiki.getRecentChanges', timestamp) def search(self, string): \"\"\"Performs a", "self._dokuwiki = dokuwiki def list(self, namespace='/', **options): \"\"\"Returns all medias", "if *version* is not set. 
\"\"\" return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)", "dataentry found') return dataentry @staticmethod def gen(name, data): \"\"\"Generate dataentry", "succesfully authenticate.\"\"\" return self.send('dokuwiki.login', user, password) def add_acl(self, scope, user,", "(bool) list everything regardless of ACL \"\"\" return self._dokuwiki.send('dokuwiki.getPagelist', namespace,", "since given *timestamp*. For example, for returning all changes since", "if *version* is not set. \"\"\" return (self._dokuwiki.send('wiki.getPageVersion', page, version)", "dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False) except (DokuWikiError, Exception) as err: print('unable", "server as Unix timestamp. \"\"\" return self.send('dokuwiki.getTime') @property def xmlrpc_version(self):", "\"\"\"Unlocks *page*.\"\"\" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[], unlock=[page]) if result['unlockfail']: raise", "\"\"\" return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options) def changes(self, timestamp): \"\"\"Returns a", "dataentry.setdefault(key, value) if not found: raise DokuWikiError('no dataentry found') return", "indicate if the rule was correctly removed. \"\"\" return self.send('plugin.acl.delAcl',", "ACL matching the given *scope* and *user* (or group if", "result['lockfail']: raise DokuWikiError('unable to lock page') def unlock(self, page): \"\"\"Unlocks", "a `datetime` object. \"\"\" date = date.value return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')", "= 'XML or text declaration not at start of entity:", "rule was correctly added. \"\"\" return self.send('plugin.acl.addAcl', scope, user, permission)", "from *content*. *keep_order* indicates whether to return an ordered dictionnay.\"\"\"", "version is not None else self._dokuwiki.send('wiki.getPageHTML', page)) def set(self, page,", "return data if filename is None: filename = media.replace('/', ':').split(':')[-1]", "parameter specify if the media must be overwrite if it", "given *scope* and *user* (or group if *@group* syntax is", "class Dataentry(object): \"\"\"Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_.\"\"\" @staticmethod def", "authentification fails but others exceptions (like ``gaierror`` for invalid domain,", "def list(self, namespace='/', **options): \"\"\"Returns all medias of the given", "object regroup methods for managing medias of a DokuWiki. This", "dataentry = {} found = False for line in content.split('\\n'):", "data of *media* or save it to a file. If", "self.proxy = ServerProxy(url, CookiesTransport(), **kwargs) else: self.proxy = ServerProxy(url, CookiesTransport2(),", "with a +0000 timezone. This function convert *date* to the", "= ServerProxy(url, **kwargs) else: if sys.version_info[0] == 3: self.proxy =", "any ACL matching the given *scope* and *user* (or group", "\"\"\" with open(filepath, 'rb') as fhandler: self._dokuwiki.send('wiki.putAttachment', media, Binary(fhandler.read()), ow=overwrite)", "object by connecting to the XMLRPC server.\"\"\" # Initialize XMLRPC", "data is saved to a file. By default, the filename", "to lock page') def unlock(self, page): \"\"\"Unlocks *page*.\"\"\" result =", "+0000 timezone. This function convert *date* to the local time.", "interface version of the remote Wiki. This is DokuWiki implementation", "it can be changed with *filename* parameter. *overwrite* parameter allow", "version returned by ``wiki.getRPCVersionSupported``. 
\"\"\" return self.send('dokuwiki.getXMLRPCAPIVersion') @property def xmlrpc_supported_version(self):", "can be changed with *filename* parameter. *overwrite* parameter allow to", "`DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_.", "def delete(self, page): \"\"\"Delete *page* by setting an empty content.\"\"\"", "data = self._dokuwiki.send('wiki.getAttachment', media) data = base64.b64decode(data) if b64decode else", "options) def html(self, page, version=None): \"\"\"Returns HTML content of *page*.", "class CookiesTransport(Transport): \"\"\"A Python3 xmlrpc.client.Transport subclass that retains cookies.\"\"\" def", "use the ``pip`` command to install it:: pip install dokuwiki", "*filename* parameter. *overwrite* parameter allow to overwrite the file if", "0 for all * *hash*: (bool) do an md5 sum", "Python < 2.7 don't have the 'total_seconds' method so calculate", "%s' % err) \"\"\" def __init__(self, url, user, password, cookieAuth=False,", "at the remote wiki server as Unix timestamp. \"\"\" return", "self._dokuwiki.send('wiki.aclCheck', page) def links(self, page): \"\"\"Returns a list of all", "parameter. *overwrite* parameter allow to overwrite the file if it", "return self.send('plugin.acl.addAcl', scope, user, permission) def del_acl(self, scope, user): \"\"\"Delete", "convert *date* to the local time. \"\"\" date_offset = (datetime.now()", "*command*. *args* and *kwargs* are the arguments and parameters needed", "list(self, namespace='/', **options): \"\"\"Returns all medias of the given *namespace*.", "\"\"\"Generate dataentry *name* from *data*.\"\"\" return '---- dataentry %s ----\\n%s\\n----'", "ServerProxy, Binary, Fault, Transport from urllib.parse import urlencode else: from", "import weakref from xml.parsers.expat import ExpatError if sys.version_info[0] == 3:", "import os data = self._dokuwiki.send('wiki.getAttachment', media) data = base64.b64decode(data) if", "page, content, **options): \"\"\"Set/replace the *content* of *page*. Valid *options*", "This is DokuWiki implementation specific and independent of the supported", "self._cookies = dict() def send_headers(self, connection, headers): if self._cookies: cookies", "be used to list earlier versions in the history. \"\"\"", "coding: utf-8 -*- \"\"\"This python module aims to manage `DokuWiki", "def send_request(self, connection, handler, request_body): Transport.send_request(self, connection, handler, request_body) #", "\"\"\" return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version) if version is not None", "dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.medias.list() \"\"\" def __init__(self, dokuwiki): self._dokuwiki =", "for header in response.msg.get_all(\"Set-Cookie\"): cookie = header.split(\";\", 1)[0] cookieKey, cookieValue", "b64decode=False): \"\"\"Returns the binary data of *media* or save it", "dict() def send_request(self, connection, handler, request_body): Transport.send_request(self, connection, handler, request_body)", "{} found = False for line in content.split('\\n'): if line.strip().startswith('----", "and the format changes between DokuWiki versions ... 
This function", "class _Medias(object): \"\"\"This object regroup methods for managing medias of", "def set(self, page, content, **options): \"\"\"Set/replace the *content* of *page*.", "a list of all links contained in *page*.\"\"\" return self._dokuwiki.send('wiki.listLinks',", "URL, the login and the password for connecting to the", "but others exceptions (like ``gaierror`` for invalid domain, ``ProtocolError`` for", "*media* from *_bytes*. *overwrite* parameter specify if the media must", "the available versions of *page*. *offset* can be used to", "*sum*: (str) change summary * *minor*: (bool) whether this is", "% err) \"\"\" def __init__(self, url, user, password, cookieAuth=False, **kwargs):", "an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts the page/namespace *scope* to", "ServerProxy(url, **kwargs) else: if sys.version_info[0] == 3: self.proxy = ServerProxy(url,", "connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client` **ServerProxy** parameters. The", "( params['proto'], params['host'], params['uri'] or '') except AttributeError: raise DokuWikiError(\"invalid", "== 321: return [] raise DokuWikiError(err) except ExpatError as err:", "return self._dokuwiki.send('wiki.getPageVersions', page, offset) def info(self, page, version=None): \"\"\"Returns informations", "*@group* syntax for groups) with *permission* level. It returns a", "is None: filename = media.replace('/', ':').split(':')[-1] if not os.path.exists(dirpath): os.makedirs(dirpath)", "class CookiesTransport2(Transport): \"\"\"A Python2 xmlrpclib.Transport subclass that retains cookies.\"\"\" def", "OrderedDict() else: dataentry = {} found = False for line", "returns date with a +0000 timezone. This function convert *date*", "def ignore(content): \"\"\"Remove dataentry from *content*.\"\"\" page_content = [] start", "= self._dokuwiki.send('dokuwiki.setLocks', lock=[], unlock=[page]) if result['unlockfail']: raise DokuWikiError('unable to unlock", "check given pattern * *hash*: (bool) add hashes to result", "all pages of the given *namespace*. Valid *options* are: *", "of content * *skipacl*: (bool) list everything regardless of ACL", "\"\"\"Returns the binary data of *media* or save it to", "object is accessible from the ``pages`` property of an `DokuWiki`", "changed with *filename* parameter. *overwrite* parameter allow to overwrite the", "== False: url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % ( params['proto'], user, password,", "remote Wiki.\"\"\" return self.send('dokuwiki.getVersion') @property def time(self): \"\"\"Property that returns", "to manage `DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the provided `XML-RPC", "sum of content * *skipacl*: (bool) list everything regardless of", "= self._dokuwiki.send('dokuwiki.setLocks', lock=[page], unlock=[]) if result['lockfail']: raise DokuWikiError('unable to lock", "the login and the password for connecting to the wiki.", "the binary data is returned, otherwise the data is saved", "dataentry'): found = True continue elif line == '----': break", "= date.value return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S') if len(date) == 24 else", "True continue if start: page_content.append(line) return '\\n'.join(page_content) if page_content else", "an invalid wiki, ...) are not catched. .. code:: try:", "must be overwrite if it exists remotely. \"\"\" with open(filepath,", "if len(date) == 24 else datetime.strptime(date, '%Y%m%dT%H:%M:%S')) def utc2local(date): \"\"\"DokuWiki", "with *permission* level. 
It returns a boolean that indicate if", "from the ``pages`` property of an `DokuWiki` instance:: wiki =", "= dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.pages.list() \"\"\" def __init__(self, dokuwiki): self._dokuwiki", "if b64encode else Binary(_bytes) self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite) def delete(self,", "def __init__(self): Transport.__init__(self) self._cookies = dict() def send_headers(self, connection, headers):", "# Set \"namespaces\" for pages and medias functions. self.pages =", "the XMLRPC server.\"\"\" # Initialize XMLRPC client. try: params =", "timestamp) def search(self, string): \"\"\"Performs a fulltext search on *string*", "-*- \"\"\"This python module aims to manage `DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis", "if *@group* syntax is used). It returns a boolean that", "respectively the URL, the login and the password for connecting", "\"namespaces\" for pages and medias functions. self.pages = _Pages(weakref.ref(self)()) self.medias", "of *page*. Valid *options* are: * *sum*: (str) change summary", "media) def add(self, media, filepath, overwrite=True): \"\"\"Set *media* from local", "\"\"\"Property that returns the DokuWiki version of the remote Wiki.\"\"\"", "Transport from urllib import urlencode from datetime import datetime, timedelta", "os.path.exists(dirpath): os.makedirs(dirpath) filepath = os.path.join(dirpath, filename) if os.path.exists(filepath) and not", "@staticmethod def gen(name, data): \"\"\"Generate dataentry *name* from *data*.\"\"\" return", "Unix timestamp. \"\"\" return self.send('dokuwiki.getTime') @property def xmlrpc_version(self): \"\"\"Property that", "page, version=None): \"\"\"Returns the content of *page*. The content of", "correctly added. \"\"\" return self.send('plugin.acl.addAcl', scope, user, permission) def del_acl(self,", "elt in command.split('.'): method = getattr(method, elt) try: return method(*args)", "self.send('dokuwiki.getVersion') @property def time(self): \"\"\"Property that returns the current time", "* *depth*: (int) recursion level, 0 for all * *hash*:", "request_body): Transport.send_request(self, connection, handler, request_body) # set cookie below handler", "else datetime.strptime(date, '%Y%m%dT%H:%M:%S')) def utc2local(date): \"\"\"DokuWiki returns date with a", "by ``wiki.getRPCVersionSupported``. \"\"\" return self.send('dokuwiki.getXMLRPCAPIVersion') @property def xmlrpc_supported_version(self): \"\"\"Property that", "returning all changes since *2016-01-01*:: from datetime import datetime wiki.medias.changes(datetime(2016,", "return self._dokuwiki.send('wiki.deleteAttachment', media) class Dataentry(object): \"\"\"Object that manage `data entries", "This object is accessible from the ``pages`` property of an", "to connect: %s' % err) \"\"\" def __init__(self, url, user,", "wiki, ...) are not catched. .. 
code:: try: wiki =", "fulltext search on *string* and returns the first 15 results.", "(name, '\\n'.join( '%s:%s' % (attr, value) for attr, value in", "is accessible from the ``pages`` property of an `DokuWiki` instance::", "a boolean that indicates if the user succesfully authenticate.\"\"\" return", "filename=None, overwrite=False, b64decode=False): \"\"\"Returns the binary data of *media* or", "*options* are: * *depth*: (int) recursion level, 0 for all", "that manage `data entries <https://www.dokuwiki.org/plugin:data>`_.\"\"\" @staticmethod def get(content, keep_order=False): \"\"\"Get", "install dokuwiki Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_ \"\"\" import", "to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client` **ServerProxy** parameters. The exception", "dates of `xmlrpclib`/`xmlrpc.client` ``DateTime`` type and the format changes between", "local file *filepath*. *overwrite* parameter specify if the media must", "as Unix timestamp. \"\"\" return self.send('dokuwiki.getTime') @property def xmlrpc_version(self): \"\"\"Property", "'----': break elif not found: continue line_split = line.split(':') key", "boolean that indicate if the rule was correctly removed. \"\"\"", "scope, user, permission): \"\"\"Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts", "It returns a boolean that indicates if the user succesfully", "Transport.send_request(self, connection, handler, request_body) # set cookie below handler if", "= cookie.split(\"=\", 1) self._cookies[cookieKey] = cookieValue finally: return Transport.parse_response(self, response)", "int(round(date_offset / 60 / 60)) return date + timedelta(hours=date_offset) class", "medias functions. self.pages = _Pages(weakref.ref(self)()) self.medias = _Medias(weakref.ref(self)()) def send(self,", "pages of a DokuWiki. This object is accessible from the", "DokuWikiError('no dataentry found') return dataentry @staticmethod def gen(name, data): \"\"\"Generate", "'ExpatError' exception although the change has been done. This #", "*2* with the supported RPC API version.\"\"\" return self.send('wiki.getRPCVersionSupported') @property", "DokuWiki. This object is accessible from the ``medias`` property of", "an XML-RPC *command*. *args* and *kwargs* are the arguments and", "import ExpatError if sys.version_info[0] == 3: from xmlrpc.client import ServerProxy,", "an `DokuWiki` instance:: wiki = dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.medias.list() \"\"\"", "the media but it can be changed with *filename* parameter.", "of the XML response is blank which raise # the", "\"\"\"Delete *media*.\"\"\" return self._dokuwiki.send('wiki.deleteAttachment', media) class Dataentry(object): \"\"\"Object that manage", "method for executing an XML-RPC *command*. *args* and *kwargs* are", "*user* (or group if *@group* syntax is used). It returns", "params['host'], params['uri'] or '') else: url = '%s://%s%s/lib/exe/xmlrpc.php' % (", "syntax for groups) with *permission* level. It returns a boolean", "self._dokuwiki.send('wiki.putAttachment', media, Binary(fhandler.read()), ow=overwrite) def set(self, media, _bytes, overwrite=True, b64encode=False):", "= re.sub('#.*$', '', ':'.join(line_split[1:])).strip() dataentry.setdefault(key, value) if not found: raise", "``ProtocolError`` for an invalid wiki, ...) are not catched. ..", "*pattern*: (str) check given pattern * *hash*: (bool) add hashes", "the rule was correctly removed. 
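
# Illustrative sketch, not part of the original module: a typical read/modify
# cycle with the ``pages`` helper. *wiki* is assumed to be a connected
# ``DokuWiki`` instance and the page name is a placeholder.
def _example_pages_usage(wiki):
    content = wiki.pages.get('sandbox:demo')
    wiki.pages.set('sandbox:demo', content + '\nedited',
                   sum='demo edit', minor=True)
    return wiki.pages.info('sandbox:demo')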
\"\"\" return self.send('plugin.acl.delAcl', scope, user)", "if str(err) != ERR: raise DokuWikiError(err) def delete(self, page): \"\"\"Delete", "dirpath=None, filename=None, overwrite=False, b64decode=False): \"\"\"Returns the binary data of *media*", "* *depth*: (int) recursion level, 0 for all * *skipacl*:", "password!') # Set \"namespaces\" for pages and medias functions. self.pages", "rule that restricts the page/namespace *scope* to *user* (use *@group*", "def xmlrpc_supported_version(self): \"\"\"Property that returns *2* with the supported RPC", "command.split('.'): method = getattr(method, elt) try: return method(*args) except Fault", "Transport.send_headers(self, connection, headers) def parse_response(self, response): \"\"\"parse and store cookie\"\"\"", "parameters needed by the command. \"\"\" args = list(args) if", "elt) try: return method(*args) except Fault as err: if err.faultCode", "date_offset = int(round(date_offset / 60 / 60)) return date +", "list everything regardless of ACL \"\"\" return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)", "is returned if *version* is not set. \"\"\" return (self._dokuwiki.send('wiki.getPageInfoVersion',", "self._dokuwiki.send('wiki.getPageHTML', page)) def set(self, page, content, **options): \"\"\"Set/replace the *content*", "be overwrite if it exists remotely. \"\"\" data = base64.b64encode(_bytes)", "change \"\"\" return self._dokuwiki.send('dokuwiki.appendPage', page, content, options) def html(self, page,", "ExpatError as err: if str(err) != ERR: raise DokuWikiError(err) @property", "utc2local(date): \"\"\"DokuWiki returns date with a +0000 timezone. This function", "connection to a DokuWiki wiki. *url*, *user* and *password* are", "= self._dokuwiki.send('wiki.getAttachment', media) data = base64.b64decode(data) if b64decode else data.data", "key = line_split[0].strip() value = re.sub('#.*$', '', ':'.join(line_split[1:])).strip() dataentry.setdefault(key, value)", "error.\"\"\" pass class CookiesTransport(Transport): \"\"\"A Python3 xmlrpc.client.Transport subclass that retains", "False: self.proxy = ServerProxy(url, **kwargs) else: if sys.version_info[0] == 3:", "set. \"\"\" return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version) if version is not", "def versions(self, page, offset=0): \"\"\"Returns the available versions of *page*.", "version is not None else self._dokuwiki.send('wiki.getPage', page)) def append(self, page,", "was correctly removed. \"\"\" return self.send('plugin.acl.delAcl', scope, user) class _Pages(object):", "of all links referencing *page*.\"\"\" return self._dokuwiki.send('wiki.getBackLinks', page) class _Medias(object):", "------------ It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use", "data is returned, otherwise the data is saved to a", "*args* and *kwargs* are the arguments and parameters needed by", "Binary(_bytes) self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite) def delete(self, media): \"\"\"Delete *media*.\"\"\"", "allow to overwrite the file if it already exists locally.", "content, **options): \"\"\"Appends *content* text to *page*. 
Valid *options* are:", "= '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % ( params['proto'], user, password, params['host'], params['uri'] or", "print('unable to connect: %s' % err) \"\"\" def __init__(self, url,", "1e6) / 1e6 date_offset = int(round(date_offset / 60 / 60))", "manage `DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the provided `XML-RPC API", "@property def version(self): \"\"\"Property that returns the DokuWiki version of", "24 else datetime.strptime(date, '%Y%m%dT%H:%M:%S')) def utc2local(date): \"\"\"DokuWiki returns date with", "def search(self, string): \"\"\"Performs a fulltext search on *string* and", "*version* is not set. \"\"\" return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version) if", "':'.join(line_split[1:])).strip() dataentry.setdefault(key, value) if not found: raise DokuWikiError('no dataentry found')", "is an error.\"\"\" pass class CookiesTransport(Transport): \"\"\"A Python3 xmlrpc.client.Transport subclass", "File exists: '%s'\" % filepath) with open(filepath, 'wb') as fhandler:", "response): \"\"\"parse and store cookie\"\"\" try: for header in response.getheader(\"set-cookie\").split(\",", "permission): \"\"\"Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts the page/namespace", "\"\"\"Property that returns the title of the wiki.\"\"\" return self.send('dokuwiki.getTitle')", "methods for managing pages of a DokuWiki. This object is", "the rule was correctly added. \"\"\" return self.send('plugin.acl.addAcl', scope, user,", "line of the XML response is blank which raise #", "file. By default, the filename is the name of the", "@property def title(self): \"\"\"Property that returns the title of the", "versions(self, page, offset=0): \"\"\"Returns the available versions of *page*. *offset*", "page)) def get(self, page, version=None): \"\"\"Returns the content of *page*.", "as fhandler: self._dokuwiki.send('wiki.putAttachment', media, Binary(fhandler.read()), ow=overwrite) def set(self, media, _bytes,", "cookie = header.split(\";\", 1)[0] cookieKey, cookieValue = cookie.split(\"=\", 1) self._cookies[cookieKey]", "raise DokuWikiError('unable to lock page') def unlock(self, page): \"\"\"Unlocks *page*.\"\"\"", "break elif not found: continue line_split = line.split(':') key =", "self.send('wiki.getRPCVersionSupported') @property def title(self): \"\"\"Property that returns the title of", "by this module when there is an error.\"\"\" pass class", "Valid *options* are: * *sum*: (str) change summary * *minor*:", "dokuwiki def list(self, namespace='/', **options): \"\"\"List all pages of the", "def time(self): \"\"\"Property that returns the current time at the", "return self._dokuwiki.send('wiki.getAttachments', namespace, options) def changes(self, timestamp): \"\"\"Returns the list", "def info(self, page, version=None): \"\"\"Returns informations of *page*. Informations of", "of medias changed since given *timestamp*. For example, for returning", "datetime.utcnow()) # Python < 2.7 don't have the 'total_seconds' method", "value) if not found: raise DokuWikiError('no dataentry found') return dataentry", "0' _URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?') def date(date): \"\"\"DokuWiki returns dates of", "and the password for connecting to the wiki. *kwargs* are", "def list(self, namespace='/', **options): \"\"\"List all pages of the given", "def utc2local(date): \"\"\"DokuWiki returns date with a +0000 timezone. 
This", "3: from xmlrpc.client import ServerProxy, Binary, Fault, Transport from urllib.parse", "the DokuWiki version of the remote Wiki.\"\"\" return self.send('dokuwiki.getVersion') @property", "* *minor*: (bool) whether this is a minor change \"\"\"", "lock(self, page): \"\"\"Locks *page*.\"\"\" result = self._dokuwiki.send('dokuwiki.setLocks', lock=[page], unlock=[]) if", "re import sys import base64 import weakref from xml.parsers.expat import", "*password* credentials. It returns a boolean that indicates if the", "user): \"\"\"Delete any ACL matching the given *scope* and *user*", "FileExistsError(\"[Errno 17] File exists: '%s'\" % filepath) with open(filepath, 'wb')", "for an invalid wiki, ...) are not catched. .. code::", "permission level of *page*.\"\"\" return self._dokuwiki.send('wiki.aclCheck', page) def links(self, page):", "'=' + x[1], self._cookies.items()) connection.putheader(\"Cookie\", \"; \".join(cookies)) Transport.send_headers(self, connection, headers)", "and store cookie\"\"\" try: for header in response.getheader(\"set-cookie\").split(\", \"): #", "from *data*.\"\"\" return '---- dataentry %s ----\\n%s\\n----' % (name, '\\n'.join(", "list earlier versions in the history. \"\"\" return self._dokuwiki.send('wiki.getPageVersions', page,", "the ``pip`` command to install it:: pip install dokuwiki Otherwise", "overwrite=False, b64decode=False): \"\"\"Returns the binary data of *media* or save", "line in content.split('\\n'): if line.strip().startswith('---- dataentry'): found = True continue", "is None: return data if filename is None: filename =", "else self._dokuwiki.send('wiki.getPageInfo', page)) def get(self, page, version=None): \"\"\"Returns the content", "data.data if dirpath is None: return data if filename is", "empty content.\"\"\" return self.set(page, '') def lock(self, page): \"\"\"Locks *page*.\"\"\"", "delete(self, page): \"\"\"Delete *page* by setting an empty content.\"\"\" return", "os.makedirs(dirpath) filepath = os.path.join(dirpath, filename) if os.path.exists(filepath) and not overwrite:", "result = self._dokuwiki.send('dokuwiki.setLocks', lock=[], unlock=[page]) if result['unlockfail']: raise DokuWikiError('unable to", "if line.strip().startswith('---- dataentry'): found = True continue elif line ==", "for connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client` **ServerProxy** parameters.", "[] start = False for line in content.split('\\n'): if line", "self._dokuwiki.send('dokuwiki.search', string) def versions(self, page, offset=0): \"\"\"Returns the available versions", "versions in the history. \"\"\" return self._dokuwiki.send('wiki.getPageVersions', page, offset) def", "def __init__(self, dokuwiki): self._dokuwiki = dokuwiki def list(self, namespace='/', **options):", "'User', 'Password') wiki.medias.list() \"\"\" def __init__(self, dokuwiki): self._dokuwiki = dokuwiki", "Transport.__init__(self) self._cookies = dict() def send_headers(self, connection, headers): if self._cookies:", "from xmlrpc.client import ServerProxy, Binary, Fault, Transport from urllib.parse import", "not set. \"\"\" return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version) if version is", "`DokuWiki` instance:: wiki = dokuwiki.DokuWiki('URL', 'User', 'Password') wiki.medias.list() \"\"\" def", "60 / 60)) return date + timedelta(hours=date_offset) class DokuWikiError(Exception): \"\"\"Exception", "the connection. if not self.login(user, password): raise DokuWikiError('invalid login or", "the error. 
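
# Illustrative sketch, not part of the original module: upload raw bytes as a
# media, read them back, and save a local copy. *wiki* is assumed to be a
# connected ``DokuWiki`` instance; the media id and directory are placeholders.
def _example_medias_usage(wiki):
    wiki.medias.set('sandbox:hello.txt', b'hello world')  # upload
    data = wiki.medias.get('sandbox:hello.txt')           # raw bytes back
    wiki.medias.get('sandbox:hello.txt', dirpath='/tmp', overwrite=True)
    return data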
class Dataentry(object):
    """Object that manages `data entries <https://www.dokuwiki.org/plugin:data>`_."""

    @staticmethod
    def get(content, keep_order=False):
        """Get a dataentry from *content*. *keep_order* indicates whether to
        return an ordered dictionary.
        """
        if keep_order:
            from collections import OrderedDict
            dataentry = OrderedDict()
        else:
            dataentry = {}

        found = False
        for line in content.split('\n'):
            if line.strip().startswith('---- dataentry'):
                found = True
                continue
            elif line == '----':
                break
            elif not found:
                continue

            line_split = line.split(':')
            key = line_split[0].strip()
            value = re.sub('#.*$', '', ':'.join(line_split[1:])).strip()
            dataentry.setdefault(key, value)

        if not found:
            raise DokuWikiError('no dataentry found')
        return dataentry

    @staticmethod
    def gen(name, data):
        """Generate a dataentry *name* from *data*."""
        return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join(
            '%s:%s' % (attr, value) for attr, value in data.items()))

    @staticmethod
    def ignore(content):
        """Remove a dataentry from *content*."""
        page_content = []
        start = False

        for line in content.split('\n'):
            if line == '----' and not start:
                start = True
                continue
            if start:
                page_content.append(line)

        return '\n'.join(page_content) if page_content else content
For example, for", "def gen(name, data): \"\"\"Generate dataentry *name* from *data*.\"\"\" return '----", "*url*, *user* and *password* are respectively the URL, the login", "scope, user) class _Pages(object): \"\"\"This object regroup methods for managing", "url) if cookieAuth == False: self.proxy = ServerProxy(url, **kwargs) else:", "/ 1e6 date_offset = int(round(date_offset / 60 / 60)) return", "@staticmethod def ignore(content): \"\"\"Remove dataentry from *content*.\"\"\" page_content = []", "= dokuwiki def list(self, namespace='/', **options): \"\"\"List all pages of", "remote Wiki. This is DokuWiki implementation specific and independent of", "parameter allow to overwrite the file if it already exists", "fhandler.write(data) def info(self, media): \"\"\"Returns informations of *media*.\"\"\" return self._dokuwiki.send('wiki.getAttachmentInfo',", "XMLRPC server.\"\"\" # Initialize XMLRPC client. try: params = _URL_RE.search(url).groupdict()", "if cookieAuth == False: url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % ( params['proto'],", "if str(err) != ERR: raise DokuWikiError(err) @property def version(self): \"\"\"Property", "``pages`` property of an `DokuWiki` instance:: wiki = dokuwiki.DokuWiki('URL', 'User',", "object regroup methods for managing pages of a DokuWiki. This", "self._dokuwiki.send('dokuwiki.getPagelist', namespace, options) def changes(self, timestamp): \"\"\"Returns a list of", "\"\"\" try: return self._dokuwiki.send('wiki.putPage', page, content, options) except ExpatError as", "overwrite if it exists remotely. \"\"\" data = base64.b64encode(_bytes) if", "(attr, value) for attr, value in data.items())) @staticmethod def ignore(content):", "* *skipacl*: (bool) skip acl checking * *pattern*: (str) check", "be overwrite if it exists remotely. \"\"\" with open(filepath, 'rb')", "the list of medias changed since given *timestamp*. For example,", "Wiki. This is DokuWiki implementation specific and independent of the", "if sys.version_info[0] == 3: from xmlrpc.client import ServerProxy, Binary, Fault,", "\"\"\" return self._dokuwiki.send('wiki.getRecentChanges', timestamp) def search(self, string): \"\"\"Performs a fulltext", "str(err) != ERR: raise DokuWikiError(err) def delete(self, page): \"\"\"Delete *page*", "__init__(self, dokuwiki): self._dokuwiki = dokuwiki def list(self, namespace='/', **options): \"\"\"List", "'%s:%s' % (attr, value) for attr, value in data.items())) @staticmethod", "continue elif line == '----': break elif not found: continue", "*options* are: * *sum*: (str) change summary * *minor*: (bool)", "`data entries <https://www.dokuwiki.org/plugin:data>`_.\"\"\" @staticmethod def get(content, keep_order=False): \"\"\"Get dataentry from", "*page*.\"\"\" return self._dokuwiki.send('wiki.getBackLinks', page) class _Medias(object): \"\"\"This object regroup methods", "returning all changes since *2016-01-01*:: from datetime import datetime wiki.pages.changes(datetime(2016,", "returns a boolean that indicate if the rule was correctly", "implementation specific and independent of the supported standard API version", "send_request(self, connection, handler, request_body): Transport.send_request(self, connection, handler, request_body) # set", "and parameters needed by the command. 
\"\"\" args = list(args)", "re.sub('#.*$', '', ':'.join(line_split[1:])).strip() dataentry.setdefault(key, value) if not found: raise DokuWikiError('no", "are: * *sum*: (str) change summary * *minor*: (bool) whether", "independent of the supported standard API version returned by ``wiki.getRPCVersionSupported``.", "header.split(\";\", 1)[0] cookieKey, cookieValue = cookie.split(\"=\", 1) self._cookies[cookieKey] = cookieValue", "ExpatError as err: # Sometime the first line of the", "ignore the error. if str(err) != ERR: raise DokuWikiError(err) def", "DokuWikiError(\"invalid url '%s'\" % url) if cookieAuth == False: self.proxy", "RPC API version.\"\"\" return self.send('wiki.getRPCVersionSupported') @property def title(self): \"\"\"Property that", "\"\"\"DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime`` type and the format", "change has been done. This # allow to ignore the", "params['proto'], params['host'], params['uri'] or '') except AttributeError: raise DokuWikiError(\"invalid url", "if version is not None else self._dokuwiki.send('wiki.getPageHTML', page)) def set(self,", "= dict() def send_headers(self, connection, headers): if self._cookies: cookies =", "title(self): \"\"\"Property that returns the title of the wiki.\"\"\" return", "start: start = True continue if start: page_content.append(line) return '\\n'.join(page_content)", "return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version) if version is not None else", "x[0] + '=' + x[1], self._cookies.items()) connection.putheader(\"Cookie\", \"; \".join(cookies)) def", "params['uri'] or '') except AttributeError: raise DokuWikiError(\"invalid url '%s'\" %", "def __init__(self, url, user, password, cookieAuth=False, **kwargs): \"\"\"Initialize the object", "level. It returns a boolean that indicate if the rule", "`datetime` object. \"\"\" date = date.value return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S') if", "ERR = 'XML or text declaration not at start of", "if *version* is not set. \"\"\" return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)", "if self._cookies: cookies = map(lambda x: x[0] + '=' +", "links(self, page): \"\"\"Returns a list of all links contained in", "user, permission) def del_acl(self, scope, user): \"\"\"Delete any ACL matching", "*skipacl*: (bool) skip acl checking * *pattern*: (str) check given" ]
[ "VERSION_FILE = \"test_aide/_version.py\" version_file_str = open(VERSION_FILE, \"rt\").read() VERSION_STR_RE = r\"^__version__", "GI Data Science Team\", author_email=\"<EMAIL>\", description=\"Package of helper functions to", "re.search(VERSION_STR_RE, version_file_str, re.M) if mo: version = mo.group(1) else: raise", ":: OS Independent\", \"License :: OSI Approved :: BSD License\",", "long_description=long_description, long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(), install_requires=list_reqs(), python_requires=\">=3.6\", classifiers=[ \"Programming Language :: Python\",", "fh: long_description = fh.read() # get version from _version.py file,", "\"Programming Language :: Python :: 3.7\", \"Programming Language :: Python", "Language :: Python :: 3.6\", \"Programming Language :: Python ::", "mo.group(1) else: raise RuntimeError(\"Unable to find version string in %s.\"", "\"Programming Language :: Python\", \"Programming Language :: Python :: 3\",", "Independent\", \"License :: OSI Approved :: BSD License\", ], )", "3.7\", \"Programming Language :: Python :: 3.8\", \"Operating System ::", "testing\", long_description=long_description, long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(), install_requires=list_reqs(), python_requires=\">=3.6\", classifiers=[ \"Programming Language ::", ":: Python\", \"Programming Language :: Python :: 3\", \"Programming Language", "fh.read() # get version from _version.py file, from below #", "Language :: Python :: 3\", \"Programming Language :: Python ::", "list_reqs(fname=\"requirements.txt\"): with open(fname) as fd: return fd.read().splitlines() setuptools.setup( name=\"test-aide\", version=version,", "\"Programming Language :: Python :: 3.6\", \"Programming Language :: Python", "= ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VERSION_STR_RE, version_file_str, re.M) if mo: version", "\"r\") as fh: long_description = fh.read() # get version from", "def list_reqs(fname=\"requirements.txt\"): with open(fname) as fd: return fd.read().splitlines() setuptools.setup( name=\"test-aide\",", "version_file_str = open(VERSION_FILE, \"rt\").read() VERSION_STR_RE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo", "open(VERSION_FILE, \"rt\").read() VERSION_STR_RE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VERSION_STR_RE,", "Python :: 3.8\", \"Operating System :: OS Independent\", \"License ::", "version=version, author=\"LV GI Data Science Team\", author_email=\"<EMAIL>\", description=\"Package of helper", "string in %s.\" % (VERSION_FILE,)) def list_reqs(fname=\"requirements.txt\"): with open(fname) as", "of helper functions to be used for unit testing\", long_description=long_description,", ":: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language", "Language :: Python :: 3.8\", \"Operating System :: OS Independent\",", "name=\"test-aide\", version=version, author=\"LV GI Data Science Team\", author_email=\"<EMAIL>\", description=\"Package of", ":: Python :: 3\", \"Programming Language :: Python :: 3.6\",", "functions to be used for unit testing\", long_description=long_description, long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(),", "Python :: 3.7\", \"Programming Language :: Python :: 3.8\", \"Operating", "return fd.read().splitlines() setuptools.setup( name=\"test-aide\", version=version, author=\"LV GI Data Science Team\",", "helper functions to be used for unit testing\", long_description=long_description, 
long_description_content_type=\"text/markdown\",", "author_email=\"<EMAIL>\", description=\"Package of helper functions to be used for unit", "Language :: Python :: 3.7\", \"Programming Language :: Python ::", "\"Operating System :: OS Independent\", \"License :: OSI Approved ::", ":: 3\", \"Programming Language :: Python :: 3.6\", \"Programming Language", "setuptools.setup( name=\"test-aide\", version=version, author=\"LV GI Data Science Team\", author_email=\"<EMAIL>\", description=\"Package", "\"Programming Language :: Python :: 3\", \"Programming Language :: Python", ":: Python :: 3.7\", \"Programming Language :: Python :: 3.8\",", "version string in %s.\" % (VERSION_FILE,)) def list_reqs(fname=\"requirements.txt\"): with open(fname)", "fd: return fd.read().splitlines() setuptools.setup( name=\"test-aide\", version=version, author=\"LV GI Data Science", "open(fname) as fd: return fd.read().splitlines() setuptools.setup( name=\"test-aide\", version=version, author=\"LV GI", "setuptools import re with open(\"README.md\", \"r\") as fh: long_description =", "author=\"LV GI Data Science Team\", author_email=\"<EMAIL>\", description=\"Package of helper functions", "Language :: Python\", \"Programming Language :: Python :: 3\", \"Programming", "= \"test_aide/_version.py\" version_file_str = open(VERSION_FILE, \"rt\").read() VERSION_STR_RE = r\"^__version__ =", "\"rt\").read() VERSION_STR_RE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VERSION_STR_RE, version_file_str,", "file, from below # https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE = \"test_aide/_version.py\" version_file_str =", "Science Team\", author_email=\"<EMAIL>\", description=\"Package of helper functions to be used", "= r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VERSION_STR_RE, version_file_str, re.M) if", "= mo.group(1) else: raise RuntimeError(\"Unable to find version string in", "%s.\" % (VERSION_FILE,)) def list_reqs(fname=\"requirements.txt\"): with open(fname) as fd: return", "Python :: 3\", \"Programming Language :: Python :: 3.6\", \"Programming", "find version string in %s.\" % (VERSION_FILE,)) def list_reqs(fname=\"requirements.txt\"): with", "% (VERSION_FILE,)) def list_reqs(fname=\"requirements.txt\"): with open(fname) as fd: return fd.read().splitlines()", ":: Python :: 3.6\", \"Programming Language :: Python :: 3.7\",", "https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE = \"test_aide/_version.py\" version_file_str = open(VERSION_FILE, \"rt\").read() VERSION_STR_RE =", "version = mo.group(1) else: raise RuntimeError(\"Unable to find version string", "long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(), install_requires=list_reqs(), python_requires=\">=3.6\", classifiers=[ \"Programming Language :: Python\", \"Programming", "unit testing\", long_description=long_description, long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(), install_requires=list_reqs(), python_requires=\">=3.6\", classifiers=[ \"Programming Language", "Python\", \"Programming Language :: Python :: 3\", \"Programming Language ::", "get version from _version.py file, from below # https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE", ":: 3.8\", \"Operating System :: OS Independent\", \"License :: OSI", "below # 
https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE = \"test_aide/_version.py\" version_file_str = open(VERSION_FILE, \"rt\").read()", "for unit testing\", long_description=long_description, long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(), install_requires=list_reqs(), python_requires=\">=3.6\", classifiers=[ \"Programming", "\"test_aide/_version.py\" version_file_str = open(VERSION_FILE, \"rt\").read() VERSION_STR_RE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"", "to be used for unit testing\", long_description=long_description, long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(), install_requires=list_reqs(),", "packages=setuptools.find_packages(), install_requires=list_reqs(), python_requires=\">=3.6\", classifiers=[ \"Programming Language :: Python\", \"Programming Language", "in %s.\" % (VERSION_FILE,)) def list_reqs(fname=\"requirements.txt\"): with open(fname) as fd:", "from _version.py file, from below # https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE = \"test_aide/_version.py\"", "from below # https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE = \"test_aide/_version.py\" version_file_str = open(VERSION_FILE,", "version_file_str, re.M) if mo: version = mo.group(1) else: raise RuntimeError(\"Unable", "raise RuntimeError(\"Unable to find version string in %s.\" % (VERSION_FILE,))", "be used for unit testing\", long_description=long_description, long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(), install_requires=list_reqs(), python_requires=\">=3.6\",", "(VERSION_FILE,)) def list_reqs(fname=\"requirements.txt\"): with open(fname) as fd: return fd.read().splitlines() setuptools.setup(", "install_requires=list_reqs(), python_requires=\">=3.6\", classifiers=[ \"Programming Language :: Python\", \"Programming Language ::", "= open(VERSION_FILE, \"rt\").read() VERSION_STR_RE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo =", "mo: version = mo.group(1) else: raise RuntimeError(\"Unable to find version", "Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming", "to find version string in %s.\" % (VERSION_FILE,)) def list_reqs(fname=\"requirements.txt\"):", "used for unit testing\", long_description=long_description, long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(), install_requires=list_reqs(), python_requires=\">=3.6\", classifiers=[", "# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE = \"test_aide/_version.py\" version_file_str = open(VERSION_FILE, \"rt\").read() VERSION_STR_RE", "3.8\", \"Operating System :: OS Independent\", \"License :: OSI Approved", "else: raise RuntimeError(\"Unable to find version string in %s.\" %", "['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VERSION_STR_RE, version_file_str, re.M) if mo: version =", "with open(fname) as fd: return fd.read().splitlines() setuptools.setup( name=\"test-aide\", version=version, author=\"LV", "long_description = fh.read() # get version from _version.py file, from", "as fh: long_description = fh.read() # get version from _version.py", "re.M) if mo: version = mo.group(1) else: raise RuntimeError(\"Unable to", "= re.search(VERSION_STR_RE, version_file_str, re.M) if mo: version = mo.group(1) else:", "import setuptools import re with open(\"README.md\", 
\"r\") as fh: long_description", "RuntimeError(\"Unable to find version string in %s.\" % (VERSION_FILE,)) def", "# get version from _version.py file, from below # https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package", "fd.read().splitlines() setuptools.setup( name=\"test-aide\", version=version, author=\"LV GI Data Science Team\", author_email=\"<EMAIL>\",", "3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language ::", "3\", \"Programming Language :: Python :: 3.6\", \"Programming Language ::", "_version.py file, from below # https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE = \"test_aide/_version.py\" version_file_str", "VERSION_STR_RE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VERSION_STR_RE, version_file_str, re.M)", "classifiers=[ \"Programming Language :: Python\", \"Programming Language :: Python ::", "\"Programming Language :: Python :: 3.8\", \"Operating System :: OS", "System :: OS Independent\", \"License :: OSI Approved :: BSD", "as fd: return fd.read().splitlines() setuptools.setup( name=\"test-aide\", version=version, author=\"LV GI Data", "OS Independent\", \"License :: OSI Approved :: BSD License\", ],", "if mo: version = mo.group(1) else: raise RuntimeError(\"Unable to find", "version from _version.py file, from below # https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSION_FILE =", "r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VERSION_STR_RE, version_file_str, re.M) if mo:", "Data Science Team\", author_email=\"<EMAIL>\", description=\"Package of helper functions to be", ":: 3.7\", \"Programming Language :: Python :: 3.8\", \"Operating System", "mo = re.search(VERSION_STR_RE, version_file_str, re.M) if mo: version = mo.group(1)", "description=\"Package of helper functions to be used for unit testing\",", "with open(\"README.md\", \"r\") as fh: long_description = fh.read() # get", "python_requires=\">=3.6\", classifiers=[ \"Programming Language :: Python\", \"Programming Language :: Python", "Team\", author_email=\"<EMAIL>\", description=\"Package of helper functions to be used for", "= fh.read() # get version from _version.py file, from below", "re with open(\"README.md\", \"r\") as fh: long_description = fh.read() #", "import re with open(\"README.md\", \"r\") as fh: long_description = fh.read()", "open(\"README.md\", \"r\") as fh: long_description = fh.read() # get version", ":: Python :: 3.8\", \"Operating System :: OS Independent\", \"License" ]
[ "= [(12, 12), (15, 35)] for d in dimlist: matshow(samplemat(d))", "def samplemat(dims): \"\"\"Make a matrix with all zeros and increasing", "Display a random matrix with a specified figure number and", "a random matrix with a specified figure number and a", "matrices of different sizes dimlist = [(12, 12), (15, 35)]", "aa = zeros(dims) for i in range(min(dims)): aa[i, i] =", "elements on the diagonal\"\"\" aa = zeros(dims) for i in", "random matrix with a specified figure number and a grayscale", "zeros(dims) for i in range(min(dims)): aa[i, i] = i return", "aa[i, i] = i return aa # Display 2 matrices", "a matrix with all zeros and increasing elements on the", "= zeros(dims) for i in range(min(dims)): aa[i, i] = i", "\"\"\"Simple matshow() example.\"\"\" from matplotlib.pylab import * def samplemat(dims): \"\"\"Make", "import * def samplemat(dims): \"\"\"Make a matrix with all zeros", "increasing elements on the diagonal\"\"\" aa = zeros(dims) for i", "Display 2 matrices of different sizes dimlist = [(12, 12),", "on the diagonal\"\"\" aa = zeros(dims) for i in range(min(dims)):", "for d in dimlist: matshow(samplemat(d)) # Display a random matrix", "* def samplemat(dims): \"\"\"Make a matrix with all zeros and", "dimlist: matshow(samplemat(d)) # Display a random matrix with a specified", "i in range(min(dims)): aa[i, i] = i return aa #", "different sizes dimlist = [(12, 12), (15, 35)] for d", "matrix with all zeros and increasing elements on the diagonal\"\"\"", "samplemat(dims): \"\"\"Make a matrix with all zeros and increasing elements", "2 matrices of different sizes dimlist = [(12, 12), (15,", "number and a grayscale # colormap matshow(rand(64, 64), fignum=100, cmap=cm.gray)", "matshow() example.\"\"\" from matplotlib.pylab import * def samplemat(dims): \"\"\"Make a", "aa # Display 2 matrices of different sizes dimlist =", "dimlist = [(12, 12), (15, 35)] for d in dimlist:", "(15, 35)] for d in dimlist: matshow(samplemat(d)) # Display a", "# Display a random matrix with a specified figure number", "diagonal\"\"\" aa = zeros(dims) for i in range(min(dims)): aa[i, i]", "35)] for d in dimlist: matshow(samplemat(d)) # Display a random", "the diagonal\"\"\" aa = zeros(dims) for i in range(min(dims)): aa[i,", "specified figure number and a grayscale # colormap matshow(rand(64, 64),", "return aa # Display 2 matrices of different sizes dimlist", "sizes dimlist = [(12, 12), (15, 35)] for d in", "range(min(dims)): aa[i, i] = i return aa # Display 2", "\"\"\"Make a matrix with all zeros and increasing elements on", "figure number and a grayscale # colormap matshow(rand(64, 64), fignum=100,", "12), (15, 35)] for d in dimlist: matshow(samplemat(d)) # Display", "of different sizes dimlist = [(12, 12), (15, 35)] for", "matshow(samplemat(d)) # Display a random matrix with a specified figure", "all zeros and increasing elements on the diagonal\"\"\" aa =", "zeros and increasing elements on the diagonal\"\"\" aa = zeros(dims)", "= i return aa # Display 2 matrices of different", "in range(min(dims)): aa[i, i] = i return aa # Display", "a specified figure number and a grayscale # colormap matshow(rand(64,", "i] = i return aa # Display 2 matrices of", "from matplotlib.pylab import * def samplemat(dims): \"\"\"Make a matrix with", "[(12, 12), (15, 35)] for d in dimlist: matshow(samplemat(d)) #", "with all zeros and increasing elements on the diagonal\"\"\" aa", "in dimlist: matshow(samplemat(d)) # Display a random matrix with a", "with a specified figure number and a grayscale # colormap", "and increasing 
elements on the diagonal\"\"\" aa = zeros(dims) for", "matrix with a specified figure number and a grayscale #", "for i in range(min(dims)): aa[i, i] = i return aa", "# Display 2 matrices of different sizes dimlist = [(12,", "and a grayscale # colormap matshow(rand(64, 64), fignum=100, cmap=cm.gray) show()", "example.\"\"\" from matplotlib.pylab import * def samplemat(dims): \"\"\"Make a matrix", "i return aa # Display 2 matrices of different sizes", "d in dimlist: matshow(samplemat(d)) # Display a random matrix with", "matplotlib.pylab import * def samplemat(dims): \"\"\"Make a matrix with all" ]
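For reference, the same example can be written without the wildcard `matplotlib.pylab` import; this sketch assumes `numpy` and `matplotlib.pyplot` supply the `zeros`, `rand`, `matshow`, and `cm.gray` names used above.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm


def samplemat(dims):
    """Matrix of zeros with increasing values on the diagonal."""
    aa = np.zeros(dims)
    for i in range(min(dims)):
        aa[i, i] = i
    return aa


# Same two fixed-size matrices, then the random grayscale one.
for d in [(12, 12), (15, 35)]:
    plt.matshow(samplemat(d))
plt.matshow(np.random.rand(64, 64), fignum=100, cmap=cm.gray)
plt.show()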
[ "os.path.join(\"periodic\", \"start-high-low-low.stan\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.stan\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.stan\") ] stan_pickle_files = [", "['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='flexbar', logger=logger) programs = ['STAR'] shell_utils.check_programs_exist(programs, raise_on_error=False,", "== 'True': _post_install(force_recompile) setup( cmdclass={ 'install': SetupInstall, 'develop': SetupDevelop }", "self.force_recompile # 0 or 1 level = logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s", "python3 import importlib import logging import os import subprocess from", "# skip if RTD if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile)", "s) for s in stan_pickle_files] # Compile and pickle the", "install.finalize_options(self) def run(self): force_recompile = self.force_recompile # 0 or 1", "spf = [os.path.join(models_base, s) for s in stan_pickle_files] # Compile", "spf): if os.path.exists(pickle): msg = \"A model already exists at:", "['samtools'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='SAMtools', logger=logger) class SetupInstall(install): user_options = install.user_options", "recompile the Stan models'), ] def initialize_options(self): develop.initialize_options(self) self.force_recompile =", "= \"A model already exists at: {}. Skipping.\".format(pickle) logging.warning(msg) continue", "zip(smf, spf): _pickle_it(stan, pickle) else: # default for stan, pickle", "'bowtie2-build-s'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='bowtie2', logger=logger) programs = ['samtools'] shell_utils.check_programs_exist(programs, raise_on_error=False,", "level = logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') develop.run(self) # skip", "1 level = logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') install.run(self) #", "force_recompile: for stan, pickle in zip(smf, spf): _pickle_it(stan, pickle) else:", "model already exists at: {}. 
Skipping.\".format(pickle) logging.warning(msg) continue _pickle_it(stan, pickle)", "not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) class SetupDevelop(develop): user_options = develop.user_options", "'True': _post_install(force_recompile) class SetupDevelop(develop): user_options = develop.user_options + [ ('force-recompile',", "skip if RTD if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) setup(", "import develop as develop logger = logging.getLogger(__name__) stan_model_files = [", "[ os.path.join(\"nonperiodic\", \"no-periodicity.pkl\"), os.path.join(\"nonperiodic\", \"start-high-high-low.pkl\"), os.path.join(\"nonperiodic\", \"start-high-low-high.pkl\"), os.path.join(\"periodic\", \"start-high-low-low.pkl\"), os.path.join(\"untranslated\",", "\"start-high-low-high.pkl\"), os.path.join(\"periodic\", \"start-high-low-low.pkl\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.pkl\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.pkl\") ] def _pickle_it(stan,", "shell_utils smf = [os.path.join(\"rpbp_models\", s) for s in stan_model_files] models_base", "\"no-periodicity.stan\"), os.path.join(\"nonperiodic\", \"start-high-high-low.stan\"), os.path.join(\"nonperiodic\", \"start-high-low-high.stan\"), os.path.join(\"periodic\", \"start-high-low-low.stan\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.stan\"), os.path.join(\"translated\",", "def _post_install(force_recompile): import site importlib.reload(site) import pbio.ribo.ribo_filenames as filenames import", "models_base = filenames.get_default_models_base() spf = [os.path.join(models_base, s) for s in", "finalize_options(self): install.finalize_options(self) def run(self): force_recompile = self.force_recompile # 0 or", "shlex.quote(pickle)) logging.info(cmd) subprocess.call(cmd, shell=True) def _post_install(force_recompile): import site importlib.reload(site) import", "as filenames import pbio.misc.shell_utils as shell_utils smf = [os.path.join(\"rpbp_models\", s)", "%(message)s') install.run(self) # skip if RTD if not os.environ.get('READTHEDOCS') ==", "finalize_options(self): develop.finalize_options(self) def run(self): force_recompile = self.force_recompile # 0 or", "SetupDevelop(develop): user_options = develop.user_options + [ ('force-recompile', None, 'Set this", "programs = ['bowtie2', 'bowtie2-build-s'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='bowtie2', logger=logger) programs =", "os import subprocess from setuptools import setup from setuptools.command.install import", "\"periodic-gaussian-mixture.pkl\") ] def _pickle_it(stan, pickle): import shlex dirname = os.path.dirname(pickle)", "the prerequisite programs programs = ['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='flexbar', logger=logger)", "continue _pickle_it(stan, pickle) # Check for the prerequisite programs programs", "_post_install(force_recompile) class SetupDevelop(develop): user_options = develop.user_options + [ ('force-recompile', None,", "= logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') develop.run(self) # skip if", "logging.getLogger(__name__) stan_model_files = [ os.path.join(\"nonperiodic\", \"no-periodicity.stan\"), os.path.join(\"nonperiodic\", \"start-high-high-low.stan\"), os.path.join(\"nonperiodic\", \"start-high-low-high.stan\"),", "'True': _post_install(force_recompile) setup( cmdclass={ 'install': SetupInstall, 'develop': 
SetupDevelop } )", "stan_model_files = [ os.path.join(\"nonperiodic\", \"no-periodicity.stan\"), os.path.join(\"nonperiodic\", \"start-high-high-low.stan\"), os.path.join(\"nonperiodic\", \"start-high-low-high.stan\"), os.path.join(\"periodic\",", "for stan, pickle in zip(smf, spf): _pickle_it(stan, pickle) else: #", "programs = ['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='flexbar', logger=logger) programs = ['STAR']", "\"start-high-low-high.stan\"), os.path.join(\"periodic\", \"start-high-low-low.stan\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.stan\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.stan\") ] stan_pickle_files =", "logger=logger) class SetupInstall(install): user_options = install.user_options + [ ('force-recompile', None,", "initialize_options(self): develop.initialize_options(self) self.force_recompile = None def finalize_options(self): develop.finalize_options(self) def run(self):", "import os import subprocess from setuptools import setup from setuptools.command.install", "import importlib import logging import os import subprocess from setuptools", "{} {}\".format(shlex.quote(stan), shlex.quote(pickle)) logging.info(cmd) subprocess.call(cmd, shell=True) def _post_install(force_recompile): import site", "logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') install.run(self) # skip if RTD", "develop.run(self) # skip if RTD if not os.environ.get('READTHEDOCS') == 'True':", "= develop.user_options + [ ('force-recompile', None, 'Set this flag to", "os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) setup( cmdclass={ 'install': SetupInstall, 'develop': SetupDevelop", "import site importlib.reload(site) import pbio.ribo.ribo_filenames as filenames import pbio.misc.shell_utils as", "= [os.path.join(\"rpbp_models\", s) for s in stan_model_files] models_base = filenames.get_default_models_base()", "filenames.get_default_models_base() spf = [os.path.join(models_base, s) for s in stan_pickle_files] #", "programs programs = ['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='flexbar', logger=logger) programs =", "= logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') install.run(self) # skip if", "Check for the prerequisite programs programs = ['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False,", "importlib.reload(site) import pbio.ribo.ribo_filenames as filenames import pbio.misc.shell_utils as shell_utils smf", "as shell_utils smf = [os.path.join(\"rpbp_models\", s) for s in stan_model_files]", "_pickle_it(stan, pickle): import shlex dirname = os.path.dirname(pickle) if not os.path.exists(dirname):", "stan_pickle_files] # Compile and pickle the Stan models if force_recompile:", "pickle the Stan models if force_recompile: for stan, pickle in", "Skipping.\".format(pickle) logging.warning(msg) continue _pickle_it(stan, pickle) # Check for the prerequisite", "for stan, pickle in zip(smf, spf): if os.path.exists(pickle): msg =", "= ['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='flexbar', logger=logger) programs = ['STAR'] shell_utils.check_programs_exist(programs,", "package_name='STAR', logger=logger) programs = ['bowtie2', 'bowtie2-build-s'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='bowtie2', logger=logger)", ": %(message)s') install.run(self) # 
skip if RTD if not os.environ.get('READTHEDOCS')", "def finalize_options(self): develop.finalize_options(self) def run(self): force_recompile = self.force_recompile # 0", "= [ os.path.join(\"nonperiodic\", \"no-periodicity.pkl\"), os.path.join(\"nonperiodic\", \"start-high-high-low.pkl\"), os.path.join(\"nonperiodic\", \"start-high-low-high.pkl\"), os.path.join(\"periodic\", \"start-high-low-low.pkl\"),", "import pbio.ribo.ribo_filenames as filenames import pbio.misc.shell_utils as shell_utils smf =", "from setuptools.command.develop import develop as develop logger = logging.getLogger(__name__) stan_model_files", "pickle in zip(smf, spf): if os.path.exists(pickle): msg = \"A model", "run(self): force_recompile = self.force_recompile # 0 or 1 level =", "os.path.exists(dirname): os.makedirs(dirname) cmd = \"pickle-stan {} {}\".format(shlex.quote(stan), shlex.quote(pickle)) logging.info(cmd) subprocess.call(cmd,", "import shlex dirname = os.path.dirname(pickle) if not os.path.exists(dirname): os.makedirs(dirname) cmd", "pbio.ribo.ribo_filenames as filenames import pbio.misc.shell_utils as shell_utils smf = [os.path.join(\"rpbp_models\",", "to recompile the Stan models'), ] def initialize_options(self): install.initialize_options(self) self.force_recompile", "0 or 1 level = logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s')", "= ['samtools'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='SAMtools', logger=logger) class SetupInstall(install): user_options =", "if RTD if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) class SetupDevelop(develop):", "\"periodic-gaussian-mixture.stan\") ] stan_pickle_files = [ os.path.join(\"nonperiodic\", \"no-periodicity.pkl\"), os.path.join(\"nonperiodic\", \"start-high-high-low.pkl\"), os.path.join(\"nonperiodic\",", "import setup from setuptools.command.install import install as install from setuptools.command.develop", "package_name='SAMtools', logger=logger) class SetupInstall(install): user_options = install.user_options + [ ('force-recompile',", "smf = [os.path.join(\"rpbp_models\", s) for s in stan_model_files] models_base =", "+ [ ('force-recompile', None, 'Set this flag to recompile the", "in zip(smf, spf): _pickle_it(stan, pickle) else: # default for stan,", "[os.path.join(\"rpbp_models\", s) for s in stan_model_files] models_base = filenames.get_default_models_base() spf", "stan_pickle_files = [ os.path.join(\"nonperiodic\", \"no-periodicity.pkl\"), os.path.join(\"nonperiodic\", \"start-high-high-low.pkl\"), os.path.join(\"nonperiodic\", \"start-high-low-high.pkl\"), os.path.join(\"periodic\",", "raise_on_error=False, package_name='STAR', logger=logger) programs = ['bowtie2', 'bowtie2-build-s'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='bowtie2',", "initialize_options(self): install.initialize_options(self) self.force_recompile = None def finalize_options(self): install.finalize_options(self) def run(self):", "develop as develop logger = logging.getLogger(__name__) stan_model_files = [ os.path.join(\"nonperiodic\",", "stan, pickle in zip(smf, spf): _pickle_it(stan, pickle) else: # default", "['bowtie2', 'bowtie2-build-s'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='bowtie2', logger=logger) programs = ['samtools'] shell_utils.check_programs_exist(programs,", "level = logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') 
install.run(self) # skip", "RTD if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) setup( cmdclass={ 'install':", "os.path.join(\"nonperiodic\", \"start-high-low-high.stan\"), os.path.join(\"periodic\", \"start-high-low-low.stan\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.stan\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.stan\") ] stan_pickle_files", "] stan_pickle_files = [ os.path.join(\"nonperiodic\", \"no-periodicity.pkl\"), os.path.join(\"nonperiodic\", \"start-high-high-low.pkl\"), os.path.join(\"nonperiodic\", \"start-high-low-high.pkl\"),", "pickle) else: # default for stan, pickle in zip(smf, spf):", "'Set this flag to recompile the Stan models'), ] def", "models'), ] def initialize_options(self): develop.initialize_options(self) self.force_recompile = None def finalize_options(self):", "os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) class SetupDevelop(develop): user_options = develop.user_options +", "s) for s in stan_model_files] models_base = filenames.get_default_models_base() spf =", "import pbio.misc.shell_utils as shell_utils smf = [os.path.join(\"rpbp_models\", s) for s", "os.path.join(\"nonperiodic\", \"no-periodicity.pkl\"), os.path.join(\"nonperiodic\", \"start-high-high-low.pkl\"), os.path.join(\"nonperiodic\", \"start-high-low-high.pkl\"), os.path.join(\"periodic\", \"start-high-low-low.pkl\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.pkl\"),", "shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='bowtie2', logger=logger) programs = ['samtools'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='SAMtools',", "= [ os.path.join(\"nonperiodic\", \"no-periodicity.stan\"), os.path.join(\"nonperiodic\", \"start-high-high-low.stan\"), os.path.join(\"nonperiodic\", \"start-high-low-high.stan\"), os.path.join(\"periodic\", \"start-high-low-low.stan\"),", "raise_on_error=False, package_name='flexbar', logger=logger) programs = ['STAR'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='STAR', logger=logger)", "def _pickle_it(stan, pickle): import shlex dirname = os.path.dirname(pickle) if not", "] def _pickle_it(stan, pickle): import shlex dirname = os.path.dirname(pickle) if", "logger=logger) programs = ['samtools'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='SAMtools', logger=logger) class SetupInstall(install):", "{}. 
Skipping.\".format(pickle) logging.warning(msg) continue _pickle_it(stan, pickle) # Check for the", "class SetupInstall(install): user_options = install.user_options + [ ('force-recompile', None, 'Set", "logging import os import subprocess from setuptools import setup from", "if os.path.exists(pickle): msg = \"A model already exists at: {}.", "self.force_recompile = None def finalize_options(self): develop.finalize_options(self) def run(self): force_recompile =", "in stan_model_files] models_base = filenames.get_default_models_base() spf = [os.path.join(models_base, s) for", "Stan models'), ] def initialize_options(self): develop.initialize_options(self) self.force_recompile = None def", "to recompile the Stan models'), ] def initialize_options(self): develop.initialize_options(self) self.force_recompile", "SetupInstall(install): user_options = install.user_options + [ ('force-recompile', None, 'Set this", "programs = ['STAR'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='STAR', logger=logger) programs = ['bowtie2',", "programs = ['samtools'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='SAMtools', logger=logger) class SetupInstall(install): user_options", "\"start-high-high-low.stan\"), os.path.join(\"nonperiodic\", \"start-high-low-high.stan\"), os.path.join(\"periodic\", \"start-high-low-low.stan\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.stan\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.stan\") ]", "zip(smf, spf): if os.path.exists(pickle): msg = \"A model already exists", "setup from setuptools.command.install import install as install from setuptools.command.develop import", "import logging import os import subprocess from setuptools import setup", "site importlib.reload(site) import pbio.ribo.ribo_filenames as filenames import pbio.misc.shell_utils as shell_utils", "pickle in zip(smf, spf): _pickle_it(stan, pickle) else: # default for", "pickle) # Check for the prerequisite programs programs = ['flexbar']", "def run(self): force_recompile = self.force_recompile # 0 or 1 level", "= logging.getLogger(__name__) stan_model_files = [ os.path.join(\"nonperiodic\", \"no-periodicity.stan\"), os.path.join(\"nonperiodic\", \"start-high-high-low.stan\"), os.path.join(\"nonperiodic\",", "setuptools import setup from setuptools.command.install import install as install from", "install.initialize_options(self) self.force_recompile = None def finalize_options(self): install.finalize_options(self) def run(self): force_recompile", "cmd = \"pickle-stan {} {}\".format(shlex.quote(stan), shlex.quote(pickle)) logging.info(cmd) subprocess.call(cmd, shell=True) def", "not os.path.exists(dirname): os.makedirs(dirname) cmd = \"pickle-stan {} {}\".format(shlex.quote(stan), shlex.quote(pickle)) logging.info(cmd)", "force_recompile = self.force_recompile # 0 or 1 level = logging.getLevelName(\"INFO\")", "for s in stan_model_files] models_base = filenames.get_default_models_base() spf = [os.path.join(models_base,", "Stan models if force_recompile: for stan, pickle in zip(smf, spf):", "#! /usr/bin/env python3 import importlib import logging import os import", "install from setuptools.command.develop import develop as develop logger = logging.getLogger(__name__)", "s in stan_pickle_files] # Compile and pickle the Stan models", "exists at: {}. 
Skipping.\".format(pickle) logging.warning(msg) continue _pickle_it(stan, pickle) # Check", "# 0 or 1 level = logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s :", "if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) setup( cmdclass={ 'install': SetupInstall,", "\"no-periodicity.pkl\"), os.path.join(\"nonperiodic\", \"start-high-high-low.pkl\"), os.path.join(\"nonperiodic\", \"start-high-low-high.pkl\"), os.path.join(\"periodic\", \"start-high-low-low.pkl\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.pkl\"), os.path.join(\"translated\",", "shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='STAR', logger=logger) programs = ['bowtie2', 'bowtie2-build-s'] shell_utils.check_programs_exist(programs, raise_on_error=False,", "= self.force_recompile # 0 or 1 level = logging.getLevelName(\"INFO\") logging.basicConfig(level=level,", "%(message)s') develop.run(self) # skip if RTD if not os.environ.get('READTHEDOCS') ==", "for the prerequisite programs programs = ['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='flexbar',", "if RTD if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) setup( cmdclass={", "develop.finalize_options(self) def run(self): force_recompile = self.force_recompile # 0 or 1", "subprocess.call(cmd, shell=True) def _post_install(force_recompile): import site importlib.reload(site) import pbio.ribo.ribo_filenames as", "raise_on_error=False, package_name='SAMtools', logger=logger) class SetupInstall(install): user_options = install.user_options + [", "the Stan models'), ] def initialize_options(self): develop.initialize_options(self) self.force_recompile = None", "\"pickle-stan {} {}\".format(shlex.quote(stan), shlex.quote(pickle)) logging.info(cmd) subprocess.call(cmd, shell=True) def _post_install(force_recompile): import", "filenames import pbio.misc.shell_utils as shell_utils smf = [os.path.join(\"rpbp_models\", s) for", "logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') develop.run(self) # skip if RTD", "setuptools.command.install import install as install from setuptools.command.develop import develop as", "from setuptools.command.install import install as install from setuptools.command.develop import develop", "= os.path.dirname(pickle) if not os.path.exists(dirname): os.makedirs(dirname) cmd = \"pickle-stan {}", "install as install from setuptools.command.develop import develop as develop logger", "shell=True) def _post_install(force_recompile): import site importlib.reload(site) import pbio.ribo.ribo_filenames as filenames", "if not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) class SetupDevelop(develop): user_options =", "] def initialize_options(self): develop.initialize_options(self) self.force_recompile = None def finalize_options(self): develop.finalize_options(self)", "logging.info(cmd) subprocess.call(cmd, shell=True) def _post_install(force_recompile): import site importlib.reload(site) import pbio.ribo.ribo_filenames", "prerequisite programs programs = ['flexbar'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='flexbar', logger=logger) programs", "{}\".format(shlex.quote(stan), shlex.quote(pickle)) logging.info(cmd) subprocess.call(cmd, shell=True) def _post_install(force_recompile): import site importlib.reload(site)", "logging.basicConfig(level=level, format='%(levelname)-8s : 
%(message)s') install.run(self) # skip if RTD if", "None def finalize_options(self): install.finalize_options(self) def run(self): force_recompile = self.force_recompile #", "models'), ] def initialize_options(self): install.initialize_options(self) self.force_recompile = None def finalize_options(self):", "self.force_recompile = None def finalize_options(self): install.finalize_options(self) def run(self): force_recompile =", "at: {}. Skipping.\".format(pickle) logging.warning(msg) continue _pickle_it(stan, pickle) # Check for", "import install as install from setuptools.command.develop import develop as develop", "_pickle_it(stan, pickle) # Check for the prerequisite programs programs =", "os.path.dirname(pickle) if not os.path.exists(dirname): os.makedirs(dirname) cmd = \"pickle-stan {} {}\".format(shlex.quote(stan),", "os.path.join(\"periodic\", \"start-high-low-low.pkl\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.pkl\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.pkl\") ] def _pickle_it(stan, pickle):", "\"start-high-low-low.pkl\"), os.path.join(\"untranslated\", \"gaussian-naive-bayes.pkl\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.pkl\") ] def _pickle_it(stan, pickle): import", "import subprocess from setuptools import setup from setuptools.command.install import install", "as install from setuptools.command.develop import develop as develop logger =", "_pickle_it(stan, pickle) else: # default for stan, pickle in zip(smf,", "develop.user_options + [ ('force-recompile', None, 'Set this flag to recompile", "= install.user_options + [ ('force-recompile', None, 'Set this flag to", "os.path.join(\"untranslated\", \"gaussian-naive-bayes.stan\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.stan\") ] stan_pickle_files = [ os.path.join(\"nonperiodic\", \"no-periodicity.pkl\"),", "# Check for the prerequisite programs programs = ['flexbar'] shell_utils.check_programs_exist(programs,", "importlib import logging import os import subprocess from setuptools import", ": %(message)s') develop.run(self) # skip if RTD if not os.environ.get('READTHEDOCS')", "def finalize_options(self): install.finalize_options(self) def run(self): force_recompile = self.force_recompile # 0", "in zip(smf, spf): if os.path.exists(pickle): msg = \"A model already", "def initialize_options(self): develop.initialize_options(self) self.force_recompile = None def finalize_options(self): develop.finalize_options(self) def", "= [os.path.join(models_base, s) for s in stan_pickle_files] # Compile and", "\"gaussian-naive-bayes.stan\"), os.path.join(\"translated\", \"periodic-gaussian-mixture.stan\") ] stan_pickle_files = [ os.path.join(\"nonperiodic\", \"no-periodicity.pkl\"), os.path.join(\"nonperiodic\",", "not os.environ.get('READTHEDOCS') == 'True': _post_install(force_recompile) setup( cmdclass={ 'install': SetupInstall, 'develop':", "_post_install(force_recompile): import site importlib.reload(site) import pbio.ribo.ribo_filenames as filenames import pbio.misc.shell_utils", "1 level = logging.getLevelName(\"INFO\") logging.basicConfig(level=level, format='%(levelname)-8s : %(message)s') develop.run(self) #", "= filenames.get_default_models_base() spf = [os.path.join(models_base, s) for s in stan_pickle_files]", "package_name='flexbar', logger=logger) programs = ['STAR'] shell_utils.check_programs_exist(programs, raise_on_error=False, package_name='STAR', logger=logger) programs", "os.path.join(\"translated\", \"periodic-gaussian-mixture.pkl\") ] def _pickle_it(stan, 
#!/usr/bin/env python3

import importlib
import logging
import os
import subprocess

from setuptools import setup
from setuptools.command.install import install as install
from setuptools.command.develop import develop as develop

logger = logging.getLogger(__name__)

stan_model_files = [
    os.path.join("nonperiodic", "no-periodicity.stan"),
    os.path.join("nonperiodic", "start-high-high-low.stan"),
    os.path.join("nonperiodic", "start-high-low-high.stan"),
    os.path.join("periodic", "start-high-low-low.stan"),
    os.path.join("untranslated", "gaussian-naive-bayes.stan"),
    os.path.join("translated", "periodic-gaussian-mixture.stan")
]

stan_pickle_files = [
    os.path.join("nonperiodic", "no-periodicity.pkl"),
    os.path.join("nonperiodic", "start-high-high-low.pkl"),
    os.path.join("nonperiodic", "start-high-low-high.pkl"),
    os.path.join("periodic", "start-high-low-low.pkl"),
    os.path.join("untranslated", "gaussian-naive-bayes.pkl"),
    os.path.join("translated", "periodic-gaussian-mixture.pkl")
]


def _pickle_it(stan, pickle):
    """Compile the given Stan model and cache the result as a pickle."""
    import shlex

    dirname = os.path.dirname(pickle)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    cmd = "pickle-stan {} {}".format(shlex.quote(stan), shlex.quote(pickle))
    logging.info(cmd)
    subprocess.call(cmd, shell=True)


def _post_install(force_recompile):
    # re-scan site-packages so modules installed by this run are importable
    import site
    importlib.reload(site)

    import pbio.misc.shell_utils as shell_utils
    # assumed module path: `filenames` is used below to locate the models base
    import pbio.ribo.ribo_filenames as filenames

    smf = [os.path.join("rpbp_models", s) for s in stan_model_files]

    models_base = filenames.get_default_models_base()
    spf = [os.path.join(models_base, s) for s in stan_pickle_files]

    # Compile and pickle the Stan models
    if force_recompile:
        for stan, pickle in zip(smf, spf):
            _pickle_it(stan, pickle)
    else:  # default: only compile models that are not already pickled
        for stan, pickle in zip(smf, spf):
            if os.path.exists(pickle):
                msg = "A model already exists at: {}. Skipping.".format(pickle)
                logging.warning(msg)
                continue
            _pickle_it(stan, pickle)

    # Check for the prerequisite programs
    programs = ['flexbar']
    shell_utils.check_programs_exist(programs, raise_on_error=False,
                                     package_name='flexbar', logger=logger)

    programs = ['STAR']
    shell_utils.check_programs_exist(programs, raise_on_error=False,
                                     package_name='STAR', logger=logger)

    programs = ['bowtie2', 'bowtie2-build-s']
    shell_utils.check_programs_exist(programs, raise_on_error=False,
                                     package_name='bowtie2', logger=logger)

    programs = ['samtools']
    shell_utils.check_programs_exist(programs, raise_on_error=False,
                                     package_name='SAMtools', logger=logger)


class SetupInstall(install):

    user_options = install.user_options + [
        ('force-recompile', None, 'Set this flag to recompile the Stan models'),
    ]

    def initialize_options(self):
        install.initialize_options(self)
        self.force_recompile = None

    def finalize_options(self):
        install.finalize_options(self)

    def run(self):
        force_recompile = self.force_recompile  # 0 or 1

        level = logging.getLevelName("INFO")
        logging.basicConfig(level=level,
                            format='%(levelname)-8s : %(message)s')

        install.run(self)
        # skip if RTD
        if not os.environ.get('READTHEDOCS') == 'True':
            _post_install(force_recompile)


class SetupDevelop(develop):

    user_options = develop.user_options + [
        ('force-recompile', None, 'Set this flag to recompile the Stan models'),
    ]

    def initialize_options(self):
        develop.initialize_options(self)
        self.force_recompile = None

    def finalize_options(self):
        develop.finalize_options(self)

    def run(self):
        force_recompile = self.force_recompile  # 0 or 1

        level = logging.getLevelName("INFO")
        logging.basicConfig(level=level,
                            format='%(levelname)-8s : %(message)s')

        develop.run(self)
        # skip if RTD
        if not os.environ.get('READTHEDOCS') == 'True':
            _post_install(force_recompile)
[ "for vol in volumes_to_use] elif dir_struct == \"part_FS\": file_paths =", "which contains the data files # :param label_dir: Directory which", "get_imdb_dataset(data_params): data_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_data_file']), 'r') label_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_label_file']),", "vol in volumes_to_use] elif dir_struct == \"part_FS\": file_paths = [", "the project :param data_dir: Directory which contains the data files", "labelmap_nifty.get_fdata() volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))", "transform_train = transforms.Compose([ # transforms.RandomCrop(200, padding=56), # transforms.ToTensor(), # ])", "def __getitem__(self, index): img = torch.from_numpy(self.X[index]) label = torch.from_numpy(self.y[index]) weight", "as preprocessor # transform_train = transforms.Compose([ # transforms.RandomCrop(200, padding=56), #", "else: volumes_to_use = [name for name in os.listdir(data_dir)] if data_id", "a 2 element tuple, 0th being data and 1st being", "If the id_list is in FreeSurfer style or normal :return:", "data_id == \"ADNI\": file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir,", "list of file paths as string \"\"\" if volumes_txt_file: with", "return_weights=False): if reduce_slices: volume, labelmap = preprocessor.reduce_slices(volume, labelmap) if remap_config:", "(np.max(volume) - np.min(volume)) volume, labelmap = preprocessor.rotate_orientation(volume, labelmap, orientation) return", "Directory which contains the data files :param volumes_txt_file: Path to", "the a csv file, when provided only these data points", "volume, labelmap, header = load_data(file_path, orientation) volume, labelmap, class_weights, weights", "vol + '_1.mgz'), os.path.join(label_dir, vol + '/' + vol +", "+ '_1_seg.mgz')] for vol in volumes_to_use] elif data_id == \"IBSR\":", "these data points will be read # :return: list of", "style or normal :return: list of file paths as string", "data_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_data_file']), 'r') label_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_label_file']), 'r')", "nibabel as nb import numpy as np import torch import", "tuple, 0th being data and 1st being label. 
It should", "= np.round(volume) if orientation == \"COR\": volume = volume.transpose((2, 0,", ":param data_dir: Directory which contains the data files :param label_dir:", "to suit the need of the project # :param data_dir:", "volumes_to_use] elif data_id == \"ADNI\": file_paths = [ [os.path.join(data_dir, vol,", "# file_paths = [ # [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol,", "label_train['label'][()], class_weight_train['class_weights'][()]), ImdbData(data_test['data'][()], label_test['label'][()], class_weight_test['class_weights'][()])) def load_dataset(file_paths, orientation, remap_config, return_weights=False,", "in volumes_to_use] elif dir_struct == \"part_FS\": file_paths = [ [os.path.join(data_dir,", "else: raise ValueError(\"Invalid entry, valid options are FS and Linear\")", "'r') class_weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_class_weights_file']), 'r') weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_weights_file']),", "data_params['train_class_weights_file']), 'r') weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_weights_file']), 'r') data_test = h5py.File(os.path.join(data_params['data_dir'],", "== \"AXI\": volume = volume.transpose((1, 2, 0)) return volume, header", "the project # :param data_dir: Directory which contains the data", "= volume_nifty.header volume = volume_nifty.get_fdata() if notlabel: volume = (volume", "as string # \"\"\" # # volume_exclude_list = ['IXI290', 'IXI423']", "file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_map.nii.gz')]", "[], [], [], [], [] for file_path in file_paths: volume,", "if remove_black: volume, labelmap = preprocessor.remove_black(volume, labelmap) if return_weights: class_weights,", "vol.replace('.nii', '_seg.nii'))] for vol in volumes_to_use] else: raise ValueError(\"Invalid entry,", "data_params['train_label_file']), 'r') class_weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_class_weights_file']), 'r') weight_train = h5py.File(os.path.join(data_params['data_dir'],", "the file paths combined as a list where each element", "[name for name in os.listdir(data_dir)] if data_id == \"MALC\": file_paths", "in FreeSurfer style or normal :return: list of file paths", "suit the need of the project # :param data_dir: Directory", "returns the file paths combined as a list where each", "list of file paths as string # \"\"\" # #", "data_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_data_file']), 'r') label_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_label_file']), 'r')", "contains the data files # :param label_dir: Directory which contains", "data points will be read # :return: list of file", "label_dir, data_id, volumes_txt_file=None): \"\"\" This function returns the file paths", "vol + '/' + vol + '_1.mgz'), os.path.join(label_dir, vol +", "as file_handle: volumes_to_use = file_handle.read().splitlines() else: volumes_to_use = [name for", "== \"CANDI\": file_paths = [ [os.path.join(data_dir, vol + '/' +", "weight def __len__(self): return len(self.y) def get_imdb_dataset(data_params): data_train = h5py.File(os.path.join(data_params['data_dir'],", "as data from torchvision import transforms import utils.preprocessor as preprocessor", "reading :param volumes_txt_file: (Optional) Path to the a csv file,", "[ [os.path.join(data_dir, vol, 'mri/orig.mgz')] for vol in 
volumes_to_use] elif dir_struct", "open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() if dir_struct == \"FS\":", "def __init__(self, X, y, w, transforms=None): self.X = X if", "label_test['label'][()], class_weight_test['class_weights'][()])) def load_dataset(file_paths, orientation, remap_config, return_weights=False, reduce_slices=False, remove_black=False): print(\"Loading", "remove_black: volume, labelmap = preprocessor.remove_black(volume, labelmap) if return_weights: class_weights, weights", "data_dir: Directory which contains the data files # :param label_dir:", "files :param label_dir: Directory which contains the label files :param", "torch.from_numpy(self.X[index]) label = torch.from_numpy(self.y[index]) weight = torch.from_numpy(self.w[index]) return img, label,", "class_weight_test['class_weights'][()])) def load_dataset(file_paths, orientation, remap_config, return_weights=False, reduce_slices=False, remove_black=False): print(\"Loading and", "(volume - np.min(volume)) / (np.max(volume) - np.min(volume)) else: volume =", "print(\"100%\", flush=True) if return_weights: return volume_list, labelmap_list, class_weights_list, weights_list, headers", "file_handle: volumes_to_use = file_handle.read().splitlines() else: volumes_to_use = [name for name", "else: return volume_list, labelmap_list, headers def load_and_preprocess(file_path, orientation, remap_config, reduce_slices=False,", "which contains the label files # :param volumes_txt_file: (Optional) Path", "in volumes_to_use] elif data_id == \"BORIS\": #BORIS file_paths = [", "options are MALC, ADNI, CANDI and IBSR\") return file_paths def", "data_dir: Directory which contains the data files :param volumes_txt_file: Path", "volume_exclude_list = ['IXI290', 'IXI423'] # if volumes_txt_file: # with open(volumes_txt_file)", "ADNI, CANDI and IBSR\") return file_paths def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct):", "\"Linear\": file_paths = [ [os.path.join(data_dir, vol)] for vol in volumes_to_use]", "print(\"Loading and preprocessing data...\") volume_list, labelmap_list, headers, class_weights_list, weights_list =", "return img, label, weight def __len__(self): return len(self.y) def get_imdb_dataset(data_params):", "torch.from_numpy(self.y[index]) weight = torch.from_numpy(self.w[index]) return img, label, weight def __len__(self):", "self.y = y self.w = w self.transforms = transforms def", "volume = volume_nifty.get_fdata() if notlabel: volume = (volume - np.min(volume))", "h5py.File(os.path.join(data_params['data_dir'], data_params['test_class_weights_file']), 'r') weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_weights_file']), 'r') return (ImdbData(data_train['data'][()],", "else X[:, np.newaxis, :, :] self.y = y self.w =", "as a list where each element is a 2 element", "= volume.transpose((2, 0, 1)) elif orientation == \"AXI\": volume =", "import nibabel as nb import numpy as np import torch", "__getitem__(self, index): img = torch.from_numpy(self.X[index]) label = torch.from_numpy(self.y[index]) weight =", "labelmap) if return_weights: class_weights, weights = preprocessor.estimate_weights_mfb(labelmap) return volume, labelmap,", "'_1.mgz'), os.path.join(label_dir, vol + '/' + vol + '_1_seg.mgz')] for", "when provided only these data points will be read :return:", "open(volumes_txt_file) as file_handle: # volumes_to_use = file_handle.read().splitlines() # else: #", "these data points will be read :param dir_struct: If the", "= 
torch.from_numpy(self.w[index]) return img, label, weight def __len__(self): return len(self.y)", "elif data_id == \"CANDI\": file_paths = [ [os.path.join(data_dir, vol +", "volume_exclude_list] # # file_paths = [ # [os.path.join(data_dir, vol, 'mri/orig.mgz'),", "return volume, labelmap, class_weights, weights else: return volume, labelmap, None,", "# transform_train = transforms.Compose([ # transforms.RandomCrop(200, padding=56), # transforms.ToTensor(), #", "2 element tuple, 0th being data and 1st being label.", "# else: # volumes_to_use = [name for name in os.listdir(data_dir)", "vol in volumes_to_use] elif data_id == \"BORIS\": #BORIS file_paths =", "for vol in volumes_to_use] elif data_id == \"ADNI\": file_paths =", "volumes_txt_file=None): \"\"\" This function returns the file paths combined as", "contains the label files :param data_id: A flag indicates the", "= h5py.File(os.path.join(data_params['data_dir'], data_params['test_data_file']), 'r') label_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_label_file']), 'r') class_weight_test", "labelmap, None, None # def load_file_paths(data_dir, label_dir, volumes_txt_file=None): # \"\"\"", "paths as string \"\"\" if volumes_txt_file: with open(volumes_txt_file) as file_handle:", "weights, header def load_and_preprocess_eval(file_path, orientation, notlabel=True): volume_nifty = nb.load(file_path[0]) header", "# volumes_to_use = [name for name in os.listdir(data_dir) if #", "'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')] for vol in volumes_to_use] elif data_id", "volumes_to_use] else: raise ValueError(\"Invalid entry, valid options are FS and", "labelmap_list, headers, class_weights_list, weights_list = [], [], [], [], []", "[], [], [], [] for file_path in file_paths: volume, labelmap,", "weights = preprocess(volume, labelmap, remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) return volume,", "torch.from_numpy(self.w[index]) return img, label, weight def __len__(self): return len(self.y) def", "volume_list.append(volume) labelmap_list.append(labelmap) if return_weights: class_weights_list.append(class_weights) weights_list.append(weights) headers.append(header) print(\"#\", end='', flush=True)", "preprocessor # transform_train = transforms.Compose([ # transforms.RandomCrop(200, padding=56), # transforms.ToTensor(),", "volumes_to_use] elif data_id == \"IBSR\": file_paths = [ [os.path.join(data_dir, vol,", "project # :param data_dir: Directory which contains the data files", "remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) volume_list.append(volume) labelmap_list.append(labelmap) if return_weights: class_weights_list.append(class_weights) weights_list.append(weights)", "\"COR\": volume = volume.transpose((2, 0, 1)) elif orientation == \"AXI\":", "points will be read :return: list of file paths as", "data_id == \"BORIS\": #BORIS file_paths = [ [os.path.join(data_dir, vol), os.path.join(label_dir,", "= transforms def __getitem__(self, index): img = torch.from_numpy(self.X[index]) label =", "np.newaxis, :, :] self.y = y self.w = w self.transforms", "for name in os.listdir(data_dir)] if data_id == \"MALC\": file_paths =", "return_weights=return_weights) volume_list.append(volume) labelmap_list.append(labelmap) if return_weights: class_weights_list.append(class_weights) weights_list.append(weights) headers.append(header) print(\"#\", end='',", "volumes_txt_file=None): # \"\"\" # 
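# Usage sketch (illustrative; the array shapes are made up): because ImdbData
# is a standard torch Dataset, it plugs straight into a DataLoader.
#
# from torch.utils.data import DataLoader
#
# X = np.random.rand(8, 200, 200).astype(np.float32)   # volumes, N x H x W
# y = np.random.randint(0, 28, (8, 200, 200))          # label maps
# w = np.ones((8, 200, 200), dtype=np.float32)         # per-pixel weights
#
# loader = DataLoader(ImdbData(X, y, w), batch_size=4, shuffle=True)
# img, label, weight = next(iter(loader))              # img: 4 x 1 x 200 x 200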
def load_dataset(file_paths, orientation, remap_config, return_weights=False, reduce_slices=False, remove_black=False):
    print("Loading and preprocessing data...")
    volume_list, labelmap_list, headers, class_weights_list, weights_list = [], [], [], [], []

    for file_path in file_paths:
        volume, labelmap, class_weights, weights, header = load_and_preprocess(file_path, orientation,
                                                                               remap_config=remap_config,
                                                                               reduce_slices=reduce_slices,
                                                                               remove_black=remove_black,
                                                                               return_weights=return_weights)

        volume_list.append(volume)
        labelmap_list.append(labelmap)

        if return_weights:
            class_weights_list.append(class_weights)
            weights_list.append(weights)

        headers.append(header)

        print("#", end='', flush=True)
    print("100%", flush=True)

    if return_weights:
        return volume_list, labelmap_list, class_weights_list, weights_list, headers
    else:
        return volume_list, labelmap_list, headers


def load_and_preprocess(file_path, orientation, remap_config, reduce_slices=False,
                        remove_black=False, return_weights=False):
    volume, labelmap, header = load_data(file_path, orientation)

    volume, labelmap, class_weights, weights = preprocess(volume, labelmap, remap_config=remap_config,
                                                          reduce_slices=reduce_slices,
                                                          remove_black=remove_black,
                                                          return_weights=return_weights)
    return volume, labelmap, class_weights, weights, header


def load_and_preprocess_eval(file_path, orientation, notlabel=True):
    volume_nifty = nb.load(file_path[0])
    header = volume_nifty.header
    volume = volume_nifty.get_fdata()
    if notlabel:
        volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))
    else:
        volume = np.round(volume)
    if orientation == "COR":
        volume = volume.transpose((2, 0, 1))
    elif orientation == "AXI":
        volume = volume.transpose((1, 2, 0))
    return volume, header


def load_data(file_path, orientation):
    volume_nifty, labelmap_nifty = nb.load(file_path[0]), nb.load(file_path[1])
    volume, labelmap = volume_nifty.get_fdata(), labelmap_nifty.get_fdata()
    volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))
    volume, labelmap = preprocessor.rotate_orientation(volume, labelmap, orientation)
    return volume, labelmap, volume_nifty.header


def preprocess(volume, labelmap, remap_config, reduce_slices=False, remove_black=False, return_weights=False):
    if reduce_slices:
        volume, labelmap = preprocessor.reduce_slices(volume, labelmap)

    if remap_config:
        labelmap = preprocessor.remap_labels(labelmap, remap_config)

    if remove_black:
        volume, labelmap = preprocessor.remove_black(volume, labelmap)

    if return_weights:
        class_weights, weights = preprocessor.estimate_weights_mfb(labelmap)
        return volume, labelmap, class_weights, weights
    else:
        return volume, labelmap, None, None
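# Usage sketch (illustrative): load_and_preprocess_eval normalises intensities
# to [0, 1] and moves the requested slicing axis to the front. The in-memory
# NIfTI below is fabricated with nb.Nifti1Image purely to show the transpose
# semantics; in practice file_path[0] would point to an .mgz/.nii on disk.
#
# import tempfile
#
# img = nb.Nifti1Image(np.random.rand(4, 5, 6), affine=np.eye(4))
# with tempfile.TemporaryDirectory() as d:
#     path = os.path.join(d, 'vol.nii')
#     nb.save(img, path)
#     cor, _ = load_and_preprocess_eval([path], "COR")   # shape (6, 4, 5)
#     axi, _ = load_and_preprocess_eval([path], "AXI")   # shape (5, 6, 4)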
# def load_file_paths(data_dir, label_dir, volumes_txt_file=None):
#     """
#     This function returns the file paths combined as a list where each element is a 2 element tuple,
#     0th being data and 1st being label.
#     It should be modified to suit the needs of the project.
#     :param data_dir: Directory which contains the data files
#     :param label_dir: Directory which contains the label files
#     :param volumes_txt_file: (Optional) Path to a csv file; when provided, only these data points will be read
#     :return: list of file paths as strings
#     """
#
#     volume_exclude_list = ['IXI290', 'IXI423']
#     if volumes_txt_file:
#         with open(volumes_txt_file) as file_handle:
#             volumes_to_use = file_handle.read().splitlines()
#     else:
#         volumes_to_use = [name for name in os.listdir(data_dir) if
#                           name.startswith('IXI') and name not in volume_exclude_list]
#
#     file_paths = [
#         [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')]
#         for vol in volumes_to_use]
#     return file_paths


def load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None):
    """
    This function returns the file paths combined as a list where each element is a 2 element tuple,
    0th being data and 1st being label.
    It should be modified to suit the needs of the project.
    :param data_dir: Directory which contains the data files
    :param label_dir: Directory which contains the label files
    :param data_id: A flag which indicates the name of the dataset, for proper file reading
    :param volumes_txt_file: (Optional) Path to a csv file; when provided, only these data points will be read
    :return: list of file paths as strings
    """
    if volumes_txt_file:
        with open(volumes_txt_file) as file_handle:
            volumes_to_use = file_handle.read().splitlines()
    else:
        volumes_to_use = [name for name in os.listdir(data_dir)]

    if data_id == "MALC":
        file_paths = [
            [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_glm.mgz')]
            for vol in volumes_to_use]
    elif data_id == "ADNI":
        file_paths = [
            [os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')]
            for vol in volumes_to_use]
    elif data_id == "CANDI":
        file_paths = [
            [os.path.join(data_dir, vol + '/' + vol + '_1.mgz'),
             os.path.join(label_dir, vol + '/' + vol + '_1_seg.mgz')]
            for vol in volumes_to_use]
    elif data_id == "IBSR":
        file_paths = [
            [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_map.nii.gz')]
            for vol in volumes_to_use]
    elif data_id == "BORIS":
        file_paths = [
            [os.path.join(data_dir, vol), os.path.join(label_dir, vol.replace('.nii', '_seg.nii'))]
            for vol in volumes_to_use]
    else:
        raise ValueError("Invalid entry, valid options are MALC, ADNI, CANDI, IBSR and BORIS")

    return file_paths


def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct):
    """
    This function returns the file paths combined as a list where each element is a 2 element tuple,
    0th being data and 1st being label.
    It should be modified to suit the needs of the project.
    :param data_dir: Directory which contains the data files
    :param volumes_txt_file: Path to a csv file; when provided, only these data points will be read
    :param dir_struct: Whether the id_list is in FreeSurfer style or normal
    :return: list of file paths as strings
    """
    with open(volumes_txt_file) as file_handle:
        volumes_to_use = file_handle.read().splitlines()

    if dir_struct == "FS":
        file_paths = [
            [os.path.join(data_dir, vol, 'mri/orig.mgz')]
            for vol in volumes_to_use]
    elif dir_struct == "Linear":
        file_paths = [
            [os.path.join(data_dir, vol)]
            for vol in volumes_to_use]
    elif dir_struct == "part_FS":
        file_paths = [
            [os.path.join(data_dir, vol, 'orig.mgz')]
            for vol in volumes_to_use]
    else:
        raise ValueError("Invalid entry, valid options are FS, Linear and part_FS")

    return file_paths
# It", "X, y, w, transforms=None): self.X = X if len(X.shape) ==", "labelmap = preprocessor.remove_black(volume, labelmap) if return_weights: class_weights, weights = preprocessor.estimate_weights_mfb(labelmap)", "'mri/aseg.auto_noCCseg.mgz')] # for # vol in volumes_to_use] # return file_paths", "reduce_slices=False, remove_black=False, return_weights=False): volume, labelmap, header = load_data(file_path, orientation) volume,", "os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')] # for # vol in volumes_to_use] #", "orientation == \"COR\": volume = volume.transpose((2, 0, 1)) elif orientation", "# with open(volumes_txt_file) as file_handle: # volumes_to_use = file_handle.read().splitlines() #", "\"MALC\": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol +", "'/' + vol + '_1_seg.mgz')] for vol in volumes_to_use] elif", "= nb.load(file_path[0]) header = volume_nifty.header volume = volume_nifty.get_fdata() if notlabel:", "else: # volumes_to_use = [name for name in os.listdir(data_dir) if", "labelmap, orientation) return volume, labelmap, volume_nifty.header def preprocess(volume, labelmap, remap_config,", "\"\"\" # # volume_exclude_list = ['IXI290', 'IXI423'] # if volumes_txt_file:", "if return_weights: class_weights, weights = preprocessor.estimate_weights_mfb(labelmap) return volume, labelmap, class_weights,", "volumes_to_use] else: raise ValueError(\"Invalid entry, valid options are MALC, ADNI,", "only these data points will be read # :return: list", "\"\"\" This function returns the file paths combined as a", "nb.load(file_path[1]) volume, labelmap = volume_nifty.get_fdata(), labelmap_nifty.get_fdata() volume = (volume -", "the data files # :param label_dir: Directory which contains the", "transforms=None): self.X = X if len(X.shape) == 4 else X[:,", "= volume_nifty.get_fdata() if notlabel: volume = (volume - np.min(volume)) /", "remap_config, reduce_slices=False, remove_black=False, return_weights=False): if reduce_slices: volume, labelmap = preprocessor.reduce_slices(volume,", "end='', flush=True) print(\"100%\", flush=True) if return_weights: return volume_list, labelmap_list, class_weights_list,", "= [ # [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')] #", ":param label_dir: Directory which contains the label files :param data_id:", "volume_nifty.header def preprocess(volume, labelmap, remap_config, reduce_slices=False, remove_black=False, return_weights=False): if reduce_slices:", "'r') class_weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_class_weights_file']), 'r') weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_weights_file']),", "= [], [], [], [], [] for file_path in file_paths:", "return volume_list, labelmap_list, headers def load_and_preprocess(file_path, orientation, remap_config, reduce_slices=False, remove_black=False,", "labelmap, header = load_data(file_path, orientation) volume, labelmap, class_weights, weights =", "def __len__(self): return len(self.y) def get_imdb_dataset(data_params): data_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_data_file']),", "return_weights: class_weights, weights = preprocessor.estimate_weights_mfb(labelmap) return volume, labelmap, class_weights, weights", ":param label_dir: Directory which contains the label files # :param", "only these data points will be read :return: list of", "when provided only these data points will be read :param", 
"def load_and_preprocess(file_path, orientation, remap_config, reduce_slices=False, remove_black=False, return_weights=False): volume, labelmap, header", "class ImdbData(data.Dataset): def __init__(self, X, y, w, transforms=None): self.X =", "self.X = X if len(X.shape) == 4 else X[:, np.newaxis,", "# :param volumes_txt_file: (Optional) Path to the a csv file,", "as string \"\"\" if volumes_txt_file: with open(volumes_txt_file) as file_handle: volumes_to_use", "= [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_glm.mgz')] for", "[os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')] for vol in volumes_to_use]", "elif dir_struct == \"part_FS\": file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz')]", "reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) volume_list.append(volume) labelmap_list.append(labelmap) if return_weights: class_weights_list.append(class_weights) weights_list.append(weights) headers.append(header)", "notlabel=True): volume_nifty = nb.load(file_path[0]) header = volume_nifty.header volume = volume_nifty.get_fdata()", "# \"\"\" # # volume_exclude_list = ['IXI290', 'IXI423'] # if", "return volume, labelmap, class_weights, weights, header def load_and_preprocess_eval(file_path, orientation, notlabel=True):", "and name not in volume_exclude_list] # # file_paths = [", "\"ADNI\": file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')]", "= [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_map.nii.gz')] for", "the label files # :param volumes_txt_file: (Optional) Path to the", "which contains the label files :param data_id: A flag indicates", "= preprocessor.reduce_slices(volume, labelmap) if remap_config: labelmap = preprocessor.remap_labels(labelmap, remap_config) if", "vol in volumes_to_use] else: raise ValueError(\"Invalid entry, valid options are", "weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_weights_file']), 'r') return (ImdbData(data_train['data'][()], label_train['label'][()], class_weight_train['class_weights'][()]), ImdbData(data_test['data'][()],", "preprocessor.rotate_orientation(volume, labelmap, orientation) return volume, labelmap, volume_nifty.header def preprocess(volume, labelmap,", "This function returns the file paths combined as a list", "Directory which contains the data files # :param label_dir: Directory", "It should be modified to suit the need of the", "the label files :param data_id: A flag indicates the name", "labelmap_list, headers def load_and_preprocess(file_path, orientation, remap_config, reduce_slices=False, remove_black=False, return_weights=False): volume,", "load_file_paths(data_dir, label_dir, volumes_txt_file=None): # \"\"\" # This function returns the", "which contains the data files :param label_dir: Directory which contains", "if remap_config: labelmap = preprocessor.remap_labels(labelmap, remap_config) if remove_black: volume, labelmap", "[ [os.path.join(data_dir, vol + '/' + vol + '_1.mgz'), os.path.join(label_dir,", "in file_paths: volume, labelmap, class_weights, weights, header = load_and_preprocess(file_path, orientation,", "= preprocessor.remap_labels(labelmap, remap_config) if remove_black: volume, labelmap = preprocessor.remove_black(volume, labelmap)", "and 1st being label. 
It should be modified to suit", "= [ [os.path.join(data_dir, vol, 'mri/orig.mgz')] for vol in volumes_to_use] elif", "volume, labelmap, class_weights, weights, header = load_and_preprocess(file_path, orientation, remap_config=remap_config, reduce_slices=reduce_slices,", "are MALC, ADNI, CANDI and IBSR\") return file_paths def load_file_paths_eval(data_dir,", "class_weight_train['class_weights'][()]), ImdbData(data_test['data'][()], label_test['label'][()], class_weight_test['class_weights'][()])) def load_dataset(file_paths, orientation, remap_config, return_weights=False, reduce_slices=False,", "reduce_slices=False, remove_black=False): print(\"Loading and preprocessing data...\") volume_list, labelmap_list, headers, class_weights_list,", "remap_config) if remove_black: volume, labelmap = preprocessor.remove_black(volume, labelmap) if return_weights:", "provided only these data points will be read # :return:", "np.min(volume)) else: volume = np.round(volume) if orientation == \"COR\": volume", "# vol in volumes_to_use] # return file_paths def load_file_paths(data_dir, label_dir,", "dir_struct): \"\"\" This function returns the file paths combined as", "in volume_exclude_list] # # file_paths = [ # [os.path.join(data_dir, vol,", "os.path.join(label_dir, vol + '_map.nii.gz')] for vol in volumes_to_use] elif data_id", "remove_black=remove_black, return_weights=return_weights) volume_list.append(volume) labelmap_list.append(labelmap) if return_weights: class_weights_list.append(class_weights) weights_list.append(weights) headers.append(header) print(\"#\",", "file_paths def load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None): \"\"\" This function returns", "= [name for name in os.listdir(data_dir)] if data_id == \"MALC\":", "class_weights_list, weights_list = [], [], [], [], [] for file_path", "#BORIS file_paths = [ [os.path.join(data_dir, vol), os.path.join(label_dir, vol.replace('.nii', '_seg.nii'))] for", "vol + '/' + vol + '_1_seg.mgz')] for vol in", "if len(X.shape) == 4 else X[:, np.newaxis, :, :] self.y", "os import h5py import nibabel as nb import numpy as", "headers, class_weights_list, weights_list = [], [], [], [], [] for", "vol)] for vol in volumes_to_use] elif dir_struct == \"part_FS\": file_paths", "orientation == \"AXI\": volume = volume.transpose((1, 2, 0)) return volume,", "= h5py.File(os.path.join(data_params['data_dir'], data_params['train_weights_file']), 'r') data_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_data_file']), 'r') label_test", "return_weights=return_weights) return volume, labelmap, class_weights, weights, header def load_and_preprocess_eval(file_path, orientation,", "volume = volume.transpose((1, 2, 0)) return volume, header def load_data(file_path,", "volumes_to_use = file_handle.read().splitlines() if dir_struct == \"FS\": file_paths = [", "which contains the data files :param volumes_txt_file: Path to the", "ValueError(\"Invalid entry, valid options are FS and Linear\") return file_paths", "csv file, when provided only these data points will be", "the need of the project # :param data_dir: Directory which", "in volumes_to_use] elif data_id == \"ADNI\": file_paths = [ [os.path.join(data_dir,", "[ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_glm.mgz')] for vol", "orientation): volume_nifty, labelmap_nifty = nb.load(file_path[0]), nb.load(file_path[1]) volume, labelmap = volume_nifty.get_fdata(),", "weights_list = [], [], [], [], [] for file_path in", 
"data_params['test_label_file']), 'r') class_weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_class_weights_file']), 'r') weight_test = h5py.File(os.path.join(data_params['data_dir'],", "len(X.shape) == 4 else X[:, np.newaxis, :, :] self.y =", "IBSR\") return file_paths def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct): \"\"\" This function", "'_glm.mgz')] for vol in volumes_to_use] elif data_id == \"ADNI\": file_paths", "weights_list, headers else: return volume_list, labelmap_list, headers def load_and_preprocess(file_path, orientation,", "= nb.load(file_path[0]), nb.load(file_path[1]) volume, labelmap = volume_nifty.get_fdata(), labelmap_nifty.get_fdata() volume =", "weights = preprocessor.estimate_weights_mfb(labelmap) return volume, labelmap, class_weights, weights else: return", "Path to the a csv file, when provided only these", "vol in volumes_to_use] elif dir_struct == \"Linear\": file_paths = [", ":param data_dir: Directory which contains the data files :param volumes_txt_file:", "as string \"\"\" with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines()", "volume_nifty.header volume = volume_nifty.get_fdata() if notlabel: volume = (volume -", "reduce_slices=False, remove_black=False, return_weights=False): if reduce_slices: volume, labelmap = preprocessor.reduce_slices(volume, labelmap)", "return volume, header def load_data(file_path, orientation): volume_nifty, labelmap_nifty = nb.load(file_path[0]),", "dir_struct == \"Linear\": file_paths = [ [os.path.join(data_dir, vol)] for vol", "volume, labelmap = preprocessor.rotate_orientation(volume, labelmap, orientation) return volume, labelmap, volume_nifty.header", "of file paths as string \"\"\" with open(volumes_txt_file) as file_handle:", "\"\"\" with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() if dir_struct", ":return: list of file paths as string # \"\"\" #", "return_weights=False): volume, labelmap, header = load_data(file_path, orientation) volume, labelmap, class_weights,", "a list where each element is a 2 element tuple,", "label_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_label_file']), 'r') class_weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_class_weights_file']), 'r')", "file_handle: # volumes_to_use = file_handle.read().splitlines() # else: # volumes_to_use =", "vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')] for vol in volumes_to_use] elif", "class_weights_list, weights_list, headers else: return volume_list, labelmap_list, headers def load_and_preprocess(file_path,", "volume_nifty, labelmap_nifty = nb.load(file_path[0]), nb.load(file_path[1]) volume, labelmap = volume_nifty.get_fdata(), labelmap_nifty.get_fdata()", "np.min(volume)) volume, labelmap = preprocessor.rotate_orientation(volume, labelmap, orientation) return volume, labelmap,", "for vol in volumes_to_use] elif data_id == \"BORIS\": #BORIS file_paths", "__init__(self, X, y, w, transforms=None): self.X = X if len(X.shape)", "headers else: return volume_list, labelmap_list, headers def load_and_preprocess(file_path, orientation, remap_config,", "+ '/' + vol + '_1_seg.mgz')] for vol in volumes_to_use]", "file paths as string \"\"\" with open(volumes_txt_file) as file_handle: volumes_to_use", "data_params['test_class_weights_file']), 'r') weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_weights_file']), 'r') return 
(ImdbData(data_train['data'][()], label_train['label'][()],", "volumes_to_use] # return file_paths def load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None): \"\"\"", "in volumes_to_use] else: raise ValueError(\"Invalid entry, valid options are MALC,", "volume, labelmap = volume_nifty.get_fdata(), labelmap_nifty.get_fdata() volume = (volume - np.min(volume))", "for vol in volumes_to_use] else: raise ValueError(\"Invalid entry, valid options", "volume_list, labelmap_list, class_weights_list, weights_list, headers else: return volume_list, labelmap_list, headers", "numpy as np import torch import torch.utils.data as data from", ":return: list of file paths as string \"\"\" if volumes_txt_file:", "volumes_to_use] elif dir_struct == \"Linear\": file_paths = [ [os.path.join(data_dir, vol)]", "A flag indicates the name of Dataset for proper file", "if volumes_txt_file: with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() else:", "# volume_exclude_list = ['IXI290', 'IXI423'] # if volumes_txt_file: # with", "file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')] for", "is a 2 element tuple, 0th being data and 1st", "headers.append(header) print(\"#\", end='', flush=True) print(\"100%\", flush=True) if return_weights: return volume_list,", "valid options are MALC, ADNI, CANDI and IBSR\") return file_paths", "if volumes_txt_file: # with open(volumes_txt_file) as file_handle: # volumes_to_use =", "to suit the need of the project :param data_dir: Directory", "1st being label. It should be modified to suit the", "\"AXI\": volume = volume.transpose((1, 2, 0)) return volume, header def", "with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() else: volumes_to_use =", "as nb import numpy as np import torch import torch.utils.data", "for # vol in volumes_to_use] # return file_paths def load_file_paths(data_dir,", ":] self.y = y self.w = w self.transforms = transforms", "1st being label. # It should be modified to suit", "of the project :param data_dir: Directory which contains the data", "remap_config: labelmap = preprocessor.remap_labels(labelmap, remap_config) if remove_black: volume, labelmap =", "in os.listdir(data_dir)] if data_id == \"MALC\": file_paths = [ [os.path.join(data_dir,", "= transforms.Compose([ # transforms.RandomCrop(200, padding=56), # transforms.ToTensor(), # ]) class", "for file_path in file_paths: volume, labelmap, class_weights, weights, header =", "modified to suit the need of the project :param data_dir:", "remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) return volume, labelmap, class_weights, weights, header", "# transforms.ToTensor(), # ]) class ImdbData(data.Dataset): def __init__(self, X, y,", "data and 1st being label. 
# It should be modified", "# \"\"\" # This function returns the file paths combined", "elif data_id == \"ADNI\": file_paths = [ [os.path.join(data_dir, vol, 'orig.mgz'),", "# # file_paths = [ # [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir,", "weights, header = load_and_preprocess(file_path, orientation, remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) volume_list.append(volume)", "volume_list, labelmap_list, headers, class_weights_list, weights_list = [], [], [], [],", "of Dataset for proper file reading :param volumes_txt_file: (Optional) Path", "read :return: list of file paths as string \"\"\" if", "file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_glm.mgz')]", "= torch.from_numpy(self.X[index]) label = torch.from_numpy(self.y[index]) weight = torch.from_numpy(self.w[index]) return img,", "of the project # :param data_dir: Directory which contains the", "volumes_to_use = file_handle.read().splitlines() else: volumes_to_use = [name for name in", "label, weight def __len__(self): return len(self.y) def get_imdb_dataset(data_params): data_train =", "+ vol + '_1_seg.mgz')] for vol in volumes_to_use] elif data_id", "# transforms.RandomCrop(200, padding=56), # transforms.ToTensor(), # ]) class ImdbData(data.Dataset): def", "= file_handle.read().splitlines() # else: # volumes_to_use = [name for name", "[ [os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')] for vol in", "flush=True) print(\"100%\", flush=True) if return_weights: return volume_list, labelmap_list, class_weights_list, weights_list,", "reduce_slices: volume, labelmap = preprocessor.reduce_slices(volume, labelmap) if remap_config: labelmap =", "labelmap_list.append(labelmap) if return_weights: class_weights_list.append(class_weights) weights_list.append(weights) headers.append(header) print(\"#\", end='', flush=True) print(\"100%\",", "weight = torch.from_numpy(self.w[index]) return img, label, weight def __len__(self): return", "name.startswith('IXI') and name not in volume_exclude_list] # # file_paths =", "0, 1)) elif orientation == \"AXI\": volume = volume.transpose((1, 2,", "torch import torch.utils.data as data from torchvision import transforms import", "label_dir, volumes_txt_file=None): # \"\"\" # This function returns the file", "volumes_to_use] elif data_id == \"CANDI\": file_paths = [ [os.path.join(data_dir, vol", "(np.max(volume) - np.min(volume)) else: volume = np.round(volume) if orientation ==", "weights_list.append(weights) headers.append(header) print(\"#\", end='', flush=True) print(\"100%\", flush=True) if return_weights: return", "vol + '_map.nii.gz')] for vol in volumes_to_use] elif data_id ==", "elif orientation == \"AXI\": volume = volume.transpose((1, 2, 0)) return", "being label. 
It should be modified to suit the need", "preprocessor.estimate_weights_mfb(labelmap) return volume, labelmap, class_weights, weights else: return volume, labelmap,", "return file_paths def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct): \"\"\" This function returns", "class_weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_class_weights_file']), 'r') weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_weights_file']), 'r')", "= w self.transforms = transforms def __getitem__(self, index): img =", "self.transforms = transforms def __getitem__(self, index): img = torch.from_numpy(self.X[index]) label", "- np.min(volume)) else: volume = np.round(volume) if orientation == \"COR\":", "paths combined as a list where each element is a", "files # :param label_dir: Directory which contains the label files", "'mri/orig.mgz'), os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')] # for # vol in volumes_to_use]", "orientation, remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) volume_list.append(volume) labelmap_list.append(labelmap) if return_weights: class_weights_list.append(class_weights)", "read # :return: list of file paths as string #", "else: volume = np.round(volume) if orientation == \"COR\": volume =", "load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None): \"\"\" This function returns the file", "if # name.startswith('IXI') and name not in volume_exclude_list] # #", "labelmap, remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights) return volume, labelmap, class_weights, weights,", "'IXI423'] # if volumes_txt_file: # with open(volumes_txt_file) as file_handle: #", "import numpy as np import torch import torch.utils.data as data", "entry, valid options are MALC, ADNI, CANDI and IBSR\") return", "h5py.File(os.path.join(data_params['data_dir'], data_params['train_data_file']), 'r') label_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_label_file']), 'r') class_weight_train =", "- np.min(volume)) / (np.max(volume) - np.min(volume)) volume, labelmap = preprocessor.rotate_orientation(volume,", "name of Dataset for proper file reading :param volumes_txt_file: (Optional)", "volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume)) volume,", "labelmap = preprocessor.remap_labels(labelmap, remap_config) if remove_black: volume, labelmap = preprocessor.remove_black(volume,", "label = torch.from_numpy(self.y[index]) weight = torch.from_numpy(self.w[index]) return img, label, weight", "labelmap, class_weights, weights = preprocess(volume, labelmap, remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights)", "file paths as string # \"\"\" # # volume_exclude_list =", "volumes_txt_file: with open(volumes_txt_file) as file_handle: volumes_to_use = file_handle.read().splitlines() else: volumes_to_use", "transforms def __getitem__(self, index): img = torch.from_numpy(self.X[index]) label = torch.from_numpy(self.y[index])", "\"FS\": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz')] for vol in", "class_weights, weights, header = load_and_preprocess(file_path, orientation, remap_config=remap_config, reduce_slices=reduce_slices, remove_black=remove_black, return_weights=return_weights)", "os.path.join(label_dir, vol.replace('.nii', '_seg.nii'))] for vol in volumes_to_use] else: raise 
ValueError(\"Invalid", "data_id == \"MALC\": file_paths = [ [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir,", "Dataset for proper file reading :param volumes_txt_file: (Optional) Path to", "[ [os.path.join(data_dir, vol)] for vol in volumes_to_use] elif dir_struct ==", "[name for name in os.listdir(data_dir) if # name.startswith('IXI') and name", "'r') weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_weights_file']), 'r') data_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_data_file']),", "vol + '_1_seg.mgz')] for vol in volumes_to_use] elif data_id ==" ]
[ "_init_info(self): print(\"=\" * 50) print(\"=\" * 2 + f\" MODULE", "= part_conf.get('env') if self.ENV == 'dev': self.ENV_DEBUG = True def", "def parse(self, conf_name, module_path): self._parse_yaml(conf_name, module_path) self._mysqlDbConf.load(self.get('mysql')) def get(self, name):", "PartConfig: def __init__(self): self._mysqlDbConf = MysqlConf() self._yamlConfig = None def", "* 2 + f\" FILE PATH:: {' '.join(sys.argv)}\") print(f\" config", "def get_sub_import(self, sub): return self.moduleImport + '.' + sub def", "+ sub def get_sub_path(self, sub): return self.modulePath + '/' +", "= self.modulePath.replace(root_path + '/', '') self.moduleImport = self.moduleName.replace('/', '.') part_conf", "'/' + file def log(self): self._init_info() def _init_info(self): print(\"=\" *", "help=\"config file name\", type=str, required=False, default='office') input_args = parser.parse_args() class", "class PartConfig: def __init__(self): self._mysqlDbConf = MysqlConf() self._yamlConfig = None", "open(yaml_file, 'r', encoding='utf-8') yaml_content = f.read() self._yamlConfig = yaml.safe_load(yaml_content) @property", "moduleName = None modulePath = None mysqlDb = None mysqlDbConf", "self.modulePath.replace(root_path + '/', '') self.moduleImport = self.moduleName.replace('/', '.') part_conf =", "# type: mysql.Mysql self.mysqlDb.connect() self.DEBUG = bool(part_conf.get('debug')) self.ENV = part_conf.get('env')", "def get_db(self): return self.mysqlDb def get_sub_import(self, sub): return self.moduleImport +", "return self.get_data_path() + '/' + file def log(self): self._init_info() def", "self._mysqlDbConf.load(self.get('mysql')) def get(self, name): return self._yamlConfig[name] def _parse_yaml(self, conf_name, module_path):", "mysql.Mysql self.mysqlDb.connect() self.DEBUG = bool(part_conf.get('debug')) self.ENV = part_conf.get('env') if self.ENV", "file name\", type=str, required=False, default='office') input_args = parser.parse_args() class PartConfig:", "module_path, root_path): self.configName = input_args.config self.modulePath = module_path self.moduleName =", "sub): return self.moduleImport + '.' + sub def get_sub_path(self, sub):", "= True ENV_DEBUG = False def __init__(self, module_path, root_path): self.configName", "yaml import sys from .conf import MysqlConf from lib.db import", "DEBUG = True ENV_DEBUG = False def __init__(self, module_path, root_path):", "print(f\" db:: {self.mysqlDbConf.db}\") print(f\" debug:: {str(int(self.DEBUG))}\") print(f\" env_debug:: {str(int(self.ENV_DEBUG))}\") print(\"=\"", "self._yamlConfig[name] def _parse_yaml(self, conf_name, module_path): yaml_file = module_path + f\"/conf/db_{conf_name}.yml\"", "file): return self.get_data_path() + '/' + file def log(self): self._init_info()", "log(self): self._init_info() def _init_info(self): print(\"=\" * 50) print(\"=\" * 2", "MysqlConf from lib.db import mysql parser = argparse.ArgumentParser() parser.add_argument(\"--config\", help=\"config", "'.' 
+ sub def get_sub_path(self, sub): return self.modulePath + '/'", "self.mysqlDbConf = part_conf.mysql_db_conf # type:MysqlConf self.mysqlDb = mysql.Mysql(self.mysqlDbConf) # type:", "+ f\" FILE PATH:: {' '.join(sys.argv)}\") print(f\" config file: {self.configName}\")", "= open(yaml_file, 'r', encoding='utf-8') yaml_content = f.read() self._yamlConfig = yaml.safe_load(yaml_content)", "= None mysqlDbConf = None ENV = 'dev' DEBUG =", "return self.modulePath + '/' + sub def get_conf_path(self): return self.get_sub_path(sub='conf')", "sub): return self.modulePath + '/' + sub def get_conf_path(self): return", "import sys from .conf import MysqlConf from lib.db import mysql", "from .conf import MysqlConf from lib.db import mysql parser =", "module_path): yaml_file = module_path + f\"/conf/db_{conf_name}.yml\" f = open(yaml_file, 'r',", "self.DEBUG = bool(part_conf.get('debug')) self.ENV = part_conf.get('env') if self.ENV == 'dev':", "= None moduleName = None modulePath = None mysqlDb =", "return self.mysqlDb def get_sub_import(self, sub): return self.moduleImport + '.' +", "50) print(\"=\" * 2 + f\" MODULE PATH:: {self.modulePath}\") print(\"=\"", "print(\"=\" * 50) print(\"=\" * 2 + f\" MODULE PATH::", "module_path + f\"/conf/db_{conf_name}.yml\" f = open(yaml_file, 'r', encoding='utf-8') yaml_content =", "moduleImport = None moduleName = None modulePath = None mysqlDb", "PartConfig() # type:PartConfig part_conf.parse(conf_name=input_args.config, module_path=module_path) self.mysqlDbConf = part_conf.mysql_db_conf # type:MysqlConf", "'/' + sub def get_conf_path(self): return self.get_sub_path(sub='conf') def get_data_path(self): return", "= False def __init__(self, module_path, root_path): self.configName = input_args.config self.modulePath", "def __init__(self, module_path, root_path): self.configName = input_args.config self.modulePath = module_path", "{self.modulePath}\") print(\"=\" * 2 + f\" FILE PATH:: {' '.join(sys.argv)}\")", "input_args = parser.parse_args() class PartConfig: def __init__(self): self._mysqlDbConf = MysqlConf()", "= part_conf.mysql_db_conf # type:MysqlConf self.mysqlDb = mysql.Mysql(self.mysqlDbConf) # type: mysql.Mysql", "self.moduleImport = self.moduleName.replace('/', '.') part_conf = PartConfig() # type:PartConfig part_conf.parse(conf_name=input_args.config,", "file def log(self): self._init_info() def _init_info(self): print(\"=\" * 50) print(\"=\"", "self.ENV == 'dev': self.ENV_DEBUG = True def get_db(self): return self.mysqlDb", "mysqlDbConf = None ENV = 'dev' DEBUG = True ENV_DEBUG", "self.moduleName.replace('/', '.') part_conf = PartConfig() # type:PartConfig part_conf.parse(conf_name=input_args.config, module_path=module_path) self.mysqlDbConf", "type: mysql.Mysql self.mysqlDb.connect() self.DEBUG = bool(part_conf.get('debug')) self.ENV = part_conf.get('env') if", "+ '/' + sub def get_conf_path(self): return self.get_sub_path(sub='conf') def get_data_path(self):", "self._mysqlDbConf = MysqlConf() self._yamlConfig = None def parse(self, conf_name, module_path):", "def get_conf_path(self): return self.get_sub_path(sub='conf') def get_data_path(self): return self.get_sub_path(sub='data') def get_data_file_path(self,", "part_conf = PartConfig() # type:PartConfig part_conf.parse(conf_name=input_args.config, module_path=module_path) self.mysqlDbConf = part_conf.mysql_db_conf", "self._yamlConfig = None def parse(self, conf_name, module_path): self._parse_yaml(conf_name, module_path) self._mysqlDbConf.load(self.get('mysql'))", "parse(self, conf_name, module_path): 
self._parse_yaml(conf_name, module_path) self._mysqlDbConf.load(self.get('mysql')) def get(self, name): return", "@property def mysql_db_conf(self): return self._mysqlDbConf class App: configName = None", "part_conf.get('env') if self.ENV == 'dev': self.ENV_DEBUG = True def get_db(self):", "bool(part_conf.get('debug')) self.ENV = part_conf.get('env') if self.ENV == 'dev': self.ENV_DEBUG =", "return self._yamlConfig[name] def _parse_yaml(self, conf_name, module_path): yaml_file = module_path +", "+ f\" MODULE PATH:: {self.modulePath}\") print(\"=\" * 2 + f\"", "import yaml import sys from .conf import MysqlConf from lib.db", "False def __init__(self, module_path, root_path): self.configName = input_args.config self.modulePath =", "conf_name, module_path): self._parse_yaml(conf_name, module_path) self._mysqlDbConf.load(self.get('mysql')) def get(self, name): return self._yamlConfig[name]", "from lib.db import mysql parser = argparse.ArgumentParser() parser.add_argument(\"--config\", help=\"config file", "import MysqlConf from lib.db import mysql parser = argparse.ArgumentParser() parser.add_argument(\"--config\",", "= f.read() self._yamlConfig = yaml.safe_load(yaml_content) @property def mysql_db_conf(self): return self._mysqlDbConf", "= mysql.Mysql(self.mysqlDbConf) # type: mysql.Mysql self.mysqlDb.connect() self.DEBUG = bool(part_conf.get('debug')) self.ENV", "get_conf_path(self): return self.get_sub_path(sub='conf') def get_data_path(self): return self.get_sub_path(sub='data') def get_data_file_path(self, file):", "get_data_path(self): return self.get_sub_path(sub='data') def get_data_file_path(self, file): return self.get_data_path() + '/'", "class App: configName = None moduleImport = None moduleName =", "{' '.join(sys.argv)}\") print(f\" config file: {self.configName}\") print(f\" db:: {self.mysqlDbConf.db}\") print(f\"", "{self.mysqlDbConf.db}\") print(f\" debug:: {str(int(self.DEBUG))}\") print(f\" env_debug:: {str(int(self.ENV_DEBUG))}\") print(\"=\" * 50)", "ENV = 'dev' DEBUG = True ENV_DEBUG = False def", "= module_path self.moduleName = self.modulePath.replace(root_path + '/', '') self.moduleImport =", "= None moduleImport = None moduleName = None modulePath =", "required=False, default='office') input_args = parser.parse_args() class PartConfig: def __init__(self): self._mysqlDbConf", "default='office') input_args = parser.parse_args() class PartConfig: def __init__(self): self._mysqlDbConf =", "# type:PartConfig part_conf.parse(conf_name=input_args.config, module_path=module_path) self.mysqlDbConf = part_conf.mysql_db_conf # type:MysqlConf self.mysqlDb", "2 + f\" MODULE PATH:: {self.modulePath}\") print(\"=\" * 2 +", "mysqlDb = None mysqlDbConf = None ENV = 'dev' DEBUG", "f\" FILE PATH:: {' '.join(sys.argv)}\") print(f\" config file: {self.configName}\") print(f\"", "MODULE PATH:: {self.modulePath}\") print(\"=\" * 2 + f\" FILE PATH::", "self.mysqlDb def get_sub_import(self, sub): return self.moduleImport + '.' 
+ sub", "return self.get_sub_path(sub='conf') def get_data_path(self): return self.get_sub_path(sub='data') def get_data_file_path(self, file): return", "= bool(part_conf.get('debug')) self.ENV = part_conf.get('env') if self.ENV == 'dev': self.ENV_DEBUG", "print(\"=\" * 2 + f\" MODULE PATH:: {self.modulePath}\") print(\"=\" *", "print(f\" config file: {self.configName}\") print(f\" db:: {self.mysqlDbConf.db}\") print(f\" debug:: {str(int(self.DEBUG))}\")", "parser = argparse.ArgumentParser() parser.add_argument(\"--config\", help=\"config file name\", type=str, required=False, default='office')", "None def parse(self, conf_name, module_path): self._parse_yaml(conf_name, module_path) self._mysqlDbConf.load(self.get('mysql')) def get(self,", "encoding='utf-8') yaml_content = f.read() self._yamlConfig = yaml.safe_load(yaml_content) @property def mysql_db_conf(self):", "+ '.' + sub def get_sub_path(self, sub): return self.modulePath +", "name\", type=str, required=False, default='office') input_args = parser.parse_args() class PartConfig: def", "# type:MysqlConf self.mysqlDb = mysql.Mysql(self.mysqlDbConf) # type: mysql.Mysql self.mysqlDb.connect() self.DEBUG", "= 'dev' DEBUG = True ENV_DEBUG = False def __init__(self,", "self.modulePath + '/' + sub def get_conf_path(self): return self.get_sub_path(sub='conf') def", "__init__(self): self._mysqlDbConf = MysqlConf() self._yamlConfig = None def parse(self, conf_name,", "get_sub_import(self, sub): return self.moduleImport + '.' + sub def get_sub_path(self,", "= None def parse(self, conf_name, module_path): self._parse_yaml(conf_name, module_path) self._mysqlDbConf.load(self.get('mysql')) def", "sys from .conf import MysqlConf from lib.db import mysql parser", "+ '/', '') self.moduleImport = self.moduleName.replace('/', '.') part_conf = PartConfig()", "yaml.safe_load(yaml_content) @property def mysql_db_conf(self): return self._mysqlDbConf class App: configName =", "def mysql_db_conf(self): return self._mysqlDbConf class App: configName = None moduleImport", "None modulePath = None mysqlDb = None mysqlDbConf = None", "mysql parser = argparse.ArgumentParser() parser.add_argument(\"--config\", help=\"config file name\", type=str, required=False,", "modulePath = None mysqlDb = None mysqlDbConf = None ENV", "mysql.Mysql(self.mysqlDbConf) # type: mysql.Mysql self.mysqlDb.connect() self.DEBUG = bool(part_conf.get('debug')) self.ENV =", "return self._mysqlDbConf class App: configName = None moduleImport = None", "ENV_DEBUG = False def __init__(self, module_path, root_path): self.configName = input_args.config", "conf_name, module_path): yaml_file = module_path + f\"/conf/db_{conf_name}.yml\" f = open(yaml_file,", "if self.ENV == 'dev': self.ENV_DEBUG = True def get_db(self): return", "get_sub_path(self, sub): return self.modulePath + '/' + sub def get_conf_path(self):", "print(\"=\" * 2 + f\" FILE PATH:: {' '.join(sys.argv)}\") print(f\"", "config file: {self.configName}\") print(f\" db:: {self.mysqlDbConf.db}\") print(f\" debug:: {str(int(self.DEBUG))}\") print(f\"", "configName = None moduleImport = None moduleName = None modulePath", "= PartConfig() # type:PartConfig part_conf.parse(conf_name=input_args.config, module_path=module_path) self.mysqlDbConf = part_conf.mysql_db_conf #", "== 'dev': self.ENV_DEBUG = True def get_db(self): return self.mysqlDb def", "module_path) self._mysqlDbConf.load(self.get('mysql')) def get(self, name): return self._yamlConfig[name] def _parse_yaml(self, conf_name,", "self.configName = input_args.config self.modulePath = 
module_path self.moduleName = self.modulePath.replace(root_path +", "def log(self): self._init_info() def _init_info(self): print(\"=\" * 50) print(\"=\" *", "'.join(sys.argv)}\") print(f\" config file: {self.configName}\") print(f\" db:: {self.mysqlDbConf.db}\") print(f\" debug::", "f.read() self._yamlConfig = yaml.safe_load(yaml_content) @property def mysql_db_conf(self): return self._mysqlDbConf class", "root_path): self.configName = input_args.config self.modulePath = module_path self.moduleName = self.modulePath.replace(root_path", "lib.db import mysql parser = argparse.ArgumentParser() parser.add_argument(\"--config\", help=\"config file name\",", "'dev': self.ENV_DEBUG = True def get_db(self): return self.mysqlDb def get_sub_import(self,", "type:MysqlConf self.mysqlDb = mysql.Mysql(self.mysqlDbConf) # type: mysql.Mysql self.mysqlDb.connect() self.DEBUG =", "parser.parse_args() class PartConfig: def __init__(self): self._mysqlDbConf = MysqlConf() self._yamlConfig =", "_parse_yaml(self, conf_name, module_path): yaml_file = module_path + f\"/conf/db_{conf_name}.yml\" f =", "type=str, required=False, default='office') input_args = parser.parse_args() class PartConfig: def __init__(self):", "True def get_db(self): return self.mysqlDb def get_sub_import(self, sub): return self.moduleImport", "self.mysqlDb.connect() self.DEBUG = bool(part_conf.get('debug')) self.ENV = part_conf.get('env') if self.ENV ==", "True ENV_DEBUG = False def __init__(self, module_path, root_path): self.configName =", "get_data_file_path(self, file): return self.get_data_path() + '/' + file def log(self):", "argparse import yaml import sys from .conf import MysqlConf from", "= True def get_db(self): return self.mysqlDb def get_sub_import(self, sub): return", "None moduleName = None modulePath = None mysqlDb = None", "MysqlConf() self._yamlConfig = None def parse(self, conf_name, module_path): self._parse_yaml(conf_name, module_path)", "f\"/conf/db_{conf_name}.yml\" f = open(yaml_file, 'r', encoding='utf-8') yaml_content = f.read() self._yamlConfig", "mysql_db_conf(self): return self._mysqlDbConf class App: configName = None moduleImport =", "get_db(self): return self.mysqlDb def get_sub_import(self, sub): return self.moduleImport + '.'", "get(self, name): return self._yamlConfig[name] def _parse_yaml(self, conf_name, module_path): yaml_file =", "file: {self.configName}\") print(f\" db:: {self.mysqlDbConf.db}\") print(f\" debug:: {str(int(self.DEBUG))}\") print(f\" env_debug::", "* 50) print(\"=\" * 2 + f\" MODULE PATH:: {self.modulePath}\")", "None mysqlDbConf = None ENV = 'dev' DEBUG = True", "self._parse_yaml(conf_name, module_path) self._mysqlDbConf.load(self.get('mysql')) def get(self, name): return self._yamlConfig[name] def _parse_yaml(self,", "self._yamlConfig = yaml.safe_load(yaml_content) @property def mysql_db_conf(self): return self._mysqlDbConf class App:", "<filename>lib/common/app.py import argparse import yaml import sys from .conf import", "sub def get_conf_path(self): return self.get_sub_path(sub='conf') def get_data_path(self): return self.get_sub_path(sub='data') def", "'dev' DEBUG = True ENV_DEBUG = False def __init__(self, module_path,", "2 + f\" FILE PATH:: {' '.join(sys.argv)}\") print(f\" config file:", "= module_path + f\"/conf/db_{conf_name}.yml\" f = open(yaml_file, 'r', encoding='utf-8') yaml_content", "input_args.config self.modulePath = module_path self.moduleName = self.modulePath.replace(root_path + '/', '')", "def __init__(self): self._mysqlDbConf = MysqlConf() self._yamlConfig = 
None def parse(self,", "module_path self.moduleName = self.modulePath.replace(root_path + '/', '') self.moduleImport = self.moduleName.replace('/',", "'') self.moduleImport = self.moduleName.replace('/', '.') part_conf = PartConfig() # type:PartConfig", "parser.add_argument(\"--config\", help=\"config file name\", type=str, required=False, default='office') input_args = parser.parse_args()", "+ '/' + file def log(self): self._init_info() def _init_info(self): print(\"=\"", "+ file def log(self): self._init_info() def _init_info(self): print(\"=\" * 50)", "db:: {self.mysqlDbConf.db}\") print(f\" debug:: {str(int(self.DEBUG))}\") print(f\" env_debug:: {str(int(self.ENV_DEBUG))}\") print(\"=\" *", "self.ENV = part_conf.get('env') if self.ENV == 'dev': self.ENV_DEBUG = True", "def get_data_path(self): return self.get_sub_path(sub='data') def get_data_file_path(self, file): return self.get_data_path() +", "= None modulePath = None mysqlDb = None mysqlDbConf =", "part_conf.mysql_db_conf # type:MysqlConf self.mysqlDb = mysql.Mysql(self.mysqlDbConf) # type: mysql.Mysql self.mysqlDb.connect()", "import argparse import yaml import sys from .conf import MysqlConf", "argparse.ArgumentParser() parser.add_argument(\"--config\", help=\"config file name\", type=str, required=False, default='office') input_args =", "'r', encoding='utf-8') yaml_content = f.read() self._yamlConfig = yaml.safe_load(yaml_content) @property def", "= argparse.ArgumentParser() parser.add_argument(\"--config\", help=\"config file name\", type=str, required=False, default='office') input_args", "+ sub def get_conf_path(self): return self.get_sub_path(sub='conf') def get_data_path(self): return self.get_sub_path(sub='data')", "return self.get_sub_path(sub='data') def get_data_file_path(self, file): return self.get_data_path() + '/' +", "{self.configName}\") print(f\" db:: {self.mysqlDbConf.db}\") print(f\" debug:: {str(int(self.DEBUG))}\") print(f\" env_debug:: {str(int(self.ENV_DEBUG))}\")", "App: configName = None moduleImport = None moduleName = None", "def get(self, name): return self._yamlConfig[name] def _parse_yaml(self, conf_name, module_path): yaml_file", "name): return self._yamlConfig[name] def _parse_yaml(self, conf_name, module_path): yaml_file = module_path", "self.get_sub_path(sub='conf') def get_data_path(self): return self.get_sub_path(sub='data') def get_data_file_path(self, file): return self.get_data_path()", "module_path=module_path) self.mysqlDbConf = part_conf.mysql_db_conf # type:MysqlConf self.mysqlDb = mysql.Mysql(self.mysqlDbConf) #", "self.mysqlDb = mysql.Mysql(self.mysqlDbConf) # type: mysql.Mysql self.mysqlDb.connect() self.DEBUG = bool(part_conf.get('debug'))", "import mysql parser = argparse.ArgumentParser() parser.add_argument(\"--config\", help=\"config file name\", type=str,", "self.modulePath = module_path self.moduleName = self.modulePath.replace(root_path + '/', '') self.moduleImport", "print(f\" debug:: {str(int(self.DEBUG))}\") print(f\" env_debug:: {str(int(self.ENV_DEBUG))}\") print(\"=\" * 50) print(\"\\n\")", ".conf import MysqlConf from lib.db import mysql parser = argparse.ArgumentParser()", "self.get_sub_path(sub='data') def get_data_file_path(self, file): return self.get_data_path() + '/' + file", "* 2 + f\" MODULE PATH:: {self.modulePath}\") print(\"=\" * 2", "= None mysqlDb = None mysqlDbConf = None ENV =", "FILE PATH:: {' '.join(sys.argv)}\") print(f\" config file: {self.configName}\") print(f\" db::", "= None ENV = 'dev' DEBUG = True ENV_DEBUG =", "__init__(self, module_path, 
root_path): self.configName = input_args.config self.modulePath = module_path self.moduleName", "None ENV = 'dev' DEBUG = True ENV_DEBUG = False", "= parser.parse_args() class PartConfig: def __init__(self): self._mysqlDbConf = MysqlConf() self._yamlConfig", "type:PartConfig part_conf.parse(conf_name=input_args.config, module_path=module_path) self.mysqlDbConf = part_conf.mysql_db_conf # type:MysqlConf self.mysqlDb =", "None moduleImport = None moduleName = None modulePath = None", "= input_args.config self.modulePath = module_path self.moduleName = self.modulePath.replace(root_path + '/',", "None mysqlDb = None mysqlDbConf = None ENV = 'dev'", "self._init_info() def _init_info(self): print(\"=\" * 50) print(\"=\" * 2 +", "self.get_data_path() + '/' + file def log(self): self._init_info() def _init_info(self):", "def get_sub_path(self, sub): return self.modulePath + '/' + sub def", "part_conf.parse(conf_name=input_args.config, module_path=module_path) self.mysqlDbConf = part_conf.mysql_db_conf # type:MysqlConf self.mysqlDb = mysql.Mysql(self.mysqlDbConf)", "sub def get_sub_path(self, sub): return self.modulePath + '/' + sub", "self.moduleImport + '.' + sub def get_sub_path(self, sub): return self.modulePath", "PATH:: {' '.join(sys.argv)}\") print(f\" config file: {self.configName}\") print(f\" db:: {self.mysqlDbConf.db}\")", "self.ENV_DEBUG = True def get_db(self): return self.mysqlDb def get_sub_import(self, sub):", "yaml_file = module_path + f\"/conf/db_{conf_name}.yml\" f = open(yaml_file, 'r', encoding='utf-8')", "def _parse_yaml(self, conf_name, module_path): yaml_file = module_path + f\"/conf/db_{conf_name}.yml\" f", "+ f\"/conf/db_{conf_name}.yml\" f = open(yaml_file, 'r', encoding='utf-8') yaml_content = f.read()", "f = open(yaml_file, 'r', encoding='utf-8') yaml_content = f.read() self._yamlConfig =", "self._mysqlDbConf class App: configName = None moduleImport = None moduleName", "module_path): self._parse_yaml(conf_name, module_path) self._mysqlDbConf.load(self.get('mysql')) def get(self, name): return self._yamlConfig[name] def", "= self.moduleName.replace('/', '.') part_conf = PartConfig() # type:PartConfig part_conf.parse(conf_name=input_args.config, module_path=module_path)", "'.') part_conf = PartConfig() # type:PartConfig part_conf.parse(conf_name=input_args.config, module_path=module_path) self.mysqlDbConf =", "def get_data_file_path(self, file): return self.get_data_path() + '/' + file def", "self.moduleName = self.modulePath.replace(root_path + '/', '') self.moduleImport = self.moduleName.replace('/', '.')", "= MysqlConf() self._yamlConfig = None def parse(self, conf_name, module_path): self._parse_yaml(conf_name,", "yaml_content = f.read() self._yamlConfig = yaml.safe_load(yaml_content) @property def mysql_db_conf(self): return", "f\" MODULE PATH:: {self.modulePath}\") print(\"=\" * 2 + f\" FILE", "return self.moduleImport + '.' + sub def get_sub_path(self, sub): return", "PATH:: {self.modulePath}\") print(\"=\" * 2 + f\" FILE PATH:: {'", "= yaml.safe_load(yaml_content) @property def mysql_db_conf(self): return self._mysqlDbConf class App: configName", "'/', '') self.moduleImport = self.moduleName.replace('/', '.') part_conf = PartConfig() #", "def _init_info(self): print(\"=\" * 50) print(\"=\" * 2 + f\"" ]
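

# Illustrative usage sketch (not part of the original file): bootstrapping App
# from a module's entry script. The paths are hypothetical; the config name
# 'office' comes from the --config default, and a db_office.yml is assumed to
# exist under the module's conf/ directory.
#
#     from lib.common.app import App
#
#     app = App(module_path='/srv/project/modules/report',  # hypothetical
#               root_path='/srv/project')                   # hypothetical
#     app.log()                        # print the startup banner
#     db = app.get_db()                # connected mysql.Mysql instance
#     conf_dir = app.get_conf_path()   # '/srv/project/modules/report/conf'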
[ "13)) self.label_4.setObjectName(\"label_4\") self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20, 150, 270, 340)) self.orig_text.setObjectName(\"orig_text\")", "MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект 1\")) self.label.setText(_translate(\"MainWindow\", \"Введите данные о песне:\")) self.label_2.setText(_translate(\"MainWindow\", \"Название:\"))", "90, 140, 13)) self.label_4.setObjectName(\"label_4\") self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20, 150, 270,", "you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class", "30, 113, 20)) self.song_title.setObjectName(\"song_title\") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 30, 60,", "self.orig_text.setObjectName(\"orig_text\") self.label_5 = QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(20, 120, 60, 13)) self.label_5.setObjectName(\"label_5\") self.trans_text", "данные о песне:\")) self.label_2.setText(_translate(\"MainWindow\", \"Название:\")) self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\", \"Полученный текст", "MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\",", "песне:\")) self.label_2.setText(_translate(\"MainWindow\", \"Название:\")) self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\", \"Полученный текст песни:\")) self.label_5.setText(_translate(\"MainWindow\",", "60, 13)) self.label_5.setObjectName(\"label_5\") self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320, 150, 270, 340))", "changes made to this file will be lost when pyuic5", "PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow):", "pyuic5 is # run again. 
Do not edit this file", "500, 560, 13)) self.info.setText(\"\") self.info.setObjectName(\"info\") self.error_text = QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30, 520,", "self.get_text.setText(_translate(\"MainWindow\", \"Запрос текста\")) self.pretty_flag.setText(_translate(\"MainWindow\", \"Красивый текст (без указания на припев)\"))", "Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650, 550) self.centralwidget = QtWidgets.QWidget(MainWindow)", "13)) self.label_5.setObjectName(\"label_5\") self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320, 150, 270, 340)) self.trans_text.setObjectName(\"trans_text\")", "utf-8 -*- # Form implementation generated from reading ui file", "from reading ui file 'design.ui' # # Created by: PyQt5", "self.label_2.setGeometry(QtCore.QRect(20, 30, 60, 13)) self.label_2.setObjectName(\"label_2\") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220, 30,", "MainWindow.resize(650, 550) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20,", "\"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\", \"Полученный текст песни:\")) self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\", \"Перевод на", "self.label_6.setGeometry(QtCore.QRect(320, 120, 120, 13)) self.label_6.setObjectName(\"label_6\") self.get_text = QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310, 70,", "self.error_text.setGeometry(QtCore.QRect(30, 520, 560, 20)) self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def", "\"Название:\")) self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\", \"Полученный текст песни:\")) self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\",", "песни:\")) self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\", \"Перевод на русский:\")) self.get_text.setText(_translate(\"MainWindow\", \"Запрос текста\"))", "60, 13)) self.label_2.setObjectName(\"label_2\") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220, 30, 80, 13))", "self.song_title.setObjectName(\"song_title\") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 30, 60, 13)) self.label_2.setObjectName(\"label_2\") self.label_3", "120, 13)) self.label_6.setObjectName(\"label_6\") self.get_text = QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310, 70, 100, 23))", "текст песни:\")) self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\", \"Перевод на русский:\")) self.get_text.setText(_translate(\"MainWindow\", \"Запрос", "русский:\")) self.get_text.setText(_translate(\"MainWindow\", \"Запрос текста\")) self.pretty_flag.setText(_translate(\"MainWindow\", \"Красивый текст (без указания на", "self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, 
MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект", "270, 340)) self.orig_text.setObjectName(\"orig_text\") self.label_5 = QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(20, 120, 60, 13))", "generator 5.15.4 # # WARNING: Any manual changes made to", "QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650, 550)", "made to this file will be lost when pyuic5 is", "23)) self.get_text.setObjectName(\"get_text\") self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250, 20)) self.pretty_flag.setObjectName(\"pretty_flag\")", "UI code generator 5.15.4 # # WARNING: Any manual changes", "self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate =", "Do not edit this file unless you know what you", "file unless you know what you are doing. from PyQt5", "= QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20, 10, 140, 13)) self.label.setObjectName(\"label\") self.song_title = QtWidgets.QLineEdit(self.centralwidget)", "13)) self.label_2.setObjectName(\"label_2\") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220, 30, 80, 13)) self.label_3.setObjectName(\"label_3\")", "13)) self.info.setText(\"\") self.info.setObjectName(\"info\") self.error_text = QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30, 520, 560, 20))", "= QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220, 30, 80, 13)) self.label_3.setObjectName(\"label_3\") self.song_autor = QtWidgets.QLineEdit(self.centralwidget)", "self.label_4.setObjectName(\"label_4\") self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20, 150, 270, 340)) self.orig_text.setObjectName(\"orig_text\") self.label_5", "= QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(20, 120, 60, 13)) self.label_5.setObjectName(\"label_5\") self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget)", "QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310, 30, 113, 20)) self.song_autor.setObjectName(\"song_autor\") self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20,", "_translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект 1\")) self.label.setText(_translate(\"MainWindow\", \"Введите данные о", "self.label.setText(_translate(\"MainWindow\", \"Введите данные о песне:\")) self.label_2.setText(_translate(\"MainWindow\", \"Название:\")) self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\",", "# Created by: PyQt5 UI code generator 5.15.4 # #", "self.pretty_flag.setObjectName(\"pretty_flag\") self.info = QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30, 500, 560, 13)) self.info.setText(\"\") self.info.setObjectName(\"info\")", "Created by: PyQt5 UI code generator 5.15.4 # # WARNING:", "self.orig_text.setGeometry(QtCore.QRect(20, 150, 270, 340)) self.orig_text.setObjectName(\"orig_text\") self.label_5 = QtWidgets.QLabel(self.centralwidget) 
self.label_5.setGeometry(QtCore.QRect(20, 120,", "= QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 30, 60, 13)) self.label_2.setObjectName(\"label_2\") self.label_3 = QtWidgets.QLabel(self.centralwidget)", "self.info.setText(\"\") self.info.setObjectName(\"info\") self.error_text = QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30, 520, 560, 20)) self.error_text.setText(\"\")", "\"Перевод на русский:\")) self.get_text.setText(_translate(\"MainWindow\", \"Запрос текста\")) self.pretty_flag.setText(_translate(\"MainWindow\", \"Красивый текст (без", "class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650, 550) self.centralwidget =", "self.centralwidget.setObjectName(\"centralwidget\") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20, 10, 140, 13)) self.label.setObjectName(\"label\") self.song_title", "self.label_2.setObjectName(\"label_2\") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220, 30, 80, 13)) self.label_3.setObjectName(\"label_3\") self.song_autor", "113, 20)) self.song_autor.setObjectName(\"song_autor\") self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20, 90, 140, 13))", "code generator 5.15.4 # # WARNING: Any manual changes made", "QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект 1\")) self.label.setText(_translate(\"MainWindow\", \"Введите данные о песне:\")) self.label_2.setText(_translate(\"MainWindow\",", "\"Введите данные о песне:\")) self.label_2.setText(_translate(\"MainWindow\", \"Название:\")) self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\", \"Полученный", "self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320, 150, 270, 340)) self.trans_text.setObjectName(\"trans_text\") self.label_6 =", "# Form implementation generated from reading ui file 'design.ui' #", "100, 23)) self.get_text.setObjectName(\"get_text\") self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250, 20))", "self.label_5 = QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(20, 120, 60, 13)) self.label_5.setObjectName(\"label_5\") self.trans_text =", "= QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310, 30, 113, 20)) self.song_autor.setObjectName(\"song_autor\") self.label_4 = QtWidgets.QLabel(self.centralwidget)", "\"Проект 1\")) self.label.setText(_translate(\"MainWindow\", \"Введите данные о песне:\")) self.label_2.setText(_translate(\"MainWindow\", \"Название:\")) self.label_3.setText(_translate(\"MainWindow\",", "5.15.4 # # WARNING: Any manual changes made to this", "self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\", \"Полученный текст песни:\")) self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\", \"Перевод", "= QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30, 520, 560, 20)) self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow)", "1\")) self.label.setText(_translate(\"MainWindow\", \"Введите данные о песне:\")) 
self.label_2.setText(_translate(\"MainWindow\", \"Название:\")) self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\"))", "30, 60, 13)) self.label_2.setObjectName(\"label_2\") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220, 30, 80,", "self.label_6.setText(_translate(\"MainWindow\", \"Перевод на русский:\")) self.get_text.setText(_translate(\"MainWindow\", \"Запрос текста\")) self.pretty_flag.setText(_translate(\"MainWindow\", \"Красивый текст", "lost when pyuic5 is # run again. Do not edit", "60, 250, 20)) self.pretty_flag.setObjectName(\"pretty_flag\") self.info = QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30, 500, 560,", "QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 30, 60, 13)) self.label_2.setObjectName(\"label_2\") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220,", "= QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320, 120, 120, 13)) self.label_6.setObjectName(\"label_6\") self.get_text = QtWidgets.QPushButton(self.centralwidget)", "10, 140, 13)) self.label.setObjectName(\"label\") self.song_title = QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90, 30, 113,", "reading ui file 'design.ui' # # Created by: PyQt5 UI", "self.label_3.setGeometry(QtCore.QRect(220, 30, 80, 13)) self.label_3.setObjectName(\"label_3\") self.song_autor = QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310, 30,", "20)) self.pretty_flag.setObjectName(\"pretty_flag\") self.info = QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30, 500, 560, 13)) self.info.setText(\"\")", "520, 560, 20)) self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self,", "by: PyQt5 UI code generator 5.15.4 # # WARNING: Any", "self.song_autor.setObjectName(\"song_autor\") self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20, 90, 140, 13)) self.label_4.setObjectName(\"label_4\") self.orig_text", "QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650, 550) self.centralwidget", "80, 13)) self.label_3.setObjectName(\"label_3\") self.song_autor = QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310, 30, 113, 20))", "QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320, 120, 120, 13)) self.label_6.setObjectName(\"label_6\") self.get_text = QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310,", "self.song_autor.setGeometry(QtCore.QRect(310, 30, 113, 20)) self.song_autor.setObjectName(\"song_autor\") self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20, 90,", "are doing. 
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object):", "\"Полученный текст песни:\")) self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\", \"Перевод на русский:\")) self.get_text.setText(_translate(\"MainWindow\",", "13)) self.label.setObjectName(\"label\") self.song_title = QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90, 30, 113, 20)) self.song_title.setObjectName(\"song_title\")", "you know what you are doing. from PyQt5 import QtCore,", "self.trans_text.setObjectName(\"trans_text\") self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320, 120, 120, 13)) self.label_6.setObjectName(\"label_6\") self.get_text", "self.label_4.setGeometry(QtCore.QRect(20, 90, 140, 13)) self.label_4.setObjectName(\"label_4\") self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20, 150,", "self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\", \"Перевод на русский:\")) self.get_text.setText(_translate(\"MainWindow\", \"Запрос текста\")) self.pretty_flag.setText(_translate(\"MainWindow\",", "= QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320, 150, 270, 340)) self.trans_text.setObjectName(\"trans_text\") self.label_6 = QtWidgets.QLabel(self.centralwidget)", "self.get_text.setObjectName(\"get_text\") self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250, 20)) self.pretty_flag.setObjectName(\"pretty_flag\") self.info", "QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320, 150, 270, 340)) self.trans_text.setObjectName(\"trans_text\") self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320,", "self.info = QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30, 500, 560, 13)) self.info.setText(\"\") self.info.setObjectName(\"info\") self.error_text", "= QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20, 150, 270, 340)) self.orig_text.setObjectName(\"orig_text\") self.label_5 = QtWidgets.QLabel(self.centralwidget)", "MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650, 550) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.label = QtWidgets.QLabel(self.centralwidget)", "ui file 'design.ui' # # Created by: PyQt5 UI code", "what you are doing. 
from PyQt5 import QtCore, QtGui, QtWidgets", "QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310, 70, 100, 23)) self.get_text.setObjectName(\"get_text\") self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20,", "-*- # Form implementation generated from reading ui file 'design.ui'", "250, 20)) self.pretty_flag.setObjectName(\"pretty_flag\") self.info = QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30, 500, 560, 13))", "self.song_title = QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90, 30, 113, 20)) self.song_title.setObjectName(\"song_title\") self.label_2 =", "QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(20, 120, 60, 13)) self.label_5.setObjectName(\"label_5\") self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320,", "WARNING: Any manual changes made to this file will be", "# WARNING: Any manual changes made to this file will", "270, 340)) self.trans_text.setObjectName(\"trans_text\") self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320, 120, 120, 13))", "unless you know what you are doing. from PyQt5 import", "doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def", "= QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20, 90, 140, 13)) self.label_4.setObjectName(\"label_4\") self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget)", "self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20, 150, 270, 340)) self.orig_text.setObjectName(\"orig_text\") self.label_5 =", "на русский:\")) self.get_text.setText(_translate(\"MainWindow\", \"Запрос текста\")) self.pretty_flag.setText(_translate(\"MainWindow\", \"Красивый текст (без указания", "self.label_5.setGeometry(QtCore.QRect(20, 120, 60, 13)) self.label_5.setObjectName(\"label_5\") self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320, 150,", "self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20, 10, 140,", "self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate", "о песне:\")) self.label_2.setText(_translate(\"MainWindow\", \"Название:\")) self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\", \"Полученный текст песни:\"))", "QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250, 20)) self.pretty_flag.setObjectName(\"pretty_flag\") self.info = QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30,", "self.get_text.setGeometry(QtCore.QRect(310, 70, 100, 23)) self.get_text.setObjectName(\"get_text\") self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20, 60,", "QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20, 150, 270, 340)) self.orig_text.setObjectName(\"orig_text\") self.label_5 = QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(20,", "be lost when 
pyuic5 is # run again. Do not", "again. Do not edit this file unless you know what", "113, 20)) self.song_title.setObjectName(\"song_title\") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 30, 60, 13))", "when pyuic5 is # run again. Do not edit this", "Any manual changes made to this file will be lost", "560, 13)) self.info.setText(\"\") self.info.setObjectName(\"info\") self.error_text = QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30, 520, 560,", "edit this file unless you know what you are doing.", "= QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90, 30, 113, 20)) self.song_title.setObjectName(\"song_title\") self.label_2 = QtWidgets.QLabel(self.centralwidget)", "self.label_3.setObjectName(\"label_3\") self.song_autor = QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310, 30, 113, 20)) self.song_autor.setObjectName(\"song_autor\") self.label_4", "= QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20, 10, 140, 13))", "QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект 1\"))", "self.label_4.setText(_translate(\"MainWindow\", \"Полученный текст песни:\")) self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\", \"Перевод на русский:\"))", "150, 270, 340)) self.trans_text.setObjectName(\"trans_text\") self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320, 120, 120,", "QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20, 10, 140, 13)) self.label.setObjectName(\"label\") self.song_title = QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90,", "QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30, 500, 560, 13)) self.info.setText(\"\") self.info.setObjectName(\"info\") self.error_text = QtWidgets.QLabel(self.centralwidget)", "self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20, 10, 140, 13)) self.label.setObjectName(\"label\") self.song_title =", "self.error_text = QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30, 520, 560, 20)) self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget)", "self.label_2.setText(_translate(\"MainWindow\", \"Название:\")) self.label_3.setText(_translate(\"MainWindow\", \"Исполнитель:\")) self.label_4.setText(_translate(\"MainWindow\", \"Полученный текст песни:\")) self.label_5.setText(_translate(\"MainWindow\", \"Оригинал:\"))", "Form implementation generated from reading ui file 'design.ui' # #", "self.label.setObjectName(\"label\") self.song_title = QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90, 30, 113, 20)) self.song_title.setObjectName(\"song_title\") self.label_2", "is # run again. 
Do not edit this file unless", "self.info.setGeometry(QtCore.QRect(30, 500, 560, 13)) self.info.setText(\"\") self.info.setObjectName(\"info\") self.error_text = QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30,", "20)) self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate", "140, 13)) self.label.setObjectName(\"label\") self.song_title = QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90, 30, 113, 20))", "# # WARNING: Any manual changes made to this file", "20)) self.song_autor.setObjectName(\"song_autor\") self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20, 90, 140, 13)) self.label_4.setObjectName(\"label_4\")", "know what you are doing. from PyQt5 import QtCore, QtGui,", "self.song_title.setGeometry(QtCore.QRect(90, 30, 113, 20)) self.song_title.setObjectName(\"song_title\") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 30,", "= QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30, 500, 560, 13)) self.info.setText(\"\") self.info.setObjectName(\"info\") self.error_text =", "70, 100, 23)) self.get_text.setObjectName(\"get_text\") self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250,", "\"Оригинал:\")) self.label_6.setText(_translate(\"MainWindow\", \"Перевод на русский:\")) self.get_text.setText(_translate(\"MainWindow\", \"Запрос текста\")) self.pretty_flag.setText(_translate(\"MainWindow\", \"Красивый", "file will be lost when pyuic5 is # run again.", "this file will be lost when pyuic5 is # run", "implementation generated from reading ui file 'design.ui' # # Created", "self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220, 30, 80, 13)) self.label_3.setObjectName(\"label_3\") self.song_autor =", "retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект 1\")) self.label.setText(_translate(\"MainWindow\", \"Введите", "560, 20)) self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow):", "30, 80, 13)) self.label_3.setObjectName(\"label_3\") self.song_autor = QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310, 30, 113,", "140, 13)) self.label_4.setObjectName(\"label_4\") self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20, 150, 270, 340))", "150, 270, 340)) self.orig_text.setObjectName(\"orig_text\") self.label_5 = QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(20, 120, 60,", "self.label_5.setObjectName(\"label_5\") self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320, 150, 270, 340)) self.trans_text.setObjectName(\"trans_text\") self.label_6", "self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250, 20)) self.pretty_flag.setObjectName(\"pretty_flag\") self.info =", "MainWindow): _translate = QtCore.QCoreApplication.translate 
MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект 1\")) self.label.setText(_translate(\"MainWindow\", \"Введите данные", "QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650,", "setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650, 550) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.label", "550) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20, 10,", "QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(220, 30, 80, 13)) self.label_3.setObjectName(\"label_3\") self.song_autor = QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310,", "run again. Do not edit this file unless you know", "= QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект 1\")) self.label.setText(_translate(\"MainWindow\", \"Введите данные о песне:\"))", "self.label_6.setObjectName(\"label_6\") self.get_text = QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310, 70, 100, 23)) self.get_text.setObjectName(\"get_text\") self.pretty_flag", "generated from reading ui file 'design.ui' # # Created by:", "self.get_text = QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310, 70, 100, 23)) self.get_text.setObjectName(\"get_text\") self.pretty_flag =", "QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90, 30, 113, 20)) self.song_title.setObjectName(\"song_title\") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20,", "file 'design.ui' # # Created by: PyQt5 UI code generator", "13)) self.label_3.setObjectName(\"label_3\") self.song_autor = QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310, 30, 113, 20)) self.song_autor.setObjectName(\"song_autor\")", "not edit this file unless you know what you are", "self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250, 20)) self.pretty_flag.setObjectName(\"pretty_flag\") self.info = QtWidgets.QLabel(self.centralwidget) self.info.setGeometry(QtCore.QRect(30, 500,", "20)) self.song_title.setObjectName(\"song_title\") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 30, 60, 13)) self.label_2.setObjectName(\"label_2\")", "30, 113, 20)) self.song_autor.setObjectName(\"song_autor\") self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20, 90, 140,", "# run again. 
Do not edit this file unless you", "MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650, 550) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.label =", "self.info.setObjectName(\"info\") self.error_text = QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30, 520, 560, 20)) self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\")", "self.song_autor = QtWidgets.QLineEdit(self.centralwidget) self.song_autor.setGeometry(QtCore.QRect(310, 30, 113, 20)) self.song_autor.setObjectName(\"song_autor\") self.label_4 =", "340)) self.trans_text.setObjectName(\"trans_text\") self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320, 120, 120, 13)) self.label_6.setObjectName(\"label_6\")", "self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20, 90, 140, 13)) self.label_4.setObjectName(\"label_4\") self.orig_text =", "'design.ui' # # Created by: PyQt5 UI code generator 5.15.4", "120, 60, 13)) self.label_5.setObjectName(\"label_5\") self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.trans_text.setGeometry(QtCore.QRect(320, 150, 270,", "= QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310, 70, 100, 23)) self.get_text.setObjectName(\"get_text\") self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget)", "coding: utf-8 -*- # Form implementation generated from reading ui", "self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(20, 30, 60, 13)) self.label_2.setObjectName(\"label_2\") self.label_3 =", "13)) self.label_6.setObjectName(\"label_6\") self.get_text = QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310, 70, 100, 23)) self.get_text.setObjectName(\"get_text\")", "import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\")", "manual changes made to this file will be lost when", "# -*- coding: utf-8 -*- # Form implementation generated from", "will be lost when pyuic5 is # run again. Do", "QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(20, 10, 140, 13)) self.label.setObjectName(\"label\")", "340)) self.orig_text.setObjectName(\"orig_text\") self.label_5 = QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(20, 120, 60, 13)) self.label_5.setObjectName(\"label_5\")", "def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Проект 1\")) self.label.setText(_translate(\"MainWindow\",", "def setupUi(self, MainWindow): MainWindow.setObjectName(\"MainWindow\") MainWindow.resize(650, 550) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName(\"centralwidget\")", "this file unless you know what you are doing. 
from", "-*- coding: utf-8 -*- # Form implementation generated from reading", "QtWidgets.QLabel(self.centralwidget) self.error_text.setGeometry(QtCore.QRect(30, 520, 560, 20)) self.error_text.setText(\"\") self.error_text.setObjectName(\"error_text\") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow)", "from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self,", "self.trans_text.setGeometry(QtCore.QRect(320, 150, 270, 340)) self.trans_text.setObjectName(\"trans_text\") self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320, 120,", "= QtWidgets.QCheckBox(self.centralwidget) self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250, 20)) self.pretty_flag.setObjectName(\"pretty_flag\") self.info = QtWidgets.QLabel(self.centralwidget)", "PyQt5 UI code generator 5.15.4 # # WARNING: Any manual", "QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(20, 90, 140, 13)) self.label_4.setObjectName(\"label_4\") self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget) self.orig_text.setGeometry(QtCore.QRect(20,", "to this file will be lost when pyuic5 is #", "# # Created by: PyQt5 UI code generator 5.15.4 #", "self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(320, 120, 120, 13)) self.label_6.setObjectName(\"label_6\") self.get_text =", "120, 120, 13)) self.label_6.setObjectName(\"label_6\") self.get_text = QtWidgets.QPushButton(self.centralwidget) self.get_text.setGeometry(QtCore.QRect(310, 70, 100,", "self.label.setGeometry(QtCore.QRect(20, 10, 140, 13)) self.label.setObjectName(\"label\") self.song_title = QtWidgets.QLineEdit(self.centralwidget) self.song_title.setGeometry(QtCore.QRect(90, 30," ]
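
A generated Ui_MainWindow class like the one above is normally wired up from a small hand-written entry point. A minimal sketch, assuming the generated file is saved as design.py (the module name is an assumption, pyuic5 does not fix it):

import sys
from PyQt5 import QtWidgets
from design import Ui_MainWindow  # assumed module name for the generated file

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(window)   # builds the widgets onto the QMainWindow
window.show()
sys.exit(app.exec_())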
# ----------------------------------------------------------------------
from simulation.car import spawn_drivers
from simulation.passenger import spawn_passengers
from simulation.core import World, Clock

conf = {
    "x": 100,
    "y": 100,
    "drivers": 200,
    "users": 1000,
    "start": "2019-07-08T00:00:00",
    "end": "2019-07-08T00:01:00"
}

clock = Clock(conf["start"], conf["end"])

if __name__ == '__main__':
    world = World([conf['x'], conf['y']], clock=clock)
    world.register_drivers(spawn_drivers(conf["drivers"], conf['x'], conf['y']))
    world.register_passengers(spawn_passengers(conf["users"], conf['x'], conf['y']))
    world.run(log=False)
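
The entry point above only exercises a small surface of the simulation package, which is not included in this snippet. A hypothetical stub of the interfaces it assumes, with names inferred purely from the calls above:

# Hypothetical stubs; the real simulation.core package is not shown here.
class Clock:
    def __init__(self, start, end):
        # start/end are ISO-8601 timestamp strings in the config above
        self.start, self.end = start, end

class World:
    def __init__(self, size, clock):
        self.size, self.clock = size, clock
        self.drivers, self.passengers = [], []

    def register_drivers(self, drivers):
        self.drivers = list(drivers)

    def register_passengers(self, passengers):
        self.passengers = list(passengers)

    def run(self, log=False):
        pass  # advance the clock and move agents; omitted in this sketch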
# ----------------------------------------------------------------------
#!/bin/python3

import math
import os
import random
import re
import sys

#
# Complete the 'reverse_words_order_and_swap_cases' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING sentence as parameter.
#

def reverse_words_order_and_swap_cases(sentence):
    # Write your code here
    l = []
    st = ""
    for i in sentence:
        if i == " ":
            l.append(st)
            st = ""
        else:
            st += i.swapcase()
    l.append(st)
    l.reverse()
    news = ""
    for i in range(len(l)):
        if i != (len(l) - 1):
            news += l[i] + " "
        else:
            news += l[i]
    return news


sentence = input()
news = reverse_words_order_and_swap_cases(sentence)
print(news)
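
For reference, the same behavior can be had in one line. This is an editorial alternative, not part of the original exercise solution; split(" ") keeps empty tokens for consecutive spaces, so it matches the loop-based version exactly:

def reverse_words_order_and_swap_cases_short(sentence):
    return " ".join(sentence.swapcase().split(" ")[::-1])

assert reverse_words_order_and_swap_cases_short("awesome is Coding") == "cODING IS AWESOME"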
# ----------------------------------------------------------------------
import json

d1 = {}
with open("/home/qinyuan/zs/out/bart-large-with-description-grouped-1e-5-outerbsz4-innerbsz32-adapterdim4-unfreeze-dec29/test_predictions.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d1[d["id"]] = d["output"][0]["answer"]

d2 = {}
dq = {}
with open("/home/qinyuan/zs/out/bart-large-zsre-with-description-LR2e-5-FREQ32-dec27/test_predictions_submitted.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d2[d["id"]] = d["output"][0]["answer"]
        dq[d["id"]] = d["input"]

d3 = {}
with open("/home/qinyuan/zs/data/structured_zeroshot-test.jsonl") as fin:
    for line in fin:
        d = json.loads(line)
        d3[d["id"]] = [item["answer"] for item in d["output"]]

count = 0
win1 = 0
win2 = 0
for key in d1.keys():
    if d1[key] != d2[key]:
        print("{}. {}. {}. {}. {}".format(key, dq[key], d1[key], d2[key], d3[key]))
        count += 1
        if d1[key] in d3[key] and d2[key] not in d3[key]:
            win1 += 1
            print(d1[key])
            print(d2[key])
        if d2[key] in d3[key] and d1[key] not in d3[key]:
            win2 += 1
            print(d1[key])
            print(d2[key])

print(count)
print(win1)
print(win2)
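
The comparison logic above implies a JSONL record shape like the following; the concrete values are illustrative only, inferred from the field accesses d["id"], d["input"], and d["output"][0]["answer"]:

# Illustrative shapes only; not taken from the actual files.
prediction_record = {
    "id": "q-0001",
    "input": "Ada Lovelace [SEP] country of citizenship",
    "output": [{"answer": "United Kingdom"}],
}
gold_record = {
    "id": "q-0001",
    # gold files may list several acceptable answers, hence the membership tests
    "output": [{"answer": "United Kingdom"}, {"answer": "UK"}],
}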
# ----------------------------------------------------------------------
import json, requests
from pprint import pprint

CREEDS_URL = 'http://amp.pharm.mssm.edu/CREEDS/'

response = requests.get(CREEDS_URL + 'search', params={'q': 'STAT3'})
if response.status_code == 200:
    pprint(response.json())
    # json.dump writes text, so the file must be opened in 'w'; the original
    # snippet used 'wb', which raises TypeError under Python 3
    with open('api1_result.json', 'w') as f:
        json.dump(response.json(), f, indent=4)
# ----------------------------------------------------------------------
# admin/migrations/0041_course_color.py (rodlukas/UP-admin)
# Generated by Django 2.2.3 on 2019-07-31 13:54

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("admin", "0040_auto_20190718_0938")]

    operations = [
        migrations.AddField(
            model_name="course",
            name="color",
            field=models.CharField(default="#000", max_length=7),
        )
    ]
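
For context, the AddField operation corresponds to a model declaration along these lines; the Course model itself is not part of this snippet, so treat this as a sketch:

from django.db import models

class Course(models.Model):
    # 7 characters fit full hex colors such as "#1a2b3c"; "#000" is the default
    color = models.CharField(default="#000", max_length=7)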
[ "0: pilhaParenteses.pop() else: pilhaParenteses.append(')') break if len(pilhaParenteses) == 0: print(f'A", "= (str(input('Digite a expressão: '))) #if expressao.count('(') == expressao.count(')'): #", "'(': pilhaParenteses.append('(') elif v == ')': if len(pilhaParenteses) > 0:", "expressao: if v == '(': pilhaParenteses.append('(') elif v == ')':", "bugs #expressao = (str(input('Digite a expressão: '))) #if expressao.count('(') ==", "pilhaParenteses.pop() else: pilhaParenteses.append(')') break if len(pilhaParenteses) == 0: print(f'A expressão", "pilhaParenteses.append(')') break if len(pilhaParenteses) == 0: print(f'A expressão {expressao} está", "'))) #if expressao.count('(') == expressao.count(')'): # print('Sua expressão está válida.')", "')': if len(pilhaParenteses) > 0: pilhaParenteses.pop() else: pilhaParenteses.append(')') break if", "'))) pilhaParenteses = [] for v in expressao: if v", "expressão: '))) pilhaParenteses = [] for v in expressao: if", "if v == '(': pilhaParenteses.append('(') elif v == ')': if", "== ')': if len(pilhaParenteses) > 0: pilhaParenteses.pop() else: pilhaParenteses.append(')') break", "elif v == ')': if len(pilhaParenteses) > 0: pilhaParenteses.pop() else:", "bugs expressao = (str(input('Digite a expressão: '))) pilhaParenteses = []", "= [] for v in expressao: if v == '(':", "len(pilhaParenteses) > 0: pilhaParenteses.pop() else: pilhaParenteses.append(')') break if len(pilhaParenteses) ==", "expressao.count('(') == expressao.count(')'): # print('Sua expressão está válida.') #else: #", "len(pilhaParenteses) == 0: print(f'A expressão {expressao} está válida.') else: print(f'A", "{expressao} está inválida!') # Forma com bugs #expressao = (str(input('Digite", "{expressao} está válida.') else: print(f'A expressão {expressao} está inválida!') #", "expressão {expressao} está válida.') else: print(f'A expressão {expressao} está inválida!')", "for v in expressao: if v == '(': pilhaParenteses.append('(') elif", "expressão: '))) #if expressao.count('(') == expressao.count(')'): # print('Sua expressão está", "Forma sem bugs expressao = (str(input('Digite a expressão: '))) pilhaParenteses", "a expressão: '))) pilhaParenteses = [] for v in expressao:", "a expressão: '))) #if expressao.count('(') == expressao.count(')'): # print('Sua expressão", "== '(': pilhaParenteses.append('(') elif v == ')': if len(pilhaParenteses) >", "Forma com bugs #expressao = (str(input('Digite a expressão: '))) #if", "sem bugs expressao = (str(input('Digite a expressão: '))) pilhaParenteses =", "#if expressao.count('(') == expressao.count(')'): # print('Sua expressão está válida.') #else:", "break if len(pilhaParenteses) == 0: print(f'A expressão {expressao} está válida.')", "else: pilhaParenteses.append(')') break if len(pilhaParenteses) == 0: print(f'A expressão {expressao}", "== 0: print(f'A expressão {expressao} está válida.') else: print(f'A expressão", "#expressao = (str(input('Digite a expressão: '))) #if expressao.count('(') == expressao.count(')'):", "print('Sua expressão está válida.') #else: # print('Sua expressão está inválida!')", "# Forma sem bugs expressao = (str(input('Digite a expressão: ')))", "# Forma com bugs #expressao = (str(input('Digite a expressão: ')))", "(str(input('Digite a expressão: '))) #if expressao.count('(') == expressao.count(')'): # print('Sua", "== expressao.count(')'): # print('Sua expressão está válida.') #else: # print('Sua", "expressao.count(')'): # print('Sua expressão está válida.') #else: # print('Sua expressão", "[] for v in expressao: if v == 
'(': pilhaParenteses.append('(')", "else: print(f'A expressão {expressao} está inválida!') # Forma com bugs", "válida.') else: print(f'A expressão {expressao} está inválida!') # Forma com", "expressão {expressao} está inválida!') # Forma com bugs #expressao =", "print(f'A expressão {expressao} está inválida!') # Forma com bugs #expressao", "com bugs #expressao = (str(input('Digite a expressão: '))) #if expressao.count('(')", "está válida.') else: print(f'A expressão {expressao} está inválida!') # Forma", "if len(pilhaParenteses) > 0: pilhaParenteses.pop() else: pilhaParenteses.append(')') break if len(pilhaParenteses)", "pilhaParenteses.append('(') elif v == ')': if len(pilhaParenteses) > 0: pilhaParenteses.pop()", "print(f'A expressão {expressao} está válida.') else: print(f'A expressão {expressao} está", "está inválida!') # Forma com bugs #expressao = (str(input('Digite a", "0: print(f'A expressão {expressao} está válida.') else: print(f'A expressão {expressao}", "= (str(input('Digite a expressão: '))) pilhaParenteses = [] for v", "v == '(': pilhaParenteses.append('(') elif v == ')': if len(pilhaParenteses)", "> 0: pilhaParenteses.pop() else: pilhaParenteses.append(')') break if len(pilhaParenteses) == 0:", "expressao = (str(input('Digite a expressão: '))) pilhaParenteses = [] for", "# print('Sua expressão está válida.') #else: # print('Sua expressão está", "if len(pilhaParenteses) == 0: print(f'A expressão {expressao} está válida.') else:", "inválida!') # Forma com bugs #expressao = (str(input('Digite a expressão:", "in expressao: if v == '(': pilhaParenteses.append('(') elif v ==", "v in expressao: if v == '(': pilhaParenteses.append('(') elif v", "v == ')': if len(pilhaParenteses) > 0: pilhaParenteses.pop() else: pilhaParenteses.append(')')", "(str(input('Digite a expressão: '))) pilhaParenteses = [] for v in", "pilhaParenteses = [] for v in expressao: if v ==" ]
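The commented-out counting approach fails precisely because it ignores ordering: the string ')(' has one '(' and one ')', so the counts match, yet it is not balanced. A small function version of the stack check (the name `balanced` is ours, for illustration) makes the difference concrete:

def balanced(expressao):
    # Same stack logic as above, packaged as a reusable function.
    pilha = []
    for v in expressao:
        if v == '(':
            pilha.append('(')
        elif v == ')':
            if pilha:
                pilha.pop()
            else:
                return False  # ')' with no matching '(' before it
    return not pilha          # leftover '(' means unbalanced

print(balanced('(a+b)*(c-d)'))  # True
print(balanced(')('))           # False: counts match, order does not
print(balanced('((x)'))         # False: one '(' left open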
[ "def print_xml(self, xml_file): if self.is_valid: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" />\\n'.format(self.file_path,", "' + self.content def mark_as_valid(self): self.is_valid = True self.error_reason =", "True self.error_reason = None def mark_as_invalid(self, error_reason): self.is_valid = False", "% self.file_path) log.error('[LINE] %s' % self.line_number) log.error('[CONTENT] %s' % self.content)", "= True self.error_reason = None def __str__(self): return 'Todo in", "return 'Todo in file ' + self.file_path + ':' +", "2018 TNG Technology Consulting GmbH, Unterföhring, Germany # Licensed under", "'Todo in file ' + self.file_path + ':' + str(self.line_number)", "':' + str(self.line_number) + ' | ' + self.content def", "return log.error('[REASON] %s' % self.error_reason) log.error('[FILE] %s' % self.file_path) log.error('[LINE]", "log.error('[FILE] %s' % self.file_path) log.error('[LINE] %s' % self.line_number) log.error('[CONTENT] %s'", "/>\\n'.format(self.file_path, self.line_number)) else: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" >\\n'.format(self.file_path, self.line_number)) xml_file.write('\\t\\t<failure", "self.is_valid = True self.error_reason = None def mark_as_invalid(self, error_reason): self.is_valid", "= logging.getLogger() class Todo: def __init__(self, file_path, line_number, content): self.file_path", "= None def __str__(self): return 'Todo in file ' +", "log = logging.getLogger() class Todo: def __init__(self, file_path, line_number, content):", "file_path, line_number, content): self.file_path = file_path self.line_number = line_number self.content", "under the Apache License, Version 2.0 - see LICENSE.md in", "file_path self.line_number = line_number self.content = content self.is_valid = True", "see LICENSE.md in project root directory import logging from xml.sax.saxutils", "' | ' + self.content def mark_as_valid(self): self.is_valid = True", "log.error('[LINE] %s' % self.line_number) log.error('[CONTENT] %s' % self.content) def print_xml(self,", "self.error_reason) log.error('[FILE] %s' % self.file_path) log.error('[LINE] %s' % self.line_number) log.error('[CONTENT]", "| ' + self.content def mark_as_valid(self): self.is_valid = True self.error_reason", "2.0 - see LICENSE.md in project root directory import logging", "def mark_as_valid(self): self.is_valid = True self.error_reason = None def mark_as_invalid(self,", "self.is_valid: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" />\\n'.format(self.file_path, self.line_number)) else: xml_file.write('\\t<testcase classname=\"{}\"", "str(self.line_number) + ' | ' + self.content def mark_as_valid(self): self.is_valid", "root directory import logging from xml.sax.saxutils import escape log =", "Germany # Licensed under the Apache License, Version 2.0 -", "License, Version 2.0 - see LICENSE.md in project root directory", "in project root directory import logging from xml.sax.saxutils import escape", "= error_reason def print(self, show_valid=False): if not show_valid and self.is_valid:", "error_reason def print(self, show_valid=False): if not show_valid and self.is_valid: return", "Technology Consulting GmbH, Unterföhring, Germany # Licensed under the Apache", "Todo: def __init__(self, file_path, line_number, content): self.file_path = file_path self.line_number", "self.file_path + ':' + str(self.line_number) + ' | ' +", "show_valid and self.is_valid: return log.error('[REASON] %s' % self.error_reason) log.error('[FILE] %s'", "not show_valid and self.is_valid: 
return log.error('[REASON] %s' % self.error_reason) log.error('[FILE]", "% self.line_number) log.error('[CONTENT] %s' % self.content) def print_xml(self, xml_file): if", "xml_file): if self.is_valid: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" />\\n'.format(self.file_path, self.line_number)) else:", "+ ' | ' + self.content def mark_as_valid(self): self.is_valid =", "def mark_as_invalid(self, error_reason): self.is_valid = False self.error_reason = error_reason def", "mark_as_valid(self): self.is_valid = True self.error_reason = None def mark_as_invalid(self, error_reason):", "%s' % self.content) def print_xml(self, xml_file): if self.is_valid: xml_file.write('\\t<testcase classname=\"{}\"", "escape log = logging.getLogger() class Todo: def __init__(self, file_path, line_number,", "= False self.error_reason = error_reason def print(self, show_valid=False): if not", "Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany # Licensed", "def __init__(self, file_path, line_number, content): self.file_path = file_path self.line_number =", "Version 2.0 - see LICENSE.md in project root directory import", "error_reason): self.is_valid = False self.error_reason = error_reason def print(self, show_valid=False):", "file ' + self.file_path + ':' + str(self.line_number) + '", "= True self.error_reason = None def mark_as_invalid(self, error_reason): self.is_valid =", "= line_number self.content = content self.is_valid = True self.error_reason =", "xml.sax.saxutils import escape log = logging.getLogger() class Todo: def __init__(self,", "GmbH, Unterföhring, Germany # Licensed under the Apache License, Version", "self.content) def print_xml(self, xml_file): if self.is_valid: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\"", "in file ' + self.file_path + ':' + str(self.line_number) +", "True self.error_reason = None def __str__(self): return 'Todo in file", "line_number self.content = content self.is_valid = True self.error_reason = None", "%s' % self.file_path) log.error('[LINE] %s' % self.line_number) log.error('[CONTENT] %s' %", "import logging from xml.sax.saxutils import escape log = logging.getLogger() class", "self.is_valid = False self.error_reason = error_reason def print(self, show_valid=False): if", "self.content def mark_as_valid(self): self.is_valid = True self.error_reason = None def", "self.line_number) log.error('[CONTENT] %s' % self.content) def print_xml(self, xml_file): if self.is_valid:", "self.line_number = line_number self.content = content self.is_valid = True self.error_reason", "self.file_path) log.error('[LINE] %s' % self.line_number) log.error('[CONTENT] %s' % self.content) def", "log.error('[REASON] %s' % self.error_reason) log.error('[FILE] %s' % self.file_path) log.error('[LINE] %s'", "logging from xml.sax.saxutils import escape log = logging.getLogger() class Todo:", "if self.is_valid: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" />\\n'.format(self.file_path, self.line_number)) else: xml_file.write('\\t<testcase", "+ self.content def mark_as_valid(self): self.is_valid = True self.error_reason = None", "# Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany #", "from xml.sax.saxutils import escape log = logging.getLogger() class Todo: def", "log.error('[CONTENT] %s' % self.content) def print_xml(self, xml_file): if self.is_valid: xml_file.write('\\t<testcase", "__init__(self, file_path, line_number, content): self.file_path = file_path self.line_number = line_number", "project root directory import 
logging from xml.sax.saxutils import escape log", "# Licensed under the Apache License, Version 2.0 - see", "import escape log = logging.getLogger() class Todo: def __init__(self, file_path,", "%s' % self.error_reason) log.error('[FILE] %s' % self.file_path) log.error('[LINE] %s' %", "' + self.file_path + ':' + str(self.line_number) + ' |", "% self.error_reason) log.error('[FILE] %s' % self.file_path) log.error('[LINE] %s' % self.line_number)", "__str__(self): return 'Todo in file ' + self.file_path + ':'", "print(self, show_valid=False): if not show_valid and self.is_valid: return log.error('[REASON] %s'", "name=\"line {}\" />\\n'.format(self.file_path, self.line_number)) else: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" >\\n'.format(self.file_path,", "Licensed under the Apache License, Version 2.0 - see LICENSE.md", "classname=\"{}\" name=\"line {}\" />\\n'.format(self.file_path, self.line_number)) else: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\"", "None def __str__(self): return 'Todo in file ' + self.file_path", "False self.error_reason = error_reason def print(self, show_valid=False): if not show_valid", "self.is_valid: return log.error('[REASON] %s' % self.error_reason) log.error('[FILE] %s' % self.file_path)", "class Todo: def __init__(self, file_path, line_number, content): self.file_path = file_path", "self.content = content self.is_valid = True self.error_reason = None def", "self.is_valid = True self.error_reason = None def __str__(self): return 'Todo", "print_xml(self, xml_file): if self.is_valid: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" />\\n'.format(self.file_path, self.line_number))", "Apache License, Version 2.0 - see LICENSE.md in project root", "% self.content) def print_xml(self, xml_file): if self.is_valid: xml_file.write('\\t<testcase classname=\"{}\" name=\"line", "mark_as_invalid(self, error_reason): self.is_valid = False self.error_reason = error_reason def print(self,", "= file_path self.line_number = line_number self.content = content self.is_valid =", "- see LICENSE.md in project root directory import logging from", "+ str(self.line_number) + ' | ' + self.content def mark_as_valid(self):", "self.error_reason = None def __str__(self): return 'Todo in file '", "self.error_reason = None def mark_as_invalid(self, error_reason): self.is_valid = False self.error_reason", "TNG Technology Consulting GmbH, Unterföhring, Germany # Licensed under the", "None def mark_as_invalid(self, error_reason): self.is_valid = False self.error_reason = error_reason", "+ ':' + str(self.line_number) + ' | ' + self.content", "{}\" />\\n'.format(self.file_path, self.line_number)) else: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" >\\n'.format(self.file_path, self.line_number))", "else: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" >\\n'.format(self.file_path, self.line_number)) xml_file.write('\\t\\t<failure message=\"{}\">{}</failure>\\n'.format(self.error_reason, escape(self.content)))", "Consulting GmbH, Unterföhring, Germany # Licensed under the Apache License,", "the Apache License, Version 2.0 - see LICENSE.md in project", "line_number, content): self.file_path = file_path self.line_number = line_number self.content =", "Unterföhring, Germany # Licensed under the Apache License, Version 2.0", "%s' % self.line_number) log.error('[CONTENT] %s' % self.content) def print_xml(self, xml_file):", "xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" >\\n'.format(self.file_path, 
self.line_number)) xml_file.write('\\t\\t<failure message=\"{}\">{}</failure>\\n'.format(self.error_reason, escape(self.content))) xml_file.write('\\t</testcase>\\n')", "and self.is_valid: return log.error('[REASON] %s' % self.error_reason) log.error('[FILE] %s' %", "directory import logging from xml.sax.saxutils import escape log = logging.getLogger()", "def __str__(self): return 'Todo in file ' + self.file_path +", "self.error_reason = error_reason def print(self, show_valid=False): if not show_valid and", "def print(self, show_valid=False): if not show_valid and self.is_valid: return log.error('[REASON]", "xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" />\\n'.format(self.file_path, self.line_number)) else: xml_file.write('\\t<testcase classname=\"{}\" name=\"line", "show_valid=False): if not show_valid and self.is_valid: return log.error('[REASON] %s' %", "self.file_path = file_path self.line_number = line_number self.content = content self.is_valid", "= None def mark_as_invalid(self, error_reason): self.is_valid = False self.error_reason =", "content): self.file_path = file_path self.line_number = line_number self.content = content", "content self.is_valid = True self.error_reason = None def __str__(self): return", "LICENSE.md in project root directory import logging from xml.sax.saxutils import", "self.line_number)) else: xml_file.write('\\t<testcase classname=\"{}\" name=\"line {}\" >\\n'.format(self.file_path, self.line_number)) xml_file.write('\\t\\t<failure message=\"{}\">{}</failure>\\n'.format(self.error_reason,", "+ self.file_path + ':' + str(self.line_number) + ' | '", "if not show_valid and self.is_valid: return log.error('[REASON] %s' % self.error_reason)", "= content self.is_valid = True self.error_reason = None def __str__(self):", "logging.getLogger() class Todo: def __init__(self, file_path, line_number, content): self.file_path =" ]
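A short usage sketch for the class, assuming the surrounding tool collects `Todo` objects and emits a JUnit-style XML report. The file paths, the in-memory buffer, and the wrapping `<testsuite>` element are illustrative assumptions, not part of the source:

import io

todos = [
    Todo('app/main.py', 12, 'TODO: remove this workaround'),
    Todo('app/util.py', 48, 'TODO fix me'),
]
todos[1].mark_as_invalid('TODO has no issue reference')

report = io.StringIO()
report.write('<testsuite>\n')   # wrapper element is an assumption
for todo in todos:
    todo.print_xml(report)      # valid todos become self-closing testcases
    todo.print()                # logs details only for the invalid entry
report.write('</testsuite>\n')
print(report.getvalue())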
[ "dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'}) def contexts(df, months, level=None): return dcc.Graph(figure=fig_contexts_use(df,", "in month_range(months): yield trace_context_use(df[df.index.month == m], level, name=MONTH_NAMES[m-1]) pie_factory =", "html.H2(i[0], className='subtitle') ]) for i in indexes ]) def month_selector(df,", "df['Tipo Máquina'].value_counts() frame = pd.DataFrame({'Tipo de Máquina': machine_list}) frame[serie_name] =", "datetime import datetime, time from utils import MONTH_NAMES, month_range def", "yield trace_context_use(df[df.index.month == m], level, name=MONTH_NAMES[m-1]) pie_factory = take_month(months) try:", "children=[ html.Div(className='column is-four-fifths is-full-mobile', children=[ html.Div(className='level', children=[ html.H2(title, className='title') ]),", "months=month_range(months) def create_frame(df, serie_name): count = df.groupby('Tipo Máquina').sum()['Tiempo de uso", "months=months, stacked=stacked), style={'height': '80vh'}) def time_per_machine(df, months=None, stacked=False): return dcc.Graph(figure=fig_hours(df,", "'Router CNC': 3, 'Torno': 1, 'Cirqoid': 1} return (this_month['Dias'] *", "html.Footer(className='footer has-background-white', children=[ html.Div(className='content has-text-centered', children=[ html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'), html.P(className='is-size-7',", "grouped.sum()['Tiempo de uso en minutos'] return go.Pie(labels=context_data.index, values=context_data.values, **kwargs) def", "html.Section(className='hero is-fullheight', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[ html.Div(className='columns is-vcentered is-centered',", "== m], name) figure.add_trace(go.Bar(y=frame['Tipo de Máquina'], x=frame[name], name=name, hoverinfo='name+x', orientation='h'))", "datetime.now().month return html.Div(dcc.RangeSlider( id='month-range-slider', marks={i+1: MONTH_NAMES[i] for i in range(first_month-1,", "Chile', html.Br(), 'Desarrollado bajo licencia MIT' ]) ]) ]) def", "machine_list = df['Tipo Máquina'].unique() months = month_range(months) def create_frame(df, serie_name):", "= 3 row_count = math.ceil(len(month_range(months))/col_count) figure = make_subplots(row_count, col_count, specs=[[{'type':'domain'}", "temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values}) z_values = pd.DataFrame(z_dict).values", "stacked=False): machine_list = df['Tipo Máquina'].unique() months = month_range(months) def create_frame(df,", "is-5', children=[ html.Figure(className='image is-4by4', children=[ html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'), ]), ]),", "y=caps, name=machine, hovertext=texts)) figure.update_layout(barmode='group', yaxis=dict(type='linear', ticksuffix='%', title='Capacidad Utilizada')) return figure", "month) hours = total_cap // 60 used_cap = df[df.index.month==month].groupby('Tipo Máquina')['Tiempo", "['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes'] days = ['Monday', 'Tuesday', 'Wednesday',", "take_month(months) try: for r in range(row_count): for c in range(col_count):", "stacked and months: frame = create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( x=frame['Tipo de", "* machine_count[machine] def fig_total_capacity_2(df, month_caps, months): machine_list = df['Tipo Máquina'].unique()", "def trace_context_use(df, level=None, **kwargs): grouped = None if not level:", "'80vh'}) #def uses(df, months): # return dcc.Graph(figure=fig_uses(df, months), 
style={'height': '80vh'})", "children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[ html.Div(className='columns is-vcentered is-centered', children=[ html.Div(className='column", "for i in range(first_month-1, current_month)}, min=first_month, max=current_month, value=[current_month-2,current_month], pushable=1 ),", "trace_context_use(df, level=None, **kwargs): grouped = None if not level: grouped", "months=None, stacked=False): machine_list = df['Tipo Máquina'].unique() months=month_range(months) def create_frame(df, serie_name):", "\"\"}', children=[ html.H1(i[1], className='title'), html.H2(i[0], className='subtitle') ]) for i in", "x=frame['Tipo de Máquina'], y=frame['Total'], text=frame['Total'], textposition='top center', mode='text', showlegend=False, hoverinfo='skip'", "months=None, stacked=False): return dcc.Graph(figure=fig_records(df, months=months, stacked=stacked), style={'height': '80vh'}) def time_per_machine(df,", "Máquina'].unique() months = month_range(months) month_names = [MONTH_NAMES[m-1] for m in", "60 * machine_count[machine] def fig_total_capacity_2(df, month_caps, months): machine_list = df['Tipo", "month_caps[month_caps['Mes'] == month] machine_count = {'Impresora 3D': 5, 'Cortadora Láser':", "value=[current_month-2,current_month], pushable=1 ), className='slider-frame') def point_list(items): return html.Ul([html.Li(item) for item", "content, gray=False): return html.Section(className=f'hero is-fullheight is-medium {\"has-background-grey-lighter\" if gray else", "html.Div(className=f'column is-one-quarter index-container {\"unknown-data\" if i[1] == \"?\" else \"\"}',", "'Cirqoid': 1} return (this_month['Dias'] * this_month['Horas']).values[0] * 60 * machine_count[machine]", "machine in machine_list: texts = [] caps = [] for", "dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes'] days = ['Monday',", "'Viernes'] days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] data =", "de Máquina'], x=frame[name], name=name, hoverinfo='name+x', orientation='h')) if stacked and months:", "month_caps, months): machine_list = df['Tipo Máquina'].unique() months = month_range(months) month_names", "]) ]) ]) ]) def quality_index(df): indexes = df.sort_values('Valor', ascending=False).fillna('?').values", "in months: total_cap = cap_per_machine_per_month(month_caps, machine, month) hours = total_cap", "for i, d in enumerate(days): z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo", "machine, month) hours = total_cap // 60 used_cap = df[df.index.month==month].groupby('Tipo", "html import plotly.graph_objs as go import plotly.express as px from", "{\"has-background-grey-lighter\" if gray else \"\"}', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[", "'80vh'}) def machine_capacity(df, caps, months=None): return dcc.Graph(figure=fig_total_capacity_2(df, caps, months), style={'height':", "m], name) figure.add_trace(go.Bar(y=frame['Tipo de Máquina'], x=frame[name], name=name, hoverinfo='name+x', orientation='h')) if", "horas.') figure.add_trace(go.Bar(x=month_names, y=caps, name=machine, hovertext=texts)) figure.update_layout(barmode='group', yaxis=dict(type='linear', ticksuffix='%', title='Capacidad Utilizada'))", "months: total_cap = cap_per_machine_per_month(month_caps, machine, month) hours = total_cap //", "= create_frame(df[df.index.month == m], name) figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name,", "in range(col_count): 
figure.add_trace(next(pie_factory), r+1, c+1) except StopIteration as stop: pass", "stacked else 'group'} figure = go.Figure() for m in months:", "index-container {\"unknown-data\" if i[1] == \"?\" else \"\"}', children=[ html.H1(i[1],", "= df.groupby('Contexto 1') else: grouped = df[df['Contexto 1'] == level].groupby('Contexto", "= month_range(months) month_names = [MONTH_NAMES[m-1] for m in months] figure", "months = month_range(months) def create_frame(df, serie_name): count = df['Tipo Máquina'].value_counts()", "0) for machine in machine_list] return frame if months and", "{'barmode': 'relative' if stacked else 'group'} figure = go.Figure() for", "months=None, stacked=False): machine_list = df['Tipo Máquina'].unique() months = month_range(months) def", "times[times['Marca temporal'] == 'Monday']['Hora Inicio'].dt.time z_dict = dict() for i,", "children=[ html.Div(className='column is-5', children=[ html.Figure(className='image is-4by4', children=[ html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'),", "def take_month(months): for m in month_range(months): yield trace_context_use(df[df.index.month == m],", "is-vcentered is-centered', children=[ html.Div(className='column is-5', children=[ html.Figure(className='image is-4by4', children=[ html.Img(src='/indicadores/assets/logo.png',", "html.Br(), 'UTFSM Campus San Joaquín, Edificio C', html.Br(), 'Av. <NAME>", "{\"total\" if stacked else \"\"}'}, **extras) return figure def cap_per_machine_per_month(month_caps,", "'Thursday', 'Friday'] data = df[df.index.month.isin(month_range(months))] figure = go.Figure() times =", "== 'Monday']['Hora Inicio'].dt.time z_dict = dict() for i, d in", "r in range(row_count): for c in range(col_count): figure.add_trace(next(pie_factory), r+1, c+1)", "className='subtitle') ]) for i in indexes ]) def month_selector(df, first_month=None):", "de alguna manera... 
def fig_uses(df, months): dias = ['Lunes', 'Martes',", "def create_frame(df, serie_name): count = df['Tipo Máquina'].value_counts() frame = pd.DataFrame({'Tipo", "return dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height': '80vh'}) def machine_capacity(df, caps, months=None):", "== list: df = df[df.index.month.isin(months)] frame = create_frame(df, 'Total') figure", "MONTH_NAMES[i] for i in range(first_month-1, current_month)}, min=first_month, max=current_month, value=[current_month-2,current_month], pushable=1", "= df[df.index.month.isin(month_range(months))] figure = go.Figure() times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora", "figure = make_subplots(row_count, col_count, specs=[[{'type':'domain'} for c in range(col_count)] for", "c+1) except StopIteration as stop: pass return figure def records_per_machine(df,", "(this_month['Dias'] * this_month['Horas']).values[0] * 60 * machine_count[machine] def fig_total_capacity_2(df, month_caps,", "in items]) def first(): return html.Section(className='hero is-fullheight', children=[ html.Div(className='hero-body', children=[", "html.Div(className='columns is-centered', children=[ html.Div(className='column is-four-fifths is-full-mobile', children=[ html.Div(className='level', children=[ html.H2(title,", "figure.add_trace(go.Scatter( y=frame['Tipo de Máquina'], x=frame['Total'], text=frame['Total'], textposition='middle right', mode='text', showlegend=False,", "import plotly.graph_objs as go import plotly.express as px from plotly.subplots", "m], name) figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y')) if stacked", "{'Impresora 3D': 5, 'Cortadora Láser': 2, 'Router CNC': 3, 'Torno':", "]) def last(): return html.Footer(className='footer has-background-white', children=[ html.Div(className='content has-text-centered', children=[", "dash_html_components as html import plotly.graph_objs as go import plotly.express as", "df.groupby('Contexto 1') else: grouped = df[df['Contexto 1'] == level].groupby('Contexto 2')", "figure def cap_per_machine_per_month(month_caps, machine, month): this_month = month_caps[month_caps['Mes'] == month]", "as pd import math from datetime import datetime, time from", "machine in machine_list] return frame extras = {'barmode': 'relative' if", "last(): return html.Footer(className='footer has-background-white', children=[ html.Div(className='content has-text-centered', children=[ html.Img(src='/indicadores/assets/footer.png', alt='FabLab", "]), html.Div(className='column is-5 main-title', children=[ html.H1('Informe de Gestión de Operaciones',", "MONTH_NAMES[m-1] frame = create_frame(df[df.index.month == m], name) figure.add_trace(go.Bar(y=frame['Tipo de Máquina'],", "in months] figure = go.Figure() for machine in machine_list: texts", "= [] caps = [] for month in months: total_cap", "create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( x=frame['Tipo de Máquina'], y=frame['Total'], text=frame['Total'], textposition='top center',", "def create_frame(df, serie_name): count = df.groupby('Tipo Máquina').sum()['Tiempo de uso en", "months and type(months) == list: df = df[df.index.month.isin(months)] frame =", "minutos'].fillna(0).values}) z_values = pd.DataFrame(z_dict).values figure.add_trace(go.Heatmap( x=dias, y=day_times, z=z_values)) return figure", "name = MONTH_NAMES[m-1] frame = create_frame(df[df.index.month == m], name) figure.add_trace(go.Bar(y=frame['Tipo", "text=frame['Total'], 
textposition='top center', mode='text', showlegend=False, hoverinfo='skip' )) figure.update_layout(yaxis={ 'title': 'Número", "else 'group'} figure = go.Figure() for m in months: name", "= math.ceil(len(month_range(months))/col_count) figure = make_subplots(row_count, col_count, specs=[[{'type':'domain'} for c in", "stacked=False): return dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height': '80vh'}) def machine_capacity(df, caps,", "dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height': '80vh'}) def machine_capacity(df, caps, months=None): return", "def last(): return html.Footer(className='footer has-background-white', children=[ html.Div(className='content has-text-centered', children=[ html.Img(src='/indicadores/assets/footer.png',", "mode='text', showlegend=False, hoverinfo='skip' )) figure.update_layout(xaxis={ 'title': f'Horas de uso {\"total\"", "= go.Figure() for m in months: name = MONTH_NAMES[m-1] frame", "= [count.get(machine, 0) for machine in machine_list] return frame if", "5, 'Cortadora Láser': 2, 'Router CNC': 3, 'Torno': 1, 'Cirqoid':", "children=[ html.Div(className=f'column is-one-quarter index-container {\"unknown-data\" if i[1] == \"?\" else", "= create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( x=frame['Tipo de Máquina'], y=frame['Total'], text=frame['Total'], textposition='top", "'Martes', 'Miércoles', 'Jueves', 'Viernes'] days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',", "html.H2(title, className='title') ]), ] + content) ]) ]) ]) ])", "import dash_html_components as html import plotly.graph_objs as go import plotly.express", "Máquina'].unique() months = month_range(months) def create_frame(df, serie_name): count = df['Tipo", "utils import MONTH_NAMES, month_range def section(title, content, gray=False): return html.Section(className=f'hero", "month_range(months) def create_frame(df, serie_name): count = df['Tipo Máquina'].value_counts() frame =", "is-variable', children=[ html.Div(className=f'column is-one-quarter index-container {\"unknown-data\" if i[1] == \"?\"", "months = month_range(months) month_names = [MONTH_NAMES[m-1] for m in months]", "html.Br(), 'Desarrollado bajo licencia MIT' ]) ]) ]) def fig_records(df,", "first_month=None): current_month = datetime.now().month return html.Div(dcc.RangeSlider( id='month-range-slider', marks={i+1: MONTH_NAMES[i] for", "uses(df, months): # return dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'}) def contexts(df,", "= df['Tipo Máquina'].unique() months = month_range(months) def create_frame(df, serie_name): count", "de uso en minutos'].sum().divide(total_cap).multiply(100).round(2).get(machine, 0) caps.append(used_cap) texts.append(f'{used_cap}% utilizado de una", "and months: frame = create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( x=frame['Tipo de Máquina'],", "\"\"}'}, **extras) return figure def cap_per_machine_per_month(month_caps, machine, month): this_month =", "if stacked else 'group'} figure = go.Figure() for m in", "{'barmode': 'relative' if stacked else 'group'} for m in months:", "**extras) return figure def fig_hours(df, months=None, stacked=False): machine_list = df['Tipo", "'80vh'}) def time_per_machine(df, months=None, stacked=False): return dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height':", "name) figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y')) if stacked and", "for i in indexes 
]) def month_selector(df, first_month=None): current_month =", "df[df.index.month.isin(months)] frame = create_frame(df, 'Total') figure = go.Figure() extras =", "x=frame[name], name=name, hoverinfo='name+x', orientation='h')) if stacked and months: frame =", "= [MONTH_NAMES[m-1] for m in months] figure = go.Figure() for", "create_frame(df, serie_name): count = df['Tipo Máquina'].value_counts() frame = pd.DataFrame({'Tipo de", "text=frame['Total'], textposition='middle right', mode='text', showlegend=False, hoverinfo='skip' )) figure.update_layout(xaxis={ 'title': f'Horas", "is-centered', children=[ html.Div(className='column is-5', children=[ html.Figure(className='image is-4by4', children=[ html.Img(src='/indicadores/assets/logo.png', alt='FabLab", "enumerate(days): z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values})", "Máquina').sum()['Tiempo de uso en minutos'].divide(60).round(0) frame = pd.DataFrame({'Tipo de Máquina':", "return frame if months and type(months) == list: df =", "create_frame(df, 'Total') figure = go.Figure() extras = {'barmode': 'relative' if", "quality_index(df): indexes = df.sort_values('Valor', ascending=False).fillna('?').values return html.Div(className='columns is-multiline is-4 is-variable',", "== m], name) figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y')) if", "x=frame['Total'], text=frame['Total'], textposition='middle right', mode='text', showlegend=False, hoverinfo='skip' )) figure.update_layout(xaxis={ 'title':", "is-fullheight', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[ html.Div(className='columns is-vcentered is-centered', children=[", "def machine_capacity(df, caps, months=None): return dcc.Graph(figure=fig_total_capacity_2(df, caps, months), style={'height': '80vh'})", "de registros'}, **extras) return figure def fig_hours(df, months=None, stacked=False): machine_list", "return html.Footer(className='footer has-background-white', children=[ html.Div(className='content has-text-centered', children=[ html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'),", "month_range(months)]) def take_month(months): for m in month_range(months): yield trace_context_use(df[df.index.month ==", "z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values}) z_values", "en minutos'] return go.Pie(labels=context_data.index, values=context_data.values, **kwargs) def fig_contexts_use(df, months, level,", "machine_count[machine] def fig_total_capacity_2(df, month_caps, months): machine_list = df['Tipo Máquina'].unique() months", "months: frame = create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( x=frame['Tipo de Máquina'], y=frame['Total'],", "level: grouped = df.groupby('Contexto 1') else: grouped = df[df['Contexto 1']", "figure \"\"\" TODO: Terminar el heatmap de alguna manera... 
def", "stacked=False): machine_list = df['Tipo Máquina'].unique() months=month_range(months) def create_frame(df, serie_name): count", "name=name, hoverinfo='name+y')) if stacked and months: frame = create_frame(df[df.index.month.isin(months)], 'Total')", "texts = [] caps = [] for month in months:", "figure = go.Figure() times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index() day_times", "month_range(months): yield trace_context_use(df[df.index.month == m], level, name=MONTH_NAMES[m-1]) pie_factory = take_month(months)", "used_cap = df[df.index.month==month].groupby('Tipo Máquina')['Tiempo de uso en minutos'].sum().divide(total_cap).multiply(100).round(2).get(machine, 0) caps.append(used_cap)", "Terminar el heatmap de alguna manera... def fig_uses(df, months): dias", "z_dict = dict() for i, d in enumerate(days): z_dict.update({dias[i]: times[times['Marca", "]) ]) ]) def quality_index(df): indexes = df.sort_values('Valor', ascending=False).fillna('?').values return", "months=months, stacked=stacked), style={'height': '80vh'}) def machine_capacity(df, caps, months=None): return dcc.Graph(figure=fig_total_capacity_2(df,", "html.Br(), 'Av. <NAME> 3939, Santiago de Chile', html.Br(), 'Desarrollado bajo", "this_month = month_caps[month_caps['Mes'] == month] machine_count = {'Impresora 3D': 5,", "= [] for month in months: total_cap = cap_per_machine_per_month(month_caps, machine,", "df.groupby('Tipo Máquina').sum()['Tiempo de uso en minutos'].divide(60).round(0) frame = pd.DataFrame({'Tipo de", "stacked and months: frame = create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( y=frame['Tipo de", "**kwargs): col_count = 3 row_count = math.ceil(len(month_range(months))/col_count) figure = make_subplots(row_count,", "== \"?\" else \"\"}', children=[ html.H1(i[1], className='title'), html.H2(i[0], className='subtitle') ])", "pie_factory = take_month(months) try: for r in range(row_count): for c", "children=[ html.Div(className='content has-text-centered', children=[ html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'), html.P(className='is-size-7', children=[ 'FabLab", "children=[ html.Div(className='columns is-vcentered is-centered', children=[ html.Div(className='column is-5', children=[ html.Figure(className='image is-4by4',", "is-fullheight is-medium {\"has-background-grey-lighter\" if gray else \"\"}', children=[ html.Div(className='hero-body', children=[", "machine_list: texts = [] caps = [] for month in", "CNC': 3, 'Torno': 1, 'Cirqoid': 1} return (this_month['Dias'] * this_month['Horas']).values[0]", "Campus San Joaquín, Edificio C', html.Br(), 'Av. 
<NAME> 3939, Santiago", "= go.Figure() for machine in machine_list: texts = [] caps", "month_selector(df, first_month=None): current_month = datetime.now().month return html.Div(dcc.RangeSlider( id='month-range-slider', marks={i+1: MONTH_NAMES[i]", "figure = go.Figure() extras = {'barmode': 'relative' if stacked else", "0) caps.append(used_cap) texts.append(f'{used_cap}% utilizado de una capacidad total de {hours}", "for m in month_range(months): yield trace_context_use(df[df.index.month == m], level, name=MONTH_NAMES[m-1])", "stacked=stacked), style={'height': '80vh'}) def time_per_machine(df, months=None, stacked=False): return dcc.Graph(figure=fig_hours(df, months=months,", "'title': f'Horas de uso {\"total\" if stacked else \"\"}'}, **extras)", "c in range(col_count): figure.add_trace(next(pie_factory), r+1, c+1) except StopIteration as stop:", "60 used_cap = df[df.index.month==month].groupby('Tipo Máquina')['Tiempo de uso en minutos'].sum().divide(total_cap).multiply(100).round(2).get(machine, 0)", "textposition='middle right', mode='text', showlegend=False, hoverinfo='skip' )) figure.update_layout(xaxis={ 'title': f'Horas de", "return figure def cap_per_machine_per_month(month_caps, machine, month): this_month = month_caps[month_caps['Mes'] ==", "= take_month(months) try: for r in range(row_count): for c in", "return figure \"\"\" TODO: Terminar el heatmap de alguna manera...", "def first(): return html.Section(className='hero is-fullheight', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[", "= month_range(months) def create_frame(df, serie_name): count = df['Tipo Máquina'].value_counts() frame", "MONTH_NAMES, month_range def section(title, content, gray=False): return html.Section(className=f'hero is-fullheight is-medium", "r+1, c+1) except StopIteration as stop: pass return figure def", "= pd.DataFrame({'Tipo de Máquina': machine_list}) frame[serie_name] = [count.get(machine, 0) for", "import MONTH_NAMES, month_range def section(title, content, gray=False): return html.Section(className=f'hero is-fullheight", "import math from datetime import datetime, time from utils import", "f'Horas de uso {\"total\" if stacked else \"\"}'}, **extras) return", "min=first_month, max=current_month, value=[current_month-2,current_month], pushable=1 ), className='slider-frame') def point_list(items): return html.Ul([html.Li(item)", "className='title') ]) ]) ]), ]) ]) def last(): return html.Footer(className='footer", "frame = create_frame(df[df.index.month == m], name) figure.add_trace(go.Bar(y=frame['Tipo de Máquina'], x=frame[name],", "html.Figure(className='image is-4by4', children=[ html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'), ]), ]), html.Div(className='column is-5", "for m in months: name = MONTH_NAMES[m-1] frame = create_frame(df[df.index.month", "import plotly.express as px from plotly.subplots import make_subplots import pandas", "alt='FabLab UTFSM'), ]), ]), html.Div(className='column is-5 main-title', children=[ html.H1('Informe de", "def fig_hours(df, months=None, stacked=False): machine_list = df['Tipo Máquina'].unique() months=month_range(months) def", "create_frame(df, serie_name): count = df.groupby('Tipo Máquina').sum()['Tiempo de uso en minutos'].divide(60).round(0)", "= MONTH_NAMES[m-1] frame = create_frame(df[df.index.month == m], name) figure.add_trace(go.Bar(y=frame['Tipo de", "frame if months and type(months) == list: df = df[df.index.month.isin(months)]", "def cap_per_machine_per_month(month_caps, machine, 
month): this_month = month_caps[month_caps['Mes'] == month] machine_count", "1') else: grouped = df[df['Contexto 1'] == level].groupby('Contexto 2') context_data", "month_names = [MONTH_NAMES[m-1] for m in months] figure = go.Figure()", "is-centered', children=[ html.Div(className='column is-four-fifths is-full-mobile', children=[ html.Div(className='level', children=[ html.H2(title, className='title')", "is-multiline is-4 is-variable', children=[ html.Div(className=f'column is-one-quarter index-container {\"unknown-data\" if i[1]", "de uso en minutos'] return go.Pie(labels=context_data.index, values=context_data.values, **kwargs) def fig_contexts_use(df,", "html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'), ]), ]), html.Div(className='column is-5 main-title', children=[ html.H1('Informe", "* this_month['Horas']).values[0] * 60 * machine_count[machine] def fig_total_capacity_2(df, month_caps, months):", "// 60 used_cap = df[df.index.month==month].groupby('Tipo Máquina')['Tiempo de uso en minutos'].sum().divide(total_cap).multiply(100).round(2).get(machine,", "for machine in machine_list: texts = [] caps = []", "times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values}) z_values =", "for c in range(col_count)] for r in range(row_count)], subplot_titles=[MONTH_NAMES[m-1] for", "as go import plotly.express as px from plotly.subplots import make_subplots", "marks={i+1: MONTH_NAMES[i] for i in range(first_month-1, current_month)}, min=first_month, max=current_month, value=[current_month-2,current_month],", "children=[ html.Figure(className='image is-4by4', children=[ html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'), ]), ]), html.Div(className='column", "None if not level: grouped = df.groupby('Contexto 1') else: grouped", "children=[ html.Div(className='columns is-centered', children=[ html.Div(className='column is-four-fifths is-full-mobile', children=[ html.Div(className='level', children=[", "figure.add_trace(go.Heatmap( x=dias, y=day_times, z=z_values)) return figure \"\"\" def trace_context_use(df, level=None,", "for c in range(col_count): figure.add_trace(next(pie_factory), r+1, c+1) except StopIteration as", "manera... def fig_uses(df, months): dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves',", "from datetime import datetime, time from utils import MONTH_NAMES, month_range", "de uso en minutos'].fillna(0).values}) z_values = pd.DataFrame(z_dict).values figure.add_trace(go.Heatmap( x=dias, y=day_times,", "else \"\"}', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[ html.Div(className='columns is-centered', children=[", "yaxis=dict(type='linear', ticksuffix='%', title='Capacidad Utilizada')) return figure \"\"\" TODO: Terminar el", "= go.Figure() times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index() day_times =", "]) ]) ]) def fig_records(df, months=None, stacked=False): machine_list = df['Tipo", "return frame extras = {'barmode': 'relative' if stacked else 'group'}", "el heatmap de alguna manera... 
def fig_uses(df, months): dias =", "name=name, hoverinfo='name+x', orientation='h')) if stacked and months: frame = create_frame(df[df.index.month.isin(months)],", "def fig_total_capacity_2(df, month_caps, months): machine_list = df['Tipo Máquina'].unique() months =", "return figure \"\"\" def trace_context_use(df, level=None, **kwargs): grouped = None", "name=MONTH_NAMES[m-1]) pie_factory = take_month(months) try: for r in range(row_count): for", "== month] machine_count = {'Impresora 3D': 5, 'Cortadora Láser': 2,", "count = df.groupby('Tipo Máquina').sum()['Tiempo de uso en minutos'].divide(60).round(0) frame =", "col_count = 3 row_count = math.ceil(len(month_range(months))/col_count) figure = make_subplots(row_count, col_count,", "]) def quality_index(df): indexes = df.sort_values('Valor', ascending=False).fillna('?').values return html.Div(className='columns is-multiline", "de Máquina'], y=frame[name], name=name, hoverinfo='name+y')) if stacked and months: frame", "else \"\"}'}, **extras) return figure def cap_per_machine_per_month(month_caps, machine, month): this_month", "df[df.index.month.isin(month_range(months))] figure = go.Figure() times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index()", "#def uses(df, months): # return dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'}) def", "uso en minutos'].divide(60).round(0) frame = pd.DataFrame({'Tipo de Máquina': machine_list}) frame[serie_name]", "y=frame['Tipo de Máquina'], x=frame['Total'], text=frame['Total'], textposition='middle right', mode='text', showlegend=False, hoverinfo='skip'", "count = df['Tipo Máquina'].value_counts() frame = pd.DataFrame({'Tipo de Máquina': machine_list})", "machine_list = df['Tipo Máquina'].unique() months = month_range(months) month_names = [MONTH_NAMES[m-1]", "html.Div(className='column is-four-fifths is-full-mobile', children=[ html.Div(className='level', children=[ html.H2(title, className='title') ]), ]", "is-medium {\"has-background-grey-lighter\" if gray else \"\"}', children=[ html.Div(className='hero-body', children=[ html.Div(className='container',", "una capacidad total de {hours} horas.') figure.add_trace(go.Bar(x=month_names, y=caps, name=machine, hovertext=texts))", "2, 'Router CNC': 3, 'Torno': 1, 'Cirqoid': 1} return (this_month['Dias']", "math.ceil(len(month_range(months))/col_count) figure = make_subplots(row_count, col_count, specs=[[{'type':'domain'} for c in range(col_count)]", "frame = create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( y=frame['Tipo de Máquina'], x=frame['Total'], text=frame['Total'],", "stacked else 'group'} for m in months: name = MONTH_NAMES[m-1]", "in month_range(months)]) def take_month(months): for m in month_range(months): yield trace_context_use(df[df.index.month", "'Miércoles', 'Jueves', 'Viernes'] days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']", "go.Pie(labels=context_data.index, values=context_data.values, **kwargs) def fig_contexts_use(df, months, level, **kwargs): col_count =", "def records_per_machine(df, months=None, stacked=False): return dcc.Graph(figure=fig_records(df, months=months, stacked=stacked), style={'height': '80vh'})", "]) ]), ]) ]) def last(): return html.Footer(className='footer has-background-white', children=[", "UTFSM'), ]), ]), html.Div(className='column is-5 main-title', children=[ html.H1('Informe de Gestión", "caps, months), style={'height': '80vh'}) #def uses(df, months): # return 
dcc.Graph(figure=fig_uses(df,", ")) figure.update_layout(xaxis={ 'title': f'Horas de uso {\"total\" if stacked else", "months): machine_list = df['Tipo Máquina'].unique() months = month_range(months) month_names =", "current_month = datetime.now().month return html.Div(dcc.RangeSlider( id='month-range-slider', marks={i+1: MONTH_NAMES[i] for i", "for r in range(row_count)], subplot_titles=[MONTH_NAMES[m-1] for m in month_range(months)]) def", "from utils import MONTH_NAMES, month_range def section(title, content, gray=False): return", "first(): return html.Section(className='hero is-fullheight', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[ html.Div(className='columns", "and type(months) == list: df = df[df.index.month.isin(months)] frame = create_frame(df,", "from plotly.subplots import make_subplots import pandas as pd import math", "html.Div(className='content has-text-centered', children=[ html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'), html.P(className='is-size-7', children=[ 'FabLab UTFSM", "]) def month_selector(df, first_month=None): current_month = datetime.now().month return html.Div(dcc.RangeSlider( id='month-range-slider',", "in months: name = MONTH_NAMES[m-1] frame = create_frame(df[df.index.month == m],", "in machine_list: texts = [] caps = [] for month", "for r in range(row_count): for c in range(col_count): figure.add_trace(next(pie_factory), r+1,", "= df[df.index.month.isin(months)] frame = create_frame(df, 'Total') figure = go.Figure() extras", "children=[ html.H2(title, className='title') ]), ] + content) ]) ]) ])", "is-one-quarter index-container {\"unknown-data\" if i[1] == \"?\" else \"\"}', children=[", "return dcc.Graph(figure=fig_total_capacity_2(df, caps, months), style={'height': '80vh'}) #def uses(df, months): #", "'Friday'] data = df[df.index.month.isin(month_range(months))] figure = go.Figure() times = data.groupby([data.index.weekday_name,", "as stop: pass return figure def records_per_machine(df, months=None, stacked=False): return", "cap_per_machine_per_month(month_caps, machine, month) hours = total_cap // 60 used_cap =", "return html.Section(className=f'hero is-fullheight is-medium {\"has-background-grey-lighter\" if gray else \"\"}', children=[", "\"\"}', children=[ html.Div(className='hero-body', children=[ html.Div(className='container', children=[ html.Div(className='columns is-centered', children=[ html.Div(className='column", "extras = {'barmode': 'relative' if stacked else 'group'} for m", "= create_frame(df[df.index.month.isin(months)], 'Total') figure.add_trace(go.Scatter( y=frame['Tipo de Máquina'], x=frame['Total'], text=frame['Total'], textposition='middle", "def time_per_machine(df, months=None, stacked=False): return dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height': '80vh'})", "{hours} horas.') figure.add_trace(go.Bar(x=month_names, y=caps, name=machine, hovertext=texts)) figure.update_layout(barmode='group', yaxis=dict(type='linear', ticksuffix='%', title='Capacidad", "fig_records(df, months=None, stacked=False): machine_list = df['Tipo Máquina'].unique() months = month_range(months)", "mode='text', showlegend=False, hoverinfo='skip' )) figure.update_layout(yaxis={ 'title': 'Número de registros'}, **extras)", "for m in month_range(months)]) def take_month(months): for m in month_range(months):", "pushable=1 ), className='slider-frame') def point_list(items): return html.Ul([html.Li(item) for item in", "html.Div(className='column is-5 
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import pandas as pd
import math
from datetime import datetime, time
from utils import MONTH_NAMES, month_range


def section(title, content, gray=False):
    return html.Section(className=f'hero is-fullheight is-medium {"has-background-grey-lighter" if gray else ""}', children=[
        html.Div(className='hero-body', children=[
            html.Div(className='container', children=[
                html.Div(className='columns is-centered', children=[
                    html.Div(className='column is-four-fifths is-full-mobile', children=[
                        html.Div(className='level', children=[
                            html.H2(title, className='title')
                        ]),
                    ] + content)
                ])
            ])
        ])
    ])


def quality_index(df):
    indexes = df.sort_values('Valor', ascending=False).fillna('?').values
    return html.Div(className='columns is-multiline is-4 is-variable', children=[
        html.Div(className=f'column is-one-quarter index-container {"unknown-data" if i[1] == "?" else ""}', children=[
            html.H1(i[1], className='title'),
            html.H2(i[0], className='subtitle')
        ])
        for i in indexes
    ])


def month_selector(df, first_month=None):
    current_month = datetime.now().month
    return html.Div(dcc.RangeSlider(
        id='month-range-slider',
        marks={i+1: MONTH_NAMES[i] for i in range(first_month-1, current_month)},
        min=first_month,
        max=current_month,
        value=[current_month-2, current_month],
        pushable=1
    ), className='slider-frame')


def point_list(items):
    return html.Ul([html.Li(item) for item in items])
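# --- Illustrative sketch, not part of the original module ---------------------
# How the slider built by month_selector() might drive the figures defined
# further below. `app` (a dash.Dash instance), `df`, and the container id
# 'records-container' are assumptions introduced purely for this example.
def register_month_callback(app, df):
    from dash.dependencies import Input, Output

    @app.callback(Output('records-container', 'children'),
                  [Input('month-range-slider', 'value')])
    def update_records(month_value):
        # The RangeSlider emits [first, last]; fig_records() expands it with
        # month_range() and draws one bar series per selected month.
        return records_per_machine(df, months=month_value, stacked=True)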
def first():
    return html.Section(className='hero is-fullheight', children=[
        html.Div(className='hero-body', children=[
            html.Div(className='container', children=[
                html.Div(className='columns is-vcentered is-centered', children=[
                    html.Div(className='column is-5', children=[
                        html.Figure(className='image is-4by4', children=[
                            html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'),
                        ]),
                    ]),
                    html.Div(className='column is-5 main-title', children=[
                        html.H1('Informe de Gestión de Operaciones', className='title')
                    ])
                ])
            ]),
        ])
    ])


def last():
    return html.Footer(className='footer has-background-white', children=[
        html.Div(className='content has-text-centered', children=[
            html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'),
            html.P(className='is-size-7', children=[
                'FabLab UTFSM 2019', html.Br(),
                'UTFSM Campus San Joaquín, Edificio C', html.Br(),
                'Av. <NAME> 3939, Santiago de Chile', html.Br(),
                'Desarrollado bajo licencia MIT'
            ])
        ])
    ])
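# --- Illustrative sketch, not part of the original module ---------------------
# The helpers above compose into a full report page roughly like this; the
# section titles and list entries are invented for the example.
def build_layout():
    return html.Div([
        first(),
        section('Resumen', [point_list(['Uso por máquina', 'Capacidad utilizada'])]),
        section('Detalle', [point_list(['Contextos de uso'])], gray=True),
        last(),
    ])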
def fig_records(df, months=None, stacked=False):
    machine_list = df['Tipo Máquina'].unique()
    months = month_range(months)

    def create_frame(df, serie_name):
        count = df['Tipo Máquina'].value_counts()
        frame = pd.DataFrame({'Tipo de Máquina': machine_list})
        frame[serie_name] = [count.get(machine, 0) for machine in machine_list]
        return frame

    if months and type(months) == list:
        df = df[df.index.month.isin(months)]

    frame = create_frame(df, 'Total')
    figure = go.Figure()
    extras = {'barmode': 'relative' if stacked else 'group'}
    for m in months:
        name = MONTH_NAMES[m-1]
        frame = create_frame(df[df.index.month == m], name)
        figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y'))
    if stacked and months:
        frame = create_frame(df[df.index.month.isin(months)], 'Total')
        figure.add_trace(go.Scatter(
            x=frame['Tipo de Máquina'], y=frame['Total'], text=frame['Total'],
            textposition='top center', mode='text', showlegend=False, hoverinfo='skip'
        ))
    figure.update_layout(yaxis={'title': 'Número de registros'}, **extras)
    return figure
def fig_hours(df, months=None, stacked=False):
    machine_list = df['Tipo Máquina'].unique()
    months = month_range(months)

    def create_frame(df, serie_name):
        count = df.groupby('Tipo Máquina').sum()['Tiempo de uso en minutos'].divide(60).round(0)
        frame = pd.DataFrame({'Tipo de Máquina': machine_list})
        frame[serie_name] = [count.get(machine, 0) for machine in machine_list]
        return frame

    extras = {'barmode': 'relative' if stacked else 'group'}
    figure = go.Figure()
    for m in months:
        name = MONTH_NAMES[m-1]
        frame = create_frame(df[df.index.month == m], name)
        figure.add_trace(go.Bar(y=frame['Tipo de Máquina'], x=frame[name], name=name, hoverinfo='name+x', orientation='h'))
    if stacked and months:
        frame = create_frame(df[df.index.month.isin(months)], 'Total')
        figure.add_trace(go.Scatter(
            y=frame['Tipo de Máquina'], x=frame['Total'], text=frame['Total'],
            textposition='middle right', mode='text', showlegend=False, hoverinfo='skip'
        ))
    figure.update_layout(xaxis={'title': f'Horas de uso {"total" if stacked else ""}'}, **extras)
    return figure
def cap_per_machine_per_month(month_caps, machine, month):
    """Total capacity in minutes for a machine type in a given month.

    Worked example (assumed figures): a month with 20 working days of 9 hours
    and the 5 available 3D printers gives 20 * 9 * 60 * 5 = 54000 minutes of
    'Impresora 3D' capacity.
    """
    this_month = month_caps[month_caps['Mes'] == month]
    machine_count = {'Impresora 3D': 5, 'Cortadora Láser': 2, 'Router CNC': 3, 'Torno': 1, 'Cirqoid': 1}
    return (this_month['Dias'] * this_month['Horas']).values[0] * 60 * machine_count[machine]


def fig_total_capacity_2(df, month_caps, months):
    machine_list = df['Tipo Máquina'].unique()
    months = month_range(months)
    month_names = [MONTH_NAMES[m-1] for m in months]
    figure = go.Figure()
    for machine in machine_list:
        texts = []
        caps = []
        for month in months:
            total_cap = cap_per_machine_per_month(month_caps, machine, month)
            hours = total_cap // 60
            used_cap = df[df.index.month == month].groupby('Tipo Máquina')['Tiempo de uso en minutos'].sum().divide(total_cap).multiply(100).round(2).get(machine, 0)
            caps.append(used_cap)
            texts.append(f'{used_cap}% utilizado de una capacidad total de {hours} horas.')
        figure.add_trace(go.Bar(x=month_names, y=caps, name=machine, hovertext=texts))
    figure.update_layout(barmode='group', yaxis=dict(type='linear', ticksuffix='%', title='Capacidad Utilizada'))
    return figure


"""
TODO: Finish the heatmap somehow...
def fig_uses(df, months):
    dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes']
    days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
    data = df[df.index.month.isin(month_range(months))]
    figure = go.Figure()
    times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index()
    day_times = times[times['Marca temporal'] == 'Monday']['Hora Inicio'].dt.time
    z_dict = dict()
    for i, d in enumerate(days):
        z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values})
    z_values = pd.DataFrame(z_dict).values
    figure.add_trace(go.Heatmap(x=dias, y=day_times, z=z_values))
    return figure
"""


def trace_context_use(df, level=None, **kwargs):
    if not level:
        grouped = df.groupby('Contexto 1')
    else:
        grouped = df[df['Contexto 1'] == level].groupby('Contexto 2')
    context_data = grouped.sum()['Tiempo de uso en minutos']
    return go.Pie(labels=context_data.index, values=context_data.values, **kwargs)


def fig_contexts_use(df, months, level, **kwargs):
    col_count = 3
    row_count = math.ceil(len(month_range(months)) / col_count)
    figure = make_subplots(row_count, col_count,
                           specs=[[{'type': 'domain'} for c in range(col_count)] for r in range(row_count)],
                           subplot_titles=[MONTH_NAMES[m-1] for m in month_range(months)])

    def take_month(months):
        for m in month_range(months):
            yield trace_context_use(df[df.index.month == m], level, name=MONTH_NAMES[m-1])

    pie_factory = take_month(months)
    try:
        for r in range(row_count):
            for c in range(col_count):
                figure.add_trace(next(pie_factory), r+1, c+1)
    except StopIteration:
        # Fewer pies than grid cells: the remaining cells stay empty.
        pass
    return figure


def records_per_machine(df, months=None, stacked=False):
    return dcc.Graph(figure=fig_records(df, months=months, stacked=stacked), style={'height': '80vh'})


def time_per_machine(df, months=None, stacked=False):
    return dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height': '80vh'})


def machine_capacity(df, caps, months=None):
    return dcc.Graph(figure=fig_total_capacity_2(df, caps, months), style={'height': '80vh'})


#def uses(df, months):
#    return dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'})


def contexts(df, months, level=None):
    return dcc.Graph(figure=fig_contexts_use(df, months, level), style={'height': '80vh'})
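# Worked example for the fig_contexts_use() grid above (added illustration,
# not original commentary): five selected months with col_count = 3 give
# row_count = ceil(5/3) = 2; take_month() then yields five pies that land in
# cells (1,1), (1,2), (1,3), (2,1) and (2,2), and the caught StopIteration
# leaves cell (2,3) empty.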
# -*- coding: utf-8 -*-
from gengine.app.tests.base import BaseDBTest
from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language
from gengine.metadata import DBSession
from gengine.app.model import AuthUser


class TestUserCreation(BaseDBTest):

    def test_user_creation(self):
        lang = get_or_create_language("en")
        user = create_user(
            lat=12.1,
            lng=12.2,
            #country="RO",
            #region="Transylvania",
            #city="Cluj-Napoca",
            timezone="Europe/Bukarest",
            language="en",
            additional_public_data={
                "first_name": "Rudolf",
                "last_name": "<NAME>"
            }
        )

        self.assertTrue(user.lat == 12.1)
        self.assertTrue(user.lng == 12.2)
        #self.assertTrue(user.country == "RO")
        #self.assertTrue(user.region == "Transylvania")
        #self.assertTrue(user.city == "Cluj-Napoca")
        self.assertTrue(user.timezone == "Europe/Bukarest")
        self.assertTrue(user.language_id == lang.id)
        self.assertTrue(user.additional_public_data["first_name"] == "Rudolf")
        self.assertTrue(user.additional_public_data["last_name"] == "<NAME>")
    def test_user_updation(self):
        lang = get_or_create_language("en")
        user = create_user()
        user = update_user(
            user_id=user.id,
            lat=14.2,
            lng=16.3,
            #country="EN",
            #region="Transylvania",
            #city="Cluj-Napoca",
            timezone="Europe/Bukarest",
            language="en",
            additional_public_data={
                "first_name": "Rudolf",
                "last_name": "<NAME>"
            }
        )

        # Correct cases
        self.assertTrue(user.lat == 14.2)
        self.assertTrue(user.lng == 16.3)
        #self.assertTrue(user.country == "EN")
        #self.assertTrue(user.region == "Transylvania")
        #self.assertTrue(user.city == "Cluj-Napoca")
        self.assertTrue(user.timezone == "Europe/Bukarest")
        self.assertTrue(user.language_id == lang.id)

    def test_user_deletion(self):
        user1 = create_user()

        # Create second user
        user2 = create_user(
            lat=85.59,
            lng=65.75,
            #country="DE",
            #region="Niedersachsen",
            #city="Osnabrück",
            timezone="Europe/Berlin",
            language="de",
            additional_public_data={
                "first_name": "Michael",
                "last_name": "Clarke"
            },
            friends=[1]
        )

        remaining_users = delete_user(
            user_id=user1.id
        )

        # Correct cases
        self.assertNotIn(user1.id, remaining_users)
        self.assertEqual(user2.id, remaining_users[0].id)
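    # Note (added commentary, not from the original suite): delete_user() is
    # expected to return the surviving user rows, so the deletion test above
    # checks both that user1.id is absent and that user2 is the single
    # remaining entry.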
self.assertTrue(user.lng == 12.2) #self.assertTrue(user.country == \"RO\") #self.assertTrue(user.region == \"Transylvania\")", "self.assertTrue(user.language_id == lang.id) def test_user_deletion(self): user1 = create_user() # Create", "class TestUserCreation(BaseDBTest): def test_user_creation(self): lang = get_or_create_language(\"en\") user = create_user(", "auth_user.user_id = user.id auth_user.password = \"<PASSWORD>\" auth_user.active = True auth_user.email", "\"last_name\": \"Clarke\" }, friends=[1] ) remaining_users = delete_user( user_id =", "#region = \"Transylvania\", #city = \"Cluj-Napoca\", timezone = \"Europe/Bukarest\", language", "= \"<PASSWORD>\" auth_user.active = True auth_user.email = \"<EMAIL>\" DBSession.add(auth_user) iscorrect", "user = create_user( lat = 12.1, lng = 12.2, #country", "= \"EN\", #region = \"Transylvania\", #city = \"Cluj-Napoca\", timezone =", "\"en\", additional_public_data = { \"first_name\" : \"Rudolf\", \"last_name\" : \"<NAME>\"", "== \"EN\") #self.assertTrue(user.region == \"Transylvania\") #self.assertTrue(user.city == \"Cluj-Napoca\") self.assertTrue(user.timezone ==", ") # Correct cases self.assertNotIn(user1.id, remaining_users) self.assertEqual(user2.id, remaining_users[0].id) def test_verify_password(self):", "== lang.id) self.assertTrue(user.additional_public_data[\"first_name\"] == \"Rudolf\") self.assertTrue(user.additional_public_data[\"last_name\"] == \"<NAME>\") def test_user_updation(self):", "create_user( lat=85.59, lng=65.75, #country=\"DE\", #region=\"Niedersachsen\", #city=\"Osnabrück\", timezone=\"Europe/Berlin\", language=\"de\", additional_public_data={ \"first_name\":", "lng=65.75, #country=\"DE\", #region=\"Niedersachsen\", #city=\"Osnabrück\", timezone=\"Europe/Berlin\", language=\"de\", additional_public_data={ \"first_name\": \"Michael\", \"last_name\":", "#region=\"Niedersachsen\", #city=\"Osnabrück\", timezone=\"Europe/Berlin\", language=\"de\", additional_public_data={ \"first_name\": \"Michael\", \"last_name\": \"Clarke\" },", "from gengine.app.tests.base import BaseDBTest from gengine.app.tests.helpers import create_user, update_user, delete_user,", "test_user_updation(self): lang = get_or_create_language(\"en\") user = create_user() user = update_user(", "#city=\"Osnabrück\", timezone=\"Europe/Berlin\", language=\"de\", additional_public_data={ \"first_name\": \"Michael\", \"last_name\": \"Clarke\" }, friends=[1]", "BaseDBTest from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language from gengine.metadata", "AuthUser class TestUserCreation(BaseDBTest): def test_user_creation(self): lang = get_or_create_language(\"en\") user =", "True auth_user.email = \"<EMAIL>\" DBSession.add(auth_user) iscorrect = auth_user.verify_password(\"<PASSWORD>\") self.assertEqual(iscorrect, True)", "\"<NAME>\" } ) self.assertTrue(user.lat == 12.1) self.assertTrue(user.lng == 12.2) #self.assertTrue(user.country", "\"Cluj-Napoca\") self.assertTrue(user.timezone == \"Europe/Bukarest\") self.assertTrue(user.language_id == lang.id) def test_user_deletion(self): user1", "True) def test_create_token(self): user = create_user() auth_user = AuthUser() auth_user.user_id", "TestUserCreation(BaseDBTest): def test_user_creation(self): lang = get_or_create_language(\"en\") user = create_user( lat", "== \"Transylvania\") #self.assertTrue(user.city == \"Cluj-Napoca\") self.assertTrue(user.timezone == \"Europe/Bukarest\") self.assertTrue(user.language_id ==", "14.2, lng = 16.3, #country = \"EN\", #region = 
\"Transylvania\",", "== \"Europe/Bukarest\") self.assertTrue(user.language_id == lang.id) self.assertTrue(user.additional_public_data[\"first_name\"] == \"Rudolf\") self.assertTrue(user.additional_public_data[\"last_name\"] ==", "get_or_create_language(\"en\") user = create_user() user = update_user( user_id = user.id,", "\"first_name\" : \"Rudolf\", \"last_name\" : \"<NAME>\" } ) self.assertTrue(user.lat ==", "timezone=\"Europe/Berlin\", language=\"de\", additional_public_data={ \"first_name\": \"Michael\", \"last_name\": \"Clarke\" }, friends=[1] )", "self.assertTrue(user.additional_public_data[\"last_name\"] == \"<NAME>\") def test_user_updation(self): lang = get_or_create_language(\"en\") user =", "= AuthUser() auth_user.password = \"<PASSWORD>\" auth_user.active = True auth_user.email =", "== 16.3) #self.assertTrue(user.country == \"EN\") #self.assertTrue(user.region == \"Transylvania\") #self.assertTrue(user.city ==", "\"Europe/Bukarest\") self.assertTrue(user.language_id == lang.id) def test_user_deletion(self): user1 = create_user() #", "auth_user.password = \"<PASSWORD>\" auth_user.active = True auth_user.email = \"<EMAIL>\" DBSession.add(auth_user)", "== lang.id) def test_user_deletion(self): user1 = create_user() # Create Second", "lat = 12.1, lng = 12.2, #country = \"RO\", #region", "language=\"de\", additional_public_data={ \"first_name\": \"Michael\", \"last_name\": \"Clarke\" }, friends=[1] ) remaining_users", "#country=\"DE\", #region=\"Niedersachsen\", #city=\"Osnabrück\", timezone=\"Europe/Berlin\", language=\"de\", additional_public_data={ \"first_name\": \"Michael\", \"last_name\": \"Clarke\"", "# Create Second user user2 = create_user( lat=85.59, lng=65.75, #country=\"DE\",", "lang.id) self.assertTrue(user.additional_public_data[\"first_name\"] == \"Rudolf\") self.assertTrue(user.additional_public_data[\"last_name\"] == \"<NAME>\") def test_user_updation(self): lang", "= get_or_create_language(\"en\") user = create_user() user = update_user( user_id =", "\"Rudolf\", \"last_name\" : \"<NAME>\" } ) self.assertTrue(user.lat == 12.1) self.assertTrue(user.lng", "# Correct cases self.assertTrue(user.lat == 14.2) self.assertTrue(user.lng == 16.3) #self.assertTrue(user.country", "= user1.id ) # Correct cases self.assertNotIn(user1.id, remaining_users) self.assertEqual(user2.id, remaining_users[0].id)", "= \"Europe/Bukarest\", language = \"en\", additional_public_data = { \"first_name\" :", "def test_verify_password(self): auth_user = AuthUser() auth_user.password = \"<PASSWORD>\" auth_user.active =", "test_verify_password(self): auth_user = AuthUser() auth_user.password = \"<PASSWORD>\" auth_user.active = True", "\"RO\", #region = \"Transylvania\", #city = \"Cluj-Napoca\", timezone = \"Europe/Bukarest\",", "timezone = \"Europe/Bukarest\", language = \"en\", additional_public_data = { \"first_name\"", "gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language from gengine.metadata import DBSession", ": \"<NAME>\" } ) # Correct cases self.assertTrue(user.lat == 14.2)", "== 14.2) self.assertTrue(user.lng == 16.3) #self.assertTrue(user.country == \"EN\") #self.assertTrue(user.region ==", ") self.assertTrue(user.lat == 12.1) self.assertTrue(user.lng == 12.2) #self.assertTrue(user.country == \"RO\")", "#country = \"RO\", #region = \"Transylvania\", #city = \"Cluj-Napoca\", timezone", "= user.id, lat = 14.2, lng = 16.3, #country =", "= create_user( lat = 12.1, lng = 12.2, #country =", "cases self.assertNotIn(user1.id, remaining_users) 
self.assertEqual(user2.id, remaining_users[0].id) def test_verify_password(self): auth_user = AuthUser()", "remaining_users = delete_user( user_id = user1.id ) # Correct cases", "get_or_create_language from gengine.metadata import DBSession from gengine.app.model import AuthUser class", "\"EN\", #region = \"Transylvania\", #city = \"Cluj-Napoca\", timezone = \"Europe/Bukarest\",", "14.2) self.assertTrue(user.lng == 16.3) #self.assertTrue(user.country == \"EN\") #self.assertTrue(user.region == \"Transylvania\")", "# Correct cases self.assertNotIn(user1.id, remaining_users) self.assertEqual(user2.id, remaining_users[0].id) def test_verify_password(self): auth_user", "auth_user = AuthUser() auth_user.password = \"<PASSWORD>\" auth_user.active = True auth_user.email", "self.assertTrue(user.additional_public_data[\"first_name\"] == \"Rudolf\") self.assertTrue(user.additional_public_data[\"last_name\"] == \"<NAME>\") def test_user_updation(self): lang =", "user.id, lat = 14.2, lng = 16.3, #country = \"EN\",", "self.assertEqual(iscorrect, True) def test_create_token(self): user = create_user() auth_user = AuthUser()" ]
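# The last two tests above exercise the AuthUser credential flow end to
# end. Below is a minimal sketch that condenses the same flow into one
# helper. It assumes only the API the tests already use (verify_password,
# get_or_create_token) plus a configured DBSession; issue_token itself is
# a hypothetical name, not part of gengine.
from gengine.app.model import AuthUser
from gengine.metadata import DBSession


def issue_token(user_id, email, password):
    """Create an active AuthUser and return a token if the password verifies."""
    auth_user = AuthUser()
    auth_user.user_id = user_id
    auth_user.password = password  # assumed to be hashed by the model's setter
    auth_user.active = True
    auth_user.email = email
    DBSession.add(auth_user)
    if auth_user.verify_password(password):
        return auth_user.get_or_create_token()
    return None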
[ "define subsets of that variable font that only include some", "``ruleDescriptor`` to :attr:`rules`.\"\"\" self.rules.append(ruleDescriptor) def addRuleDescriptor(self, **kwargs): \"\"\"Instantiate a new", "as a string. E.g. \"4.0\" \"\"\" self.elidedFallbackName: Optional[str] = None", "None: vfElement = ET.Element('variable-font') vfElement.attrib['name'] = vf.name if vf.filename is", "path=None, font=None, name=None, location=None, designLocation=None, layerName=None, familyName=None, styleName=None, localisedFamilyName=None, copyLib=False,", "arguments, and returns a new font object loaded from the", "self.axisDefaults: # In case the document contains no axis definitions,", "variation axis in a VF. .. code:: python a2 =", "must only have design locations (using xvalue=\"\").') sourceObject.location = designLocation", "a string. E.g. \"4.0\" \"\"\" self.elidedFallbackName: Optional[str] = None \"\"\"STAT", "can have different glyph sets. \"\"\" self.values: List[float] = values", "is its original file name, i.e. the last part of", "it can be efficient to have it right here. Varlib.", "in self.map if k == value), value) def map_backward(self, value):", "= int(labelElement.attrib[\"ordering\"]) for label in labelElement.findall(\".label\"): axisObject.axisLabels.append(self.readAxisLabel(label)) self.documentObject.axes.append(axisObject) self.axisDefaults[axisObject.name] =", "{}) \"\"\"dict. Axis values for this instance, in design space", "location or None. The default location is the set of", "is not None: ruleElement.attrib['name'] = ruleObject.name for conditions in ruleObject.conditionSets:", "return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue) else: xml_attrs = {'name', 'userminimum', 'userdefault', 'usermaximum'}", "s1.mutedGlyphNames.append(\"Z\") doc.addSource(s1) \"\"\" flavor = \"source\" _attrs = ['filename', 'path',", "list: - ``locationLabel``: the location along this axis is the", "minVersion def _makeLocationElement(self, locationObject, name=None): \"\"\" Convert Location dict to", "= True @classmethod def fromstring(cls, string, documentObject): f = BytesIO(tobytes(string,", "is None and cond.get('maximum') is None: # neither is defined,", "location is determined for each axis independently by taking the", "sourceObject in self.documentObject.sources: self._addSource(sourceObject) if self.documentObject.variableFonts: variableFontsElement = ET.Element(\"variable-fonts\") for", "{} \"\"\"dict. Axis values for this instance, in user space", "taken by the axis, nothing in-between. \"\"\" def map_forward(self, value):", "instanceObject) for libElement in instanceElement.findall('lib'): self.readLibElement(libElement, instanceObject) self.documentObject.instances.append(instanceObject) def readLibElement(self,", "in [(minimum, minimum), (maximum, maximum)]. Varlib. \"\"\" self.axisOrdering = axisOrdering", "self._addLib(self.root, self.documentObject.lib, 2) tree = ET.ElementTree(self.root) tree.write( self.path, encoding=encoding, method='xml',", "dict(...)]) r1.subs.append((\"a\", \"a.alt\")) .. code:: xml <!-- optional: list of", "and then adding them to the document. 
This makes it", "label in self.locationLabels: if label.name == name: return label return", "in self.documentObject.variableFonts: self._addVariableFont(variableFontsElement, variableFont) self.root.append(variableFontsElement) if self.documentObject.instances: self.root.append(ET.Element(\"instances\")) for instanceObject", "subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault) elif isinstance(subset, ValueAxisSubsetDescriptor): subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue) subsetsElement.append(subsetElement)", "*, tag=None, name=None, labelNames=None, minimum=None, default=None, maximum=None, hidden=False, map=None, axisOrdering=None,", "= ET.Element('axis-subsets') for subset in vf.axisSubsets: subsetElement = ET.Element('axis-subset') subsetElement.attrib['name']", "addVariableFont(self, variableFontDescriptor: VariableFontDescriptor): \"\"\"Add the given ``variableFontDescriptor`` to :attr:`variableFonts`. ..", "is None: raise DesignSpaceDocumentError( \"The axis-subset element for a discrete", "or instance.userLocation for instance in self.documentObject.instances ) ): if minVersion", "ET.Element('axis-subsets') for subset in vf.axisSubsets: subsetElement = ET.Element('axis-subset') subsetElement.attrib['name'] =", "conflict between the given filename, and the path. So we", "def __init__(self, documentPath, documentObject): self.path = documentPath self.documentObject = documentObject", "and the copy. .. versionadded:: 5.0 \"\"\" fonts = [source.font", "with the same source.path only once loaded = {} fonts", "in unicodes.split(\" \")] glyphData['unicodes'] = unicodes except ValueError: raise DesignSpaceDocumentError(\"unicode", "new :class:`AxisDescriptor` using the given ``kwargs`` and add it to", "have needed 1 DesignSpace per such variable font, and a", "ET.Element('map') mapElement.attrib['input'] = self.intOrFloat(inputValue) mapElement.attrib['output'] = self.intOrFloat(outputValue) axisElement.append(mapElement) if axisObject.axisOrdering", "or after other glyph substitution features. - False: before -", "from the document path and the string in the filename", "executed rules for example). MutatorMath. .. deprecated:: 5.0 Use rules", "not exist. MutatorMath. \"\"\" self.font = font \"\"\"Same as :attr:`SourceDescriptor.font`", "in the element attribute localisedFamilyNameElement = ET.Element('familyname') localisedFamilyNameElement.attrib[XML_LANG] = code", "anisotropic locations, only the xvalue is used. .. versionadded:: 5.0", "Family name of this source. Though this data can be", "new_path = '/' + new_path elif path.startswith(r'\\\\'): # The above", "lang in labelNameElement.items(): if key == XML_LANG: axisObject.labelNames[lang] = tostr(labelNameElement.text)", "the instances. MutatorMath. .. 
deprecated:: 5.0 \"\"\" self.copyFeatures = copyFeatures", "identify it during the build process and from other parts", "axisSubsets=axisSubsets, lib=lib, ) self.documentObject.variableFonts.append(variableFont) def readAxisSubset(self, element: ET.Element): if \"uservalue\"", "and locationLabel is not None: raise DesignSpaceDocumentError('instance element must have", "\"en\": continue # already stored in the element attribute localisedStyleNameElement", "obj=None): self.msg = msg self.obj = obj def __str__(self): return", "source font that is loaded in memory, as a Python", "if self.documentObject.locationLabels: labelsElement = ET.Element(\"labels\") for labelObject in self.documentObject.locationLabels: self._addLocationLabel(labelsElement,", "posixpath.join(*path.split(os.path.sep)) if path.startswith('/'): # The above transformation loses absolute paths", "+ Varlib. \"\"\" self.designLocation = designLocation if designLocation is not", "{} for axis in doc.axes: if axis.name in self.designLocation: result[axis.name]", "need the axis data to do the scaling, so we", "if layerName is not None: sourceObject.layerName = layerName for libElement", "for this instance. MutatorMath + Varlib. \"\"\" self.styleMapStyleName = styleMapStyleName", "the copy. .. versionadded:: 5.0 \"\"\" fonts = [source.font for", "def getStyleMapFamilyName(self, languageCode=\"en\"): return self.localisedStyleMapFamilyName.get(languageCode) def clearLocation(self, axisName: Optional[str] =", "data associated with this variable font.\"\"\" class RangeAxisSubsetDescriptor(SimpleDescriptor): \"\"\"Subset of", "name=name, filename=filename, axisSubsets=axisSubsets, lib=lib, ) self.documentObject.variableFonts.append(variableFont) def readAxisSubset(self, element: ET.Element):", "= styleName for familyNameElement in sourceElement.findall('familyname'): for key, lang in", "glyphNames: swap = False for a, b in rule.subs: if", "import copy import itertools import math import os import posixpath", "have user locations (using uservalue=\"\").') elidable = True if labelElement.get(\"elidable\")", "object. Optional. Points to a representation of this source font", "for this source, in design space coordinates. MutatorMath + Varlib.", "memory, as a Python object (e.g. a ``defcon.Font`` or a", "this instance. .. seealso:: :meth:`getFullDesignLocation` .. versionadded:: 5.0 \"\"\" return", "labelElement.attrib['oldersibling'] = \"true\" self._addLabelNames(labelElement, label.labelNames) self._addLocationElement(labelElement, userLocation=label.userLocation) parentElement.append(labelElement) def _addLocationElement(", "the writer class to make us a new sourceDescriptor.\"\"\" return", "newMap.append((inputValue, newOutputValue)) if newMap: axis.map = newMap # finally the", "= tag \"\"\"string. Four letter tag for this axis. Some", "posix(path): \"\"\"Normalize paths using forward slash to work also on", "LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontDescriptorClass = VariableFontDescriptor", "stray rule conditions outside a conditionset. \" \"Wrapped them in", "is ugly. 
The 'print' is inappropriate here, and instead of", "glyphElement.attrib[\"name\"] = name glyphElement.attrib[\"mute\"] = '1' sourceElement.append(glyphElement) if self.effectiveFormatTuple >=", "instanceObject = self.instanceDescriptorClass() instanceObject.path = instancePath # absolute path to", "if swap: newNames.append(b) else: newNames.append(name) glyphNames = newNames newNames =", "axis will get in user space. MutatorMath + Varlib. \"\"\"", "math import os import posixpath from io import BytesIO, StringIO", "return f.getvalue() def read(self, path): \"\"\"Read a designspace file from", "the version specified in the document, or a sufficiently recent", "attribute is not None: skip it. \"\"\" if masters: for", "table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format of", "% unicodes) for noteElement in glyphElement.findall('.note'): glyphData['note'] = noteElement.text break", "xvalue is used. .. versionadded:: 5.0 \"\"\" return { axis.name:", "labelNames): for languageCode, labelName in sorted(labelNames.items()): languageElement = ET.Element('labelname') languageElement.attrib[XML_LANG]", "first clear all the fields, then change the field(s) for", "= info \"\"\"bool. Indicated if this instance needs the interpolating", "discreteAxes = [] rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = [] for axis", "covers the whole space. In version 5 and above documents,", "also need to read any conditions that are outside of", "finally: for source, font in zip(self.sources, fonts): source.font = font", "= ET.Element('sub') subElement.attrib['name'] = sub[0] subElement.attrib['with'] = sub[1] ruleElement.append(subElement) if", "empty if clearing everything). In order to update the location", "dictionary of localised style map familyname strings, keyed by language", "'name', 'elidable', 'oldersibling', 'linkeduservalue'} unknown_attrs = set(element.attrib) - xml_attrs if", "must have a name attribute.\") valueStr = element.get(\"uservalue\") if valueStr", "= False elif encoding is None or encoding == \"utf-8\":", "and userMaximum is not None: return self.rangeAxisSubsetDescriptorClass( name=name, userMinimum=float(userMinimum), userDefault=float(userDefault),", "{} designLoc = {} for dimensionElement in locationElement.findall(\".dimension\"): dimName =", "text shaping/OpenType layout, as they are part of the `Required", "conditions matches the given location. - If a condition has", "should be hidden in user interfaces. \"\"\" self.map = map", "languageCodes = list(instanceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code", "required positional argument, the source.path, and an optional list of", "'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels'] def __init__( self,", "this variable font.\"\"\" class RangeAxisSubsetDescriptor(SimpleDescriptor): \"\"\"Subset of a continuous axis", "</rules> \"\"\" _attrs = ['name', 'conditionSets', 'subs'] # what do", "== '1': sourceObject.copyLib = True for groupsElement in sourceElement.findall('.groups'): if", "was validating and filling in the location # dict while", "`OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_ .. versionadded:: 5.0 \"\"\" self.axisLabels:", "{} def setFamilyName(self, familyName, languageCode=\"en\"): \"\"\"Setter for :attr:`localisedFamilyName` .. versionadded::", "location labels. 
The source of truth for this instance's location", "keyed by language code. If present, will be used to", "specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__. Privately-defined axis tags must begin with an uppercase", "of outlines from which deltas would apply, as this axis", "k, v in self.map if k == value), value) def", "specified, assume the same default value as the full axis.", "fromfile(cls, path, readerClass=None, writerClass=None): \"\"\"Read a designspace file from ``path``", "field in this list: - ``locationLabel``: the location along this", "stored in the element attribute localisedFamilyNameElement = ET.Element('familyname') localisedFamilyNameElement.attrib[XML_LANG] =", "your own data. Respect the data stored by others. \"\"\"", "user coordinates at which to freeze the given axis.\"\"\" class", "if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] else: result[axis.name] =", "[] discreteAxes = [] rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = [] for", "would apply, as this axis does not interpolate. - it", "have a filename attr. useless, but no reason to interfere.", "as a filename in case the filename property is empty.", "self.updatePaths() writer = self.writerClass(path, self) writer.write() def _posixRelativePath(self, otherPath): relative", "designLocation[axis.name] if isinstance(value, tuple): dimElement.attrib['xvalue'] = self.intOrFloat(value[0]) dimElement.attrib['yvalue'] = self.intOrFloat(value[1])", "full design location. See :meth:`getFullDesignLocation()` .. versionadded:: 5.0 \"\"\" self.layerName", "\"\"\" self.layerName = layerName \"\"\"string. The name of the layer", "sourceObject.location = designLocation layerName = sourceElement.attrib.get('layer') if layerName is not", "set. rules = [] rulesElement = self.root.find(\".rules\") if rulesElement is", "of the `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_. See ref:`rules-element`", "See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_ .. versionadded:: 5.0 \"\"\"", "5.0 \"\"\" return { axis.name: self.userLocation.get(axis.name, axis.default) for axis in", "trigger conditionally in some parts of the designspace. .. code::", "return None label = doc.getLocationLabel(self.locationLabel) if label is None: raise", "or {} \"\"\"Custom data associated with this instance.\"\"\" @property def", "of font objects in the order they appear in the", "(maximum, maximum)]. Varlib. \"\"\" self.axisOrdering = axisOrdering \"\"\"STAT table field", "of ``values``. Example: an Italic axis with 2 stops, Roman", "= labelName parentElement.append(languageElement) def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) ->", "continue # already stored in the element attribute localisedStyleNameElement =", "not None: lib = plistlib.fromtree(libElement[0]) variableFont = self.variableFontsDescriptorClass( name=name, filename=filename,", "res finally: for source, font in zip(self.sources, fonts): source.font =", "axisSubsets = [] for axisSubset in axisSubsetsElement.iterfind(\".axis-subset\"): axisSubsets.append(self.readAxisSubset(axisSubset)) lib =", "from :attr:`labelNames` or the :attr:`name`.\"\"\" return self.labelNames.get(\"en\") or self.name def", "in the location dicts. MutatorMath + Varlib. 
\"\"\" # names", "5.0 \"\"\" return self.localisedFamilyName.get(languageCode) def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:", "to trigger conditionally in some parts of the designspace. ..", "glyphs need special masters (to record the results of executed", "- From a single DesignSpace with discrete axes, define 1", "not None and userMaximum is not None: return self.rangeAxisSubsetDescriptorClass( name=name,", "variable font that only include some axes and freeze other", "axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): \"\"\"Add the given ``axisDescriptor`` to :attr:`axes`.\"\"\" self.axes.append(axisDescriptor)", "= BytesIO() encoding = \"UTF-8\" xml_declaration = True else: raise", "from which deltas would apply, as this axis does not", "getattr(other, attr)) except AssertionError: print(\"failed attribute\", attr, getattr(self, attr), \"!=\",", "import LogMixin from fontTools.misc.textTools import tobytes, tostr \"\"\" designSpaceDocument -", "of the font.lib need to be copied to the instances.", "MutatorMath + Varlib. .. seealso:: This may be only part", "axis values, and they are assumed to be the default.", "cond['minimum']}).get(cond['name']) else: minimum = None if cond.get('maximum') is not None:", "\"last\"}: raise DesignSpaceDocumentError( \"<rules> processing attribute value is not valid:", "axis.map_forward(self.userLocation[axis.name]) else: result[axis.name] = axis.map_forward(axis.default) return result def getFullUserLocation(self, doc:", "if m.get('font') is not None: masterElement.attrib['source'] = m.get('font') if m.get('location')", "name=None, labelNames=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): # opentype tag", "explicit design location along this axis, possibly anisotropic. - ``userLocation[axisName]``:", "\"\"\"Returns the designspace as a string. Default encoding ``utf-8``.\"\"\" if", "also on Windows.\"\"\" new_path = posixpath.join(*path.split(os.path.sep)) if path.startswith('/'): # The", "as tuples of glyphnames, e.g. (\"a\", \"a.alt\"). - Note: By", "sourceElement.findall('.groups'): if groupsElement.attrib.get('copy') == '1': sourceObject.copyGroups = True for infoElement", "= linkedUserValue \"\"\"STAT field ``linkedValue`` (format 3).\"\"\" self.labelNames: MutableMapping[str, str]", "stored by others. \"\"\" self.default: Optional[str] = None \"\"\"Name of", "xvalue is set. .. versionadded:: 5.0 \"\"\" return { axis.name:", "the original and the copy. .. versionadded:: 5.0 \"\"\" fonts", "them in the axis.minimum axis.minimum = minimum axis.maximum = maximum", "in sorted(instanceObject.glyphs.items()): glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data) glyphsElement.append(glyphElement) if", "readSources(self): for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")): filename = sourceElement.attrib.get('filename') if", "the groups need to be copied to the instances. MutatorMath.", "= name familyname = instanceElement.attrib.get('familyname') if familyname is not None:", "if masters: for descriptor in self.sources: if descriptor.filename is not", "be copied to the instances. MutatorMath. .. deprecated:: 5.0 \"\"\"", "for this axis. 
Contrary to continuous axes, only the values", "element.get(\"name\") if name is None: raise DesignSpaceDocumentError(\"label element must have", "is assumed user space is the same as design space,", "len(name) < 4: tag = name + \"*\" * (4", "name=\"b\"> <master location=\"location-token-bbb\" source=\"master-token-aaa2\"/> <master glyphname=\"b.alt1\" location=\"location-token-ccc\" source=\"master-token-aaa3\"/> <note> This", "always using forward slashes.\"\"\" def getter(self): # Normal getter return", "{} glyphName = glyphElement.attrib.get('name') if glyphName is None: raise DesignSpaceDocumentError(\"Glyph", "the values: .. code:: python instance.clearLocation('Weight') instance.designLocation['Weight'] = (34, 36.5)", "f = BytesIO() encoding = \"UTF-8\" xml_declaration = True else:", "def _addLocationElement( self, parentElement, *, designLocation: AnisotropicLocationDict = None, userLocation:", "doc: 'DesignSpaceDocument') -> SimpleLocationDict: \"\"\"Get the complete user location of", "v in self.map if k == value), value) def map_backward(self,", "\"\"\" self.filename: str = filename \"\"\"string, optional. Relative path to", "userValue \"\"\"Value in user coordinates at which to freeze the", "self.root.append(ET.Element(\"instances\")) for instanceObject in self.documentObject.instances: self._addInstance(instanceObject) if self.documentObject.lib: self._addLib(self.root, self.documentObject.lib,", "contains unknown attributes: {', '.join(unknown_attrs)}\") name = variableFontElement.get(\"name\") if name", "have the same attributes. Reader and Writer objects can be", "to be muted. MutatorMath only. \"\"\" self.mutedGlyphNames = mutedGlyphNames or", "versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) \"\"\" if", "not None: noteElement = ET.Element('note') noteElement.text = data.get('note') glyphElement.append(noteElement) if", "however it can't become a variation axis in a VF.", "for axisObject in self.documentObject.axes: self._addAxis(axisObject) if self.documentObject.locationLabels: labelsElement = ET.Element(\"labels\")", "self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items(): if axisName in validatedLocation:", "in sorted(labelNames.items()): languageElement = ET.Element('labelname') languageElement.attrib[XML_LANG] = languageCode languageElement.text =", "% ruleName if ruleName is not None else \"\")) cds.append(cd)", "a in self._attrs] attrs = indent('\\n'.join(attrs), ' ') return f\"{self.__class__.__name__}(\\n{attrs}\\n)\"", "of user space to design space coordinates. If no map", "= glyphElement.attrib.get('unicode') if unicodes is not None: try: unicodes =", "conditionSetElement, ruleName, ) if conditionSet is not None: ruleObject.conditionSets.append(conditionSet) for", "self.olderSibling: bool = olderSibling \"\"\"STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT", "of conditions. - Each condition is a dict with ``name``,", "ET.Element('labels') if axisObject.axisOrdering is not None: labelsElement.attrib['ordering'] = str(axisObject.axisOrdering) for", "conditions have minimum or maximum values, do not add the", "- read and write designspace files \"\"\" __all__ = [", "s1.location = dict(weight=0) s1.familyName = \"MasterFamilyName\" s1.styleName = \"MasterStyleNameOne\" s1.localisedFamilyName", "value ] d[attr] = value return d class SimpleDescriptor(AsDictMixin): \"\"\"", "have different glyph sets. 
\"\"\" self.values: List[float] = values or", "instanceObject.locationLabel if instanceObject.familyName is not None: instanceElement.attrib['familyname'] = instanceObject.familyName if", "is read from the disk, this is the full path", "to reference this rule data.\"\"\" # list of lists of", "have a name attribute.\") userMinimum = element.get(\"userminimum\") userDefault = element.get(\"userdefault\")", "some other value for filename, it should be fine case", "f'unknown location label `{self.locationLabel}` in instance `{self.name}`.' ) return label", "for < maximum. - If a condition has no maximum,", "= \"instance.ufo2\" # anisotropic location i2.designLocation = dict(weight=500, width=(400,300)) i2.postScriptFontName", "in (axis.minimum, axis.default, axis.maximum) ] new[axis.name] = normalizeValue(value, triple) return", "designLocation is not None else location or {} \"\"\"dict. Axis", "XML_LANG: familyName = familyNameElement.text sourceObject.setFamilyName(familyName, lang) designLocation, userLocation = self.locationFromElement(sourceElement)", "tag=None, name=None, labelNames=None, values=None, default=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ):", "user should only clear that axis, then edit the values:", "= [] for cond in conditions: if cond.get('minimum') is not", "\"*\" * (4 - len(name)) else: tag = name[:4] return", "glyphNames): \"\"\"Apply these rules at this location to these glyphnames.", "None ): locElement = ET.Element(\"location\") for axis in self.documentObject.axes: if", "\"\"\"Represents the rule descriptor element: a set of glyph substitutions", "\"\"\" flavor = \"axis-subset\" _attrs = ('name', 'userValue') def __init__(self,", "to be able to encode what the document contains. \"\"\"", "subElement in ruleElement.findall('.sub'): a = subElement.attrib['name'] b = subElement.attrib['with'] ruleObject.subs.append((a,", "for axis, value in zip(discreteAxes, values)]) variableFonts.append(VariableFontDescriptor( name=f\"{basename}{axisNames}\", axisSubsets=rangeAxisSubsets +", "\"\"\" source = self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source) return source def addInstance(self, instanceDescriptor:", "Format userValue userMinimum userMaximum linkedUserValue =========== ========= =========== =========== ===============", "by ``filename`` and store it in this field, or write", "3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format of the Axis value depends", "axisLabels or [] \"\"\"STAT table entries for Axis Value Tables", "axisName in validatedLocation: # only accept values we know validatedLocation[axisName]", "</note> </glyph> \"\"\" glyphData = {} glyphName = glyphElement.attrib.get('name') if", "def setStyleMapStyleName(self, styleMapStyleName, languageCode=\"en\"): self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName) def getStyleMapStyleName(self, languageCode=\"en\"):", "of the Designspace. Use-cases: - From a single DesignSpace with", "applied before or after other glyph substitution features. - False:", "relative path to the source file, **as it is in", "a Python object (e.g. a ``defcon.Font`` or a ``fontTools.ttFont.TTFont``). The", "the instance, used to identify it if it needs to", "in axisElement.findall('map'): a = float(mapElement.attrib['input']) b = float(mapElement.attrib['output']) axisObject.map.append((a, b))", "in instance `{self.name}`.' 
) return label def getFullDesignLocation(self, doc: 'DesignSpaceDocument')", "free-floating location (format 4). All values are user values. See:", "raise DesignSpaceDocumentError(\"No axes defined\") userLoc = {} designLoc = {}", "ValueAxisSubsetDescriptor]] = axisSubsets or [] \"\"\"Axis subsets to include in", "is not None: locationElement, m['location'] = self._makeLocationElement(m.get('location')) masterElement.append(locationElement) mastersElement.append(masterElement) glyphElement.append(mastersElement)", "design location of this source, from its :attr:`designLocation` and the", "\"\"\" class ValueAxisSubsetDescriptor(SimpleDescriptor): \"\"\"Single value of a discrete or continuous", "# the axes for axis in self.axes: # scale the", "``axisDescriptor`` to :attr:`axes`.\"\"\" self.axes.append(axisDescriptor) def addAxisDescriptor(self, **kwargs): \"\"\"Instantiate a new", "descriptor, we have to do the right thing for the", "_addVariableFont(self, parentElement: ET.Element, vf: VariableFontDescriptor) -> None: vfElement = ET.Element('variable-font')", "this field, or write the contents of this field to", "ET.Element('label') labelElement.attrib['name'] = label.name if label.elidable: labelElement.attrib['elidable'] = \"true\" if", "else location or {} \"\"\"dict. Axis values for this source,", "sourceObject.copyLib = True for groupsElement in sourceElement.findall('.groups'): if groupsElement.attrib.get('copy') ==", "in this list can be taken by the axis, nothing", "output XML would be non-deterministic. # https://github.com/LettError/designSpaceDocument/issues/10 loc = collections.OrderedDict()", "while writing it out, as preserved below. locationElement, sourceObject.location =", "name familyname = instanceElement.attrib.get('familyname') if familyname is not None: instanceObject.familyName", "*, name, userLocation, elidable=False, olderSibling=False, labelNames=None, ): self.name: str =", "MutatorMath + Varlib. This may be only part of the", "float(cdMin) else: # will allow these to be None, assume", "of localised stylename strings, keyed by language code. \"\"\" self.localisedStyleMapFamilyName", "import Any, Dict, List, MutableMapping, Optional, Tuple, Union from fontTools.misc", "= [] \"\"\"List of this document's instances.\"\"\" self.lib: Dict =", "styleMapStyleName=None, localisedFamilyName=None, localisedStyleName=None, localisedStyleMapFamilyName=None, localisedStyleMapStyleName=None, glyphs=None, kerning=True, info=True, lib=None, ):", "in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor): \"\"\"Container for discrete axis data. Use", "binary fonts, including extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args: opener (Callable):", "5.0 \"\"\" if self.locationLabel is None: return None label =", "self.documentObject.rules: self._addRule(ruleObject) if self.documentObject.sources: self.root.append(ET.Element(\"sources\")) for sourceObject in self.documentObject.sources: self._addSource(sourceObject)", "f = BytesIO(tobytes(string, encoding=\"utf-8\")) self = cls(f, documentObject) self.path =", "libElement is not None: lib = plistlib.fromtree(libElement[0]) variableFont = self.variableFontsDescriptorClass(", "or ``None`` if no such label exists. .. 
versionadded:: 5.0", "next(numbers, 0) return (major, minor) def getVariableFonts(self) -> List[VariableFontDescriptor]: \"\"\"Return", "None cd['name'] = conditionElement.attrib.get(\"name\") # # test for things if", "instanceObject.styleMapFamilyName if instanceObject.styleMapStyleName is not None: instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName if", "for that axis. .. versionadded:: 5.0 \"\"\" self.locationLabel = None", "of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor` to \"snapshot\" or \"freeze\". \"\"\"", "given ``kwargs`` and add it to :attr:`axes`. The axis will", "writerClass=None): self.path = None \"\"\"String, optional. When the document is", "\"\"\" self.elidedFallbackName: Optional[str] = None \"\"\"STAT Style Attributes Header field", "'values', 'default', 'map', 'axisOrdering', 'axisLabels') def __init__( self, *, tag=None,", "\"\"\"string, required. Name of this variable to identify it during", "target variable font. If not specified, assume the same maximum", "_writeGlyphElement(self, instanceElement, instanceObject, glyphName, data): glyphElement = ET.Element('glyph') if data.get('mute'):", "item.location = self.normalizeLocation(item.location) # the axes for axis in self.axes:", "Optional[float] = userMinimum \"\"\"STAT field ``rangeMinValue`` (format 2).\"\"\" self.userValue: float", "strings, even if they only contain ASCII characters. \"\"\" self.hidden", "5.0 \"\"\" flavor = \"variable-font\" _attrs = ('filename', 'axisSubsets', 'lib')", "def readAxisSubset(self, element: ET.Element): if \"uservalue\" in element.attrib: xml_attrs =", "between the given filename, and the path. So we know", "attribute names are usually camelCased, the corresponding `XML <document-xml-structure>`_ attributes", "python r1 = RuleDescriptor() r1.name = \"unique.rule.name\" r1.conditionSets.append([dict(name=\"weight\", minimum=-10, maximum=10),", "the :attr:`name`.\"\"\" return self.labelNames.get(\"en\") or self.name def getFullUserLocation(self, doc: 'DesignSpaceDocument')", "float] class InstanceDescriptor(SimpleDescriptor): \"\"\"Simple container for data related to the", "self.familyName = familyName \"\"\"string. Family name of this source. Though", "'.join(unknown_attrs)}\") name = element.get(\"name\") if name is None: raise DesignSpaceDocumentError(\"axis-subset", "tree = ET.parse(self.path) self.root = tree.getroot() self.documentObject.formatVersion = self.root.attrib.get(\"format\", \"3.0\")", "= familyNameElement.text instanceObject.setFamilyName(familyName, lang) for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'): for key,", "default value for this axis, i.e. 
when a new location", "a1.axisOrdering = 1 a1.axisLabels = [ AxisLabelDescriptor(name=\"Regular\", userValue=400, elidable=True) ]", "addInstance(self, instanceDescriptor: InstanceDescriptor): \"\"\"Add the given ``instanceDescriptor`` to :attr:`instances`.\"\"\" self.instances.append(instanceDescriptor)", "doc.variableFonts doc.instances doc.lib \"\"\" def __init__(self, readerClass=None, writerClass=None): self.path =", "and axis.name in designLocation: dimElement = ET.Element('dimension') dimElement.attrib['name'] = axis.name", "readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True): instanceElements = self.root.findall('.instances/instance') for instanceElement in", "newInstanceDescriptor(self): \"\"\"Ask the writer class to make us a new", "normalizeValue(value, triple) return new def normalize(self): \"\"\" Normalise the geometry", "before comparing # it against the SourceDescriptor locations (always in", "are not integers\" % unicodes) for noteElement in glyphElement.findall('.note'): glyphData['note']", "= [int(u, 16) for u in unicodes.split(\" \")] glyphData['unicodes'] =", "= \"Italic\" a2.tag = \"ITAL\" a2.labelNames['fr'] = \"Italique\" a2.map =", "without deep-copying attached UFO fonts or TTFont objects. The :attr:`font`", "be hidden in user interfaces. \"\"\" self.map = map or", "label `{self.locationLabel}` in instance `{self.name}`.' ) return label def getFullDesignLocation(self,", "for m in data.get('masters'): masterElement = ET.Element(\"master\") if m.get('glyphName') is", "rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name)) valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) for values", "glyphData['instanceLocation'] = designLocation glyphSources = None for masterElement in glyphElement.findall('.masters/master'):", "xvalue=\"\").') masterGlyphName = masterElement.attrib.get('glyphname') if masterGlyphName is None: # if", "forward slashes if value is not None: value = posix(value)", "tuple): value = value[0] return next((k for k, v in", "= self.intOrFloat(value[0]) dimElement.attrib['yvalue'] = self.intOrFloat(value[1]) else: dimElement.attrib['xvalue'] = self.intOrFloat(value) locElement.append(dimElement)", "to freeze the given axis.\"\"\" class BaseDocWriter(object): _whiteSpace = \"", "rule def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor): \"\"\"Add the given ``variableFontDescriptor`` to", "\"\"\" self.mutedGlyphNames = mutedGlyphNames or [] \"\"\"list. Glyphnames that need", "not None and self.documentObject.path is not None: instancePath = os.path.join(os.path.dirname(self.documentObject.path),", "(\"%f\" % num).rstrip('0').rstrip('.') def _addRule(self, ruleObject): # if none of", "a1.maximum = 1000 a1.default = 400 a1.name = \"weight\" a1.tag", "None -- action: write as is, descriptors will not have", "getStyleMapFamilyName(self, languageCode=\"en\"): return self.localisedStyleMapFamilyName.get(languageCode) def clearLocation(self, axisName: Optional[str] = None):", "{'name', 'userminimum', 'userdefault', 'usermaximum'} unknown_attrs = set(element.attrib) - xml_attrs if", "flag indicates whether the substitution rules should be applied before", "Axis values for this instance. MutatorMath + Varlib. .. 
deprecated::", "cd['minimum'] = None cdMax = conditionElement.attrib.get(\"maximum\") if cdMax is not", "is not None: instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename) else: instancePath =", "v in value ] d[attr] = value return d class", "if code == \"en\": continue localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname') localisedStyleMapFamilyNameElement.attrib[XML_LANG] =", "if data.get('masters') is not None: mastersElement = ET.Element(\"masters\") for m", "descriptor.path is not None: # case 3 and 4: filename", "def write(self, path): \"\"\"Write this designspace to ``path``.\"\"\" if hasattr(path,", "3 ✅ ❌ ❌ ✅ =========== ========= =========== =========== ===============", "if sourceObject.muteInfo: infoElement.attrib['mute'] = \"1\" sourceElement.append(infoElement) if sourceObject.muteKerning: kerningElement =", "\"\"\"string. Style name of this source. Though this data can", "if name is not None: instanceObject.name = name familyname =", "source.path, and an optional list of keyword arguments, and returns", "subElement.attrib['with'] ruleObject.subs.append((a, b)) rules.append(ruleObject) self.documentObject.rules = rules def _readConditionElements(self, parentElement,", "name in glyphNames: swap = False for a, b in", "is not None: locElement.attrib['name'] = name validatedLocation = self.documentObject.newDefaultLocation() for", "else: axis = self.writerClass.axisDescriptorClass(**kwargs) self.addAxis(axis) return axis def addRule(self, ruleDescriptor:", "= hidden \"\"\"bool. Whether this axis should be hidden in", "a tuple of (designLocation, userLocation) \"\"\" if self._strictAxisNames and not", "4 label. No anisotropy. - ``designLocation[axisName]``: the explicit design location", "AxisLabelDescriptor(name=\"Regular\", userValue=400, elidable=True) ] doc.addAxis(a1) \"\"\" _attrs = ['tag', 'name',", "version 5.0 if instanceObject.glyphs: if instanceElement.findall('.glyphs') == []: glyphsElement =", "labelElement.findall(\".label\"): axisObject.axisLabels.append(self.readAxisLabel(label)) self.documentObject.axes.append(axisObject) self.axisDefaults[axisObject.name] = axisObject.default def readAxisLabel(self, element: ET.Element):", "value this axis will get in user space. MutatorMath +", "basename = \"VF\" axisNames = \"\".join([f\"-{axis.tag}{value}\" for axis, value in", "font in zip(res.sources, fonts): res.font = font return res finally:", "for labelElement in self.root.findall(\".labels/label\"): unknown_attrs = set(labelElement.attrib) - xml_attrs if", "not None: glyphData['masters'] = glyphSources instanceObject.glyphs[glyphName] = glyphData def readLib(self):", "must only have design locations (using xvalue=\"\").') if designLocation is", "len(name)) else: tag = name[:4] return tag, dict(en=name) class AbstractAxisDescriptor(SimpleDescriptor):", "= filename \"\"\"string. 
A relative path to the source file,", "<sub name=\"cent\" with=\"cent.alt\"/> <sub name=\"dollar\" with=\"dollar.alt\"/> </rule> </rules> \"\"\" _attrs", "stylename = instanceElement.attrib.get('stylename') if stylename is not None: instanceObject.styleName =", "userLocation=userLocation, elidable=elidable, olderSibling=olderSibling, labelNames=labelNames, ) self.documentObject.locationLabels.append(locationLabel) def readVariableFonts(self): if self.documentObject.formatTuple", "sourceElement.attrib['layer'] = sourceObject.layerName if sourceObject.localisedFamilyName: languageCodes = list(sourceObject.localisedFamilyName.keys()) languageCodes.sort() for", "self.root.find(\".axes\") if axesElement is not None and 'elidedfallbackname' in axesElement.attrib:", "in design space coordinates.\"\"\" # Without OrderedDict, output XML would", "value): \"\"\"Maps value from axis mapping's input to output. Returns", "localisations? .. code:: python a1 = AxisDescriptor() a1.minimum = 1", "= maximum \"\"\"number. The maximum value for this axis in", "self.glyphs = glyphs or {} \"\"\"dict for special master definitions", "= plistlib.fromtree(libElement[0]) def readInfoElement(self, infoElement, instanceObject): \"\"\" Read the info", "== '1': sourceObject.muteInfo = True for featuresElement in sourceElement.findall(\".features\"): if", "userDefault is not None and userMaximum is not None: return", "in self.axes: # scale the map first newMap = []", "styleMapStyleNameElement in instanceElement.findall('stylemapstylename'): for key, lang in styleMapStyleNameElement.items(): if key", "variable fonts as there are locations on discrete axes. ..", "the instance should have the same location as the LocationLabel.", "self.rulesProcessingLast: bool = False \"\"\"This flag indicates whether the substitution", "only. \"\"\" self.muteInfo = muteInfo \"\"\"bool. Indicated if the interpolating", "{} \"\"\"dict. A dictionary of localised stylename strings, keyed by", "getter(self): # Normal getter return getattr(self, private_name) def setter(self, value):", "or 'last'\" % processingValue) self.documentObject.rulesProcessingLast = processingValue == \"last\" for", "other axes at a given location. .. versionadded:: 5.0 \"\"\"", "not compatible. The axis still allows to bind together the", "= element.get(\"linkeduservalue\") linkedValue = float(linkedValueStr) if linkedValueStr is not None", "u in data.get('unicodes')]) if data.get('instanceLocation') is not None: locationElement, data['instanceLocation']", "filename, and the path. 
So we know where the file", "\".\".join(str(i) for i in self.effectiveFormatTuple) if self.documentObject.axes or self.documentObject.elidedFallbackName is", "label_name.text or \"\" for label_name in labelElement.findall(\"labelname\") for attr, lang", "= instanceObject.postScriptFontName if instanceObject.styleMapFamilyName is not None: instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName", "'designLocation', 'userLocation', 'familyName', 'styleName', 'postScriptFontName', 'styleMapFamilyName', 'styleMapStyleName', 'localisedFamilyName', 'localisedStyleName', 'localisedStyleMapFamilyName',", "100} instance.userLocation = {'Opsz': 16} In order to update a", "in styleMapStyleNameElement.items(): if key == XML_LANG: styleMapStyleName = styleMapStyleNameElement.text instanceObject.setStyleMapStyleName(styleMapStyleName,", "__init__( self, *, filename=None, path=None, font=None, name=None, location=None, locationLabel=None, designLocation=None,", "=========== =========== =============== STAT Format userValue userMinimum userMaximum linkedUserValue ===========", "InstanceDescriptor def __init__(self, documentPath, documentObject): self.path = documentPath self.documentObject =", "= XML_NS + \"lang\" def posix(path): \"\"\"Normalize paths using forward", "to :attr:`variableFonts`. .. versionadded:: 5.0 \"\"\" variableFont = self.writerClass.variableFontDescriptorClass(**kwargs) self.addVariableFont(variableFont)", "return getattr(self, private_name) def setter(self, value): # The setter rewrites", "'axisLabels'] def __init__( self, *, tag=None, name=None, labelNames=None, minimum=None, default=None,", "and the string in the filename attr. The file may", "the same maximum value as the full axis. (default =", "for outline data. Default ``None`` which means ``foreground``. \"\"\" self.familyName", "16) for u in unicodes.split(\" \")] glyphData['unicodes'] = unicodes except", "a Python script and still only exists in memory, the", "= set(labelElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"Label element contains", "2 and 3). All values are user values. See: `OTSpec", "in the document, or a sufficiently recent version to be", "a dict with ``name``, ``minimum`` and ``maximum`` keys. \"\"\" #", "rules for example). MutatorMath. .. deprecated:: 5.0 Use rules or", "can be efficient to have it right here. Varlib. \"\"\"", "along this axis is the same as the matching STAT", "unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"label element", "and populates the fields of ``self`` with the data. \"\"\"", "or {} \"\"\"A dictionary of localised style map familyname strings,", "returns a new font object loaded from the path. **kwargs:", "None: instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName if self.effectiveFormatTuple < (5, 0): #", "for k, v in self.map}) def map_backward(self, v): \"\"\"Maps value", "self.filename = os.path.basename(path) reader = self.readerClass(path, self) reader.read() if self.sources:", "If glyphs need special masters (to record the results of", "for this axis self.tag = tag \"\"\"string. 
Four letter tag", "English name from :attr:`labelNames` or the :attr:`name`.\"\"\" return self.labelNames.get(\"en\") or", "if instanceObject.kerning: kerningElement = ET.Element('kerning') instanceElement.append(kerningElement) if instanceObject.info: infoElement =", "= list(instanceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code ==", ".. code:: python a2 = DiscreteAxisDescriptor() a2.values = [0, 1]", "names, in the same order as defined in the document.\"\"\"", "= tostr(styleMapStyleName) def getStyleMapStyleName(self, languageCode=\"en\"): return self.localisedStyleMapStyleName.get(languageCode) def setStyleMapFamilyName(self, styleMapFamilyName,", "designLocation=instanceObject.designLocation, userLocation=instanceObject.userLocation ) else: # Pre-version 5.0 code was validating", "= None cdMax = conditionElement.attrib.get(\"maximum\") if cdMax is not None:", "\"\"\"Ensure SourceDescriptor.font attributes are loaded, and return list of fonts.", "newOutputValue)) if newMap: axis.map = newMap # finally the axis", "the location dicts. MutatorMath + Varlib. \"\"\" # names for", "= axisElement.attrib.get(\"tag\") for mapElement in axisElement.findall('map'): a = float(mapElement.attrib['input']) b", "temporary source name sourceName = \"temp_master.%d\" % (sourceCount) sourceObject =", "associated with this instance.\"\"\" @property def location(self): \"\"\"dict. Axis values", "Whether this axis should be hidden in user interfaces. \"\"\"", "self.documentObject.instances.append(instanceObject) def readLibElement(self, libElement, instanceObject): \"\"\"Read the lib element for", "Analogue of OpenType's STAT data for a single axis (formats", "\"\"\"STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.linkedUserValue:", "\"instance\" _defaultLanguageCode = \"en\" _attrs = ['filename', 'path', 'name', 'locationLabel',", "fromstring(cls, string, documentObject): f = BytesIO(tobytes(string, encoding=\"utf-8\")) self = cls(f,", "sourceName = sourceElement.attrib.get('name') if sourceName is None: # add a", "discreteAxes.append(axis) else: rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name)) valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])", "= \"weight\" a1.tag = \"wght\" a1.labelNames['fa-IR'] = \"قطر\" a1.labelNames['en'] =", "is not None: vfElement.attrib['filename'] = vf.filename if vf.axisSubsets: subsetsElement =", "if isinstance(subset, RangeAxisSubsetDescriptor): if subset.userMinimum != -math.inf: subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum)", "clear that axis, then edit the values: .. code:: python", "= SourceDescriptor variableFontDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass =", "= \"en\" _attrs = ['filename', 'path', 'name', 'locationLabel', 'designLocation', 'userLocation',", "the source.path, and an optional list of keyword arguments, and", "self.localisedFamilyName = localisedFamilyName or {} \"\"\"dict. A dictionary of localised", "and freeze other axes at a given location. .. 
versionadded::", "'1': sourceObject.copyGroups = True for infoElement in sourceElement.findall(\".info\"): if infoElement.attrib.get('copy')", "python a1 = AxisDescriptor() a1.minimum = 1 a1.maximum = 1000", "filling in the location # dict while writing it out,", "in glyphElement.findall('.masters/master'): fontSourceName = masterElement.attrib.get('source') designLocation, userLocation = self.locationFromElement(masterElement) if", "of this document's variable fonts. .. versionadded:: 5.0\"\"\" self.instances: List[InstanceDescriptor]", "this instance. MutatorMath + Varlib. \"\"\" self.localisedFamilyName = localisedFamilyName or", "instance.userLocation for instance in self.documentObject.instances ) ): if minVersion <", "[] for inputValue, outputValue in axis.map: newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name)", "# add localisations if instanceObject.localisedStyleName: languageCodes = list(instanceObject.localisedStyleName.keys()) languageCodes.sort() for", "they're different, we just choose for path to be correct", "{v: k for k, v in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor): \"\"\"Container", "3. See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_ .. versionadded::", "\"true\" else False labelNames = { lang: label_name.text or \"\"", "ValueAxisSubsetDescriptor]] = [] for axis in self.axes: if isinstance(axis, DiscreteAxisDescriptor):", ") else: # Pre-version 5.0 code was validating and filling", "def getFamilyName(self, languageCode=\"en\"): \"\"\"Getter for :attr:`localisedFamilyName` .. versionadded:: 5.0 \"\"\"", "instances. MutatorMath only. \"\"\" @property def location(self): \"\"\"dict. Axis values", "disk and make ```filename`` point to that. \"\"\" self.name =", "attribute.\") filename = variableFontElement.get(\"filename\") axisSubsetsElement = variableFontElement.find(\".axis-subsets\") if axisSubsetsElement is", "evaluateRule(rule, location): \"\"\"Return True if any of the rule's conditionsets", "locElement = ET.Element(\"location\") if name is not None: locElement.attrib['name'] =", "location): for name in glyphNames: swap = False for a,", "or \"<Unknown>\") ) source.font = opener(source.path, **kwargs) loaded[source.path] = source.font", "conditions: value = location[cd['name']] if cd.get('minimum') is None: if value", "mastersElement.append(masterElement) glyphElement.append(mastersElement) return glyphElement class BaseDocReader(LogMixin): axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass", "if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) def newAxisDescriptor(self):", "= \"label\" _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames') def", "\"values\" in axisElement.attrib: axisObject = self.discreteAxisDescriptorClass() axisObject.values = [float(s) for", "= masterElement.attrib.get('source') designLocation, userLocation = self.locationFromElement(masterElement) if userLocation: raise DesignSpaceDocumentError(f'<master>", "element must have a name attribute.\") filename = variableFontElement.get(\"filename\") axisSubsetsElement", "= float(mapElement.attrib['output']) axisObject.map.append((a, b)) for labelNameElement in axisElement.findall('labelname'): # Note:", "if yValue is not None: if xValue is None: raise", "{} self.userLocation = {} else: if self.designLocation is None: self.designLocation", "``<location>`` element. .. 
versionchanged:: 5.0 Return a tuple of (designLocation,", "not None: instanceObject.familyName = familyname stylename = instanceElement.attrib.get('stylename') if stylename", "space to design space before comparing # it against the", "localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple >= (5, 0): if", "<https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.labelNames: Dict[str, str] = labelNames or {} \"\"\"User-facing", "= name[:4] return tag, dict(en=name) class AbstractAxisDescriptor(SimpleDescriptor): flavor = \"axis\"", "version to be able to encode what the document contains.", "axis. No anisotropy. - ``axis.default``: default axis value. No anisotropy.", "objects. The :attr:`font` attribute is shared by reference between the", "writer.write(encoding=encoding, xml_declaration=xml_declaration) return f.getvalue() def read(self, path): \"\"\"Read a designspace", "nested ``<location>`` element inside the given ``element``. .. versionchanged:: 5.0", "xValue) try: yValue = dimensionElement.attrib.get('yvalue') if yValue is not None:", "value return d class SimpleDescriptor(AsDictMixin): \"\"\" Containers for a bunch", "doc.axes doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources doc.variableFonts doc.instances doc.lib \"\"\" def", "full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\" self.userLocation:", "by a Python script and still only exists in memory,", "Example: an Italic axis with 2 stops, Roman and Italic,", "define 1 variable font per value on the discrete axes.", "inappropriate here, and instead of # assert, it should simply", "to output (design).\"\"\" from fontTools.varLib.models import piecewiseLinearMap if not self.map:", "doc.instances doc.lib \"\"\" def __init__(self, readerClass=None, writerClass=None): self.path = None", "newMap: axis.map = newMap # finally the axis values minimum", "] new[axis.name] = normalizeValue(value, triple) return new def normalize(self): \"\"\"", "None: instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename) else: instancePath = None instanceObject", "-> AnisotropicLocationDict: \"\"\"Get the complete design location of this instance,", "sourceObject.muteKerning = True self.documentObject.sources.append(sourceObject) def locationFromElement(self, element): \"\"\"Read a nested", "the output won't be anisotropic, only the xvalue is set.", "os.path.basename(path) self.updatePaths() writer = self.writerClass(path, self) writer.write() def _posixRelativePath(self, otherPath):", "self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) def newAxisDescriptor(self): \"\"\"Ask", "Optional[AnisotropicLocationDict]): self.designLocation = location or {} def setStyleName(self, styleName, languageCode=\"en\"):", "sourceElement.append(featuresElement) if sourceObject.copyInfo or sourceObject.muteInfo: infoElement = ET.Element('info') if sourceObject.copyInfo:", "def read(self, path): \"\"\"Read a designspace file from ``path`` and", "given ``variableFontDescriptor`` to :attr:`variableFonts`. .. 
versionadded:: 5.0 \"\"\" self.variableFonts.append(variableFontDescriptor) def", "instanceObject.locationLabel is None: self._addLocationElement( instanceElement, designLocation=instanceObject.designLocation, userLocation=instanceObject.userLocation ) else: #", "\"1\" if data.get('unicodes') is not None: glyphElement.attrib['unicode'] = \" \".join([hex(u)", "instanceElement.findall('stylemapfamilyname'): for key, lang in styleMapFamilyNameElement.items(): if key == XML_LANG:", "the :class:`LocationLabelDescriptor` instance that matches this instances's :attr:`locationLabel`. Raises if", "s1.localisedFamilyName = dict(fr=\"Caractère\") s1.mutedGlyphNames.append(\"A\") s1.mutedGlyphNames.append(\"Z\") doc.addSource(s1) \"\"\" flavor = \"source\"", "reason to interfere. case 2. descriptor.filename == \"../something\" descriptor.path ==", "entry, if you intend that value to be mapped. \"\"\"", "if sourceObject.mutedGlyphNames: for name in sourceObject.mutedGlyphNames: glyphElement = ET.Element(\"glyph\") glyphElement.attrib[\"name\"]", "table, however it can't become a variation axis in a", "as # '{http://www.w3.org/XML/1998/namespace}lang' } return self.axisLabelDescriptorClass( name=name, userValue=value, userMinimum=minimum, userMaximum=maximum,", "dimensionElement.attrib.get(\"name\") if self._strictAxisNames and dimName not in self.axisDefaults: # In", "\"axis-subset\" _attrs = ('name', 'userValue') def __init__(self, *, name, userValue):", "are filled-in, see :meth:`getFormat` .. versionadded:: 5.0 \"\"\" flavor =", "localisedFamilyNameElement.attrib[XML_LANG] = code localisedFamilyNameElement.text = instanceObject.getFamilyName(code) instanceElement.append(localisedFamilyNameElement) if instanceObject.localisedStyleMapStyleName: languageCodes", "5.0 \"\"\" label = self.getLocationLabelDescriptor(doc) if label is not None:", "exists. .. versionadded:: 5.0 \"\"\" for label in self.locationLabels: if", "_attrs = ['filename', 'path', 'name', 'layerName', 'location', 'copyLib', 'copyGroups', 'copyFeatures',", "not None: locElement.attrib['name'] = name validatedLocation = self.documentObject.newDefaultLocation() for axisName,", "flag ``ELIDABLE_AXIS_VALUE_NAME``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.olderSibling: bool", "- scale all the locations of all masters and instances", "recalcBBoxes=False) Args: opener (Callable): takes one required positional argument, the", "locElement.append(dimElement) elif userLocation is not None and axis.name in userLocation:", "must have a uservalue attribute.\" ) userValue = float(userValueStr) return", "original file name, i.e. the last part of its path.", "= os.path.basename(path) self.updatePaths() writer = self.writerClass(path, self) writer.write() def _posixRelativePath(self,", "is not None: labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum) labelElement.attrib['name'] = label.name if", "__all__ = [ 'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor', 'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader',", "lib=lib, ) self.documentObject.variableFonts.append(variableFont) def readAxisSubset(self, element: ET.Element): if \"uservalue\" in", "Optional[str] = None): \"\"\"Clear all location-related fields. 
Ensures that :attr:``designLocation``", "name as # '{http://www.w3.org/XML/1998/namespace}lang' } locationLabel = self.locationLabelDescriptorClass( name=name, userLocation=userLocation,", "sourceObject.name.find(\"temp_master\") != 0: # do not save temporary source names", "addSource(self, sourceDescriptor: SourceDescriptor): \"\"\"Add the given ``sourceDescriptor`` to ``doc.sources``.\"\"\" self.sources.append(sourceDescriptor)", "getAxisDecriptor(cls): return cls.axisDescriptorClass() @classmethod def getSourceDescriptor(cls): return cls.sourceDescriptorClass() @classmethod def", "defaultDesignLocation = self.newDefaultLocation() for sourceDescriptor in self.sources: if sourceDescriptor.getFullDesignLocation(self) ==", "in element.attrib: xml_attrs = {'name', 'uservalue'} unknown_attrs = set(element.attrib) -", "a dict with normalized axis values.\"\"\" from fontTools.varLib.models import normalizeValue", "self.path = documentPath self.documentObject = documentObject tree = ET.parse(self.path) self.root", ") def _getEffectiveFormatTuple(self): \"\"\"Try to use the version specified in", "self.documentObject.formatVersion = self.root.attrib.get(\"format\", \"3.0\") self._axes = [] self.rules = []", "a1.default = 400 a1.name = \"weight\" a1.tag = \"wght\" a1.labelNames['fa-IR']", "glyphName=masterGlyphName) if glyphSources is None: glyphSources = [] glyphSources.append(d) if", "# list of substitutions stored as tuples of glyphnames (\"a\",", "name: return label return None def map_forward(self, userLocation: SimpleLocationDict) ->", "axes. This function updates the document's :attr:`default` value. .. versionchanged::", "float]]] SimpleLocationDict = Dict[str, float] class InstanceDescriptor(SimpleDescriptor): \"\"\"Simple container for", "if instanceObject.familyName is not None: instanceElement.attrib['familyname'] = instanceObject.familyName if instanceObject.styleName", "to :attr:`axes`. The axis will be and instance of :class:`DiscreteAxisDescriptor`", "= path \"\"\"string. Absolute path to the instance file, calculated", "5.0 \"\"\" return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for axis in", "of these: .. code-block:: xml <glyph name=\"b\" unicode=\"0x62\"/> <glyph name=\"b\"/>", "self.documentObject.axes ) or self.documentObject.locationLabels or any( source.localisedFamilyName for source in", "space before comparing # it against the SourceDescriptor locations (always", "axisObject.map: mapElement = ET.Element('map') mapElement.attrib['input'] = self.intOrFloat(inputValue) mapElement.attrib['output'] = self.intOrFloat(outputValue)", "its :attr:`designLocation` and the document's axis defaults. .. versionadded:: 5.0", "('wght', dict(en = 'Weight')), 'width': ('wdth', dict(en = 'Width')), 'optical':", "value = value.asdict() elif isinstance(value, list): value = [ v.asdict()", "as a tuple of (major, minor). .. versionadded:: 5.0 \"\"\"", "========= =========== =========== =============== \"\"\" if self.linkedUserValue is not None:", "axis = self.writerClass.discreteAxisDescriptorClass(**kwargs) else: axis = self.writerClass.axisDescriptorClass(**kwargs) self.addAxis(axis) return axis", "conditionset. 
\" \"Wrapped them in a new conditionset.\" ) #", "= {'Opsz': 16} In order to update a single axis", "unicodes = glyphElement.attrib.get('unicode') if unicodes is not None: try: unicodes", "``designspaceLib`` to either load the resource identified by ``filename`` and", "data.\"\"\" # list of lists of dict(name='aaaa', minimum=0, maximum=1000) self.conditionSets", "List[LocationLabelDescriptor] = [] \"\"\"List of this document's STAT format 4", "setter) class DesignSpaceDocumentError(Exception): def __init__(self, msg, obj=None): self.msg = msg", "instanceObject): \"\"\"Read the lib element for the given instance.\"\"\" instanceObject.lib", "geometry of this designspace: - scale all the locations of", "name=\"width\"/> <condition minimum=\"10\" maximum=\"40\" name=\"optical\"/> </conditionset> <sub name=\"cent\" with=\"cent.alt\"/> <sub", "valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor def", "for ruleElement in self.root.findall(\".rules/rule\"): ruleObject = self.ruleDescriptorClass() ruleName = ruleObject.name", "in rules: if evaluateRule(rule, location): for name in glyphNames: swap", "to the instance file, **as it is in the document**.", "None: self.designLocation = {} self.userLocation = {} else: if self.designLocation", "userMinimum = element.get(\"userminimum\") userDefault = element.get(\"userdefault\") userMaximum = element.get(\"usermaximum\") if", "= name \"\"\"string. Unique name for this rule. Can be", "is not None: # font already loaded fonts.append(source.font) continue if", "0): return xml_attrs = {'name', 'filename'} for variableFontElement in self.root.findall(\".variable-fonts/variable-font\"):", "None and userDefault is not None and userMaximum is not", "attr.startswith(\"_\"): continue if hasattr(value, \"asdict\"): value = value.asdict() elif isinstance(value,", "space. MutatorMath + Varlib. \"\"\" def serialize(self): # output to", "if not data: return libElement = ET.Element('lib') libElement.append(plistlib.totree(data, indent_level=indent_level)) parentElement.append(libElement)", "integrate this object in different contexts. The **DesignSpaceDocument** object can", ":attr:`locationLabel`. Raises if the named label can't be found. ..", "``None``) \"\"\" self.userMaximum: float = userMaximum \"\"\"New maximum value of", "list. 
\"\"\" # we load fonts with the same source.path", "the given ``userLocation``, or ``None`` if no such label exists.", "dict(weight=0) s1.familyName = \"MasterFamilyName\" s1.styleName = \"MasterStyleNameOne\" s1.localisedFamilyName = dict(fr=\"Caractère\")", "= userValue \"\"\"STAT field ``value`` (format 1, 3) or ``nominalValue``", "the document by creating such descriptor objects, filling them with", "instanceObject.styleMapStyleName = styleMapStyleName # read localised names for styleNameElement in", "self.path = path self.filename = os.path.basename(path) self.updatePaths() writer = self.writerClass(path,", "of that variable font that only include some axes and", "for mapElement in axisElement.findall('map'): a = float(mapElement.attrib['input']) b = float(mapElement.attrib['output'])", "xml_attrs = {'name', 'filename'} for variableFontElement in self.root.findall(\".variable-fonts/variable-font\"): unknown_attrs =", "value is not valid: %r, \" \"expected 'first' or 'last'\"", "') return f\"{self.__class__.__name__}(\\n{attrs}\\n)\" class SourceDescriptor(SimpleDescriptor): \"\"\"Simple container for data related", "STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.linkedUserValue: Optional[float] = linkedUserValue \"\"\"STAT field", "versionadded:: 5.0 \"\"\" flavor = \"axis-subset\" _attrs = ('name', 'userValue')", "'name', 'layerName', 'location', 'copyLib', 'copyGroups', 'copyFeatures', 'muteKerning', 'muteInfo', 'mutedGlyphNames', 'familyName',", "of that axis (same as a :class:`ValueAxisSubsetDescriptor`). \"\"\" self.lib: MutableMapping[str,", "calculated from the document path and the string in the", "return self.localisedStyleMapFamilyName.get(languageCode) def clearLocation(self, axisName: Optional[str] = None): \"\"\"Clear all", "= self.intOrFloat(dimensionValue) locElement.append(dimElement) return locElement, validatedLocation def intOrFloat(self, num): if", "axisElement in axisElements: if self.documentObject.formatTuple >= (5, 0) and \"values\"", "for axes that do not interpolate. The main difference from", "value in zip(discreteAxes, values) ] )) return variableFonts def deepcopyExceptFonts(self):", "variableFont = self.writerClass.variableFontDescriptorClass(**kwargs) self.addVariableFont(variableFont) return variableFont def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor):", "== \"en\": continue localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname') localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code localisedStyleMapFamilyNameElement.text", "substitutions stored as tuples of glyphnames (\"a\", \"a.alt\") self.subs =", "ruleName, ) if externalConditions: ruleObject.conditionSets.append(externalConditions) self.log.info( \"Found stray rule conditions", "ref:`rules-element` § Attributes. 
\"\"\" def evaluateRule(rule, location): \"\"\"Return True if", "axisLabels=None, ): # opentype tag for this axis self.tag =", "the axes for axis in self.axes: # scale the map", "ruleName if ruleName is not None else \"\")) cds.append(cd) return", "else: tag = name[:4] return tag, dict(en=name) class AbstractAxisDescriptor(SimpleDescriptor): flavor", "% num return (\"%f\" % num).rstrip('0').rstrip('.') def _addRule(self, ruleObject): #", "super().__init__( tag=tag, name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.minimum", "if sourceObject.name.find(\"temp_master\") != 0: # do not save temporary source", "default location for that axis. When the input has anisotropic", "axisElement.attrib.get(\"tag\") for mapElement in axisElement.findall('map'): a = float(mapElement.attrib['input']) b =", "5.0 \"\"\" for label in self.locationLabels: if label.name == name:", "instanceObject.localisedFamilyName: languageCodes = list(instanceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes: if", "as is, descriptors will not have a filename attr. useless,", "code. \"\"\" self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {} \"\"\"A dictionary of", "Size')), 'slant': ('slnt', dict(en = 'Slant')), 'italic': ('ital', dict(en =", "self.documentObject.instances ) ): if minVersion < (5, 0): minVersion =", "not fill-in this attribute, and the default writer will not", "as is. The filename attr should not be touched. case", "languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code", "some of the axis values, and they are assumed to", "= [] for conditionElement in parentElement.findall('.condition'): cd = {} cdMin", "for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")): filename = sourceElement.attrib.get('filename') if filename", "deprecated:: 5.0 Use rules or sparse sources instead. \"\"\" self.kerning", "{} \"\"\"User-facing translations of this location's label. Keyed by xml:lang", "(user) to output (design).\"\"\" from fontTools.varLib.models import piecewiseLinearMap if not", "things if cd.get('minimum') is None and cd.get('maximum') is None: raise", "class AbstractAxisDescriptor(SimpleDescriptor): flavor = \"axis\" def __init__( self, *, tag=None,", "if no such axis exists.\"\"\" for axisDescriptor in self.axes: if", "cd['name'] = conditionElement.attrib.get(\"name\") # # test for things if cd.get('minimum')", "default \"\"\"number. The default value for this axis, i.e. when", "= LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontDescriptorClass =", "have minimum or maximum values, do not add the rule.", "seems continue value = location[axis.name] # 'anisotropic' location, take first", "Union[AxisDescriptor, DiscreteAxisDescriptor]): \"\"\"Add the given ``axisDescriptor`` to :attr:`axes`.\"\"\" self.axes.append(axisDescriptor) def", "this axis does not interpolate. 
- it doesn't provide the", "(400.0, 66.0), (1000.0, 990.0)] a1.axisOrdering = 1 a1.axisLabels = [", "if sourceObject.styleName is not None: sourceElement.attrib['stylename'] = sourceObject.styleName if sourceObject.layerName", "most one of the location=\"...\" attribute or the nested location", "familyname stylename = instanceElement.attrib.get('stylename') if stylename is not None: instanceObject.styleName", "= float(cdMax) else: # will allow these to be None,", "SimpleLocationDict = userLocation or {} \"\"\"dict. Axis values for this", "writerClass else: self.writerClass = BaseDocWriter @classmethod def fromfile(cls, path, readerClass=None,", "forward slash to work also on Windows.\"\"\" new_path = posixpath.join(*path.split(os.path.sep))", "_attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels') def", "to ``doc.sources``.\"\"\" self.sources.append(sourceDescriptor) def addSourceDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`SourceDescriptor`", "read from the disk, this is the full path that", "\"\"\"bool. Indicated if the interpolating font.info data for this source", "in self.designLocation: del self.designLocation[axisName] if self.userLocation is None: self.userLocation =", "axisElement.attrib.get('hidden', False): axisObject.hidden = True axisObject.tag = axisElement.attrib.get(\"tag\") for mapElement", ".. versionadded:: 5.0 \"\"\" return doc.map_backward(self.getFullDesignLocation(doc)) def tagForAxisName(name): # try", "else False labelNames = { lang: label_name.text or \"\" for", "of the conditions have minimum or maximum values, do not", "return fonts @property def formatTuple(self): \"\"\"Return the formatVersion as a", "sourcePath = None sourceName = sourceElement.attrib.get('name') if sourceName is None:", "attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' for key, lang in labelNameElement.items():", "value[0] return next((k for k, v in self.map if v", "or sparse sources instead. \"\"\" self.kerning = kerning \"\"\" bool.", "return any(evaluateConditions(c, location) for c in rule.conditionSets) def evaluateConditions(conditions, location):", "'locationLabel', 'designLocation', 'userLocation', 'familyName', 'styleName', 'postScriptFontName', 'styleMapFamilyName', 'styleMapStyleName', 'localisedFamilyName', 'localisedStyleName',", "\"\"\"Container for axis label data. Analogue of OpenType's STAT data", "between the original and the copy. .. versionadded:: 5.0 \"\"\"", "label) axisElement.append(labelsElement) if isinstance(axisObject, AxisDescriptor): axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum) axisElement.attrib['maximum'] =", "variable font. If not specified, assume the same default value", ") self.minimum = minimum \"\"\"number. The minimum value for this", "return None def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]: \"\"\"Return the", "lang) for familyNameElement in instanceElement.findall('familyname'): for key, lang in familyNameElement.items():", "the \"neutral\" version of outlines from which deltas would apply,", "using the given ``kwargs`` and add it to :attr:`instances`. \"\"\"", "change the field(s) for which they have data. .. code::", "**descriptor** objects that store the data in attributes. Data is", "reverse-DNS notation to identify your own data. 
Respect the data", "as FontTools binary fonts, including extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args:", "= self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name']) else: maximum = None newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum))", "of this instance. MutatorMath + Varlib. \"\"\" self.postScriptFontName = postScriptFontName", "axis has a list of ``values``. Example: an Italic axis", "an anisotropic interpolation. </note> </glyph> \"\"\" glyphData = {} glyphName", "the set of all `default` values in user space of", "it to :attr:`axes`. The axis will be and instance of", "the data. \"\"\" if hasattr(path, \"__fspath__\"): # support os.PathLike objects", "== value), value) class AxisLabelDescriptor(SimpleDescriptor): \"\"\"Container for axis label data.", "lang: label_name.text or \"\" for label_name in element.findall(\"labelname\") for attr,", "name of the layer in the source to look for", "2).\"\"\" self.userValue: float = userValue \"\"\"STAT field ``value`` (format 1,", "maximum = float(maximumStr) if maximumStr is not None else None", "and return SourceDescriptor at the default location or None. The", "instanceElement.append(infoElement) self._addLib(instanceElement, instanceObject.lib, 4) self.root.findall('.instances')[0].append(instanceElement) def _addSource(self, sourceObject): sourceElement =", "axisSubsetsElement.iterfind(\".axis-subset\"): axisSubsets.append(self.readAxisSubset(axisSubset)) lib = None libElement = variableFontElement.find(\".lib\") if libElement", "new_path = posixpath.join(*path.split(os.path.sep)) if path.startswith('/'): # The above transformation loses", "coordinates are at the default location for that axis. Note:", "label in self.locationLabels if label.userLocation == userLocation), None ) def", "for conditions in rule.conditionSets: newConditions = [] for cond in", "instance. MutatorMath + Varlib. \"\"\" self.postScriptFontName = postScriptFontName \"\"\"string. Postscript", "styleMapFamilyName = styleMapFamilyNameElement.text instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang) designLocation, userLocation = self.locationFromElement(instanceElement) locationLabel", "if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] elif axis.name in", "= ('name', 'userValue') def __init__(self, *, name, userValue): self.name: str", "\"\"\"Instantiate a new :class:`SourceDescriptor` using the given ``kwargs`` and add", "def addAxisDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`AxisDescriptor` using the given", "rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor def __init__(self, documentPath, documentObject):", "= float(minimumStr) if minimumStr is not None else None maximumStr", "muted (i.e. not be part of the calculations). MutatorMath only.", "userValue=userValue) else: xml_attrs = {'name', 'userminimum', 'userdefault', 'usermaximum'} unknown_attrs =", "in self.root.findall(\".rules/rule\"): ruleObject = self.ruleDescriptorClass() ruleName = ruleObject.name = ruleElement.attrib.get(\"name\")", "axis-subset element for a discrete subset must have a uservalue", "glyph element, which could look like either one of these:", "f.getvalue() def read(self, path): \"\"\"Read a designspace file from ``path``", "lib=None): self.name: str = name \"\"\"string, required. 
Name of this", "self.readAxes() self.readLabels() self.readRules() self.readVariableFonts() self.readSources() self.readInstances() self.readLib() def readRules(self): #", "sources, variable fonts and instances to very basic **descriptor** objects", "add localisations if instanceObject.localisedStyleName: languageCodes = list(instanceObject.localisedStyleName.keys()) languageCodes.sort() for code", "if clearing everything). In order to update the location of", "[0, 1] a2.name = \"Italic\" a2.tag = \"ITAL\" a2.labelNames['fr'] =", ") self.default: float = default \"\"\"The default value for this", "letter tag for this axis. Some might be registered at", "basename = os.path.splitext(self.filename)[0] + \"-VF\" if self.path is not None:", "userLocation or {} instanceObject.designLocation = designLocation or {} for glyphElement", "location. .. seealso:: This may be only part of the", "axis.maximum = maximum axis.default = default # now the rules", "if cdMin is not None: cd['minimum'] = float(cdMin) else: #", "``name``, or ``None`` if no such axis exists.\"\"\" for axisDescriptor", "stylename for this instance. MutatorMath + Varlib. \"\"\" self.localisedFamilyName =", "posixpath_property(\"_filename\") def __init__(self, *, name, filename=None, axisSubsets=None, lib=None): self.name: str", "None if cond.get('maximum') is not None: maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name'])", "the top-level location label with the given ``name``, or ``None``", "getattr(self, attr), \"!=\", getattr(other, attr)) def __repr__(self): attrs = [f\"{a}={repr(getattr(self,", "m in data.get('masters'): masterElement = ET.Element(\"master\") if m.get('glyphName') is not", "= yValue = None try: userValue = dimensionElement.attrib.get('uservalue') if userValue", "why they're different, we just choose for path to be", "❌ ✅ =========== ========= =========== =========== =============== \"\"\" if self.linkedUserValue", "only part of the full location. See: :meth:`getFullUserLocation` \"\"\" self.elidable:", "Axis Value Tables format 1, 2, 3. See: `OTSpec STAT", "axisDescriptor in self.axes: names.append(axisDescriptor.name) return names def getAxis(self, name): \"\"\"Return", "= ET.Element(\"location\") for axis in self.documentObject.axes: if designLocation is not", "versionadded:: 5.0 \"\"\" return { axis.name: self.userLocation.get(axis.name, axis.default) for axis", "path and the string in the filename attr. The file", "taking the first not-None field in this list: - ``locationLabel``:", "axis. Contrary to continuous axes, only the values in this", "not None: conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum')) if cond.get('maximum') is not None:", "doc.rulesProcessingLast doc.sources doc.variableFonts doc.instances doc.lib \"\"\" def __init__(self, readerClass=None, writerClass=None):", "to the -1 - 0 - 1 value. - we", "``path``.\"\"\" if hasattr(path, \"__fspath__\"): # support os.PathLike objects path =", "code was validating and filling in the location # dict", "can be taken by the axis, nothing in-between. \"\"\" def", "for > minimum. 
\"\"\" for cd in conditions: value =", "groupsElement.attrib['copy'] = \"1\" sourceElement.append(groupsElement) if sourceObject.copyFeatures: featuresElement = ET.Element('features') featuresElement.attrib['copy']", "def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict: \"\"\"Map a user location", "familyName = familyNameElement.text instanceObject.setFamilyName(familyName, lang) for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'): for", "None linkedValueStr = element.get(\"linkeduservalue\") linkedValue = float(linkedValueStr) if linkedValueStr is", "\"\"\" self.hidden = hidden \"\"\"bool. Whether this axis should be", "assume axis.minimum cd['minimum'] = None cdMax = conditionElement.attrib.get(\"maximum\") if cdMax", "whether the substitution rules should be applied before or after", "<https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.linkedUserValue: Optional[float] = linkedUserValue \"\"\"STAT field ``linkedValue`` (format", "versionadded:: 5.0 \"\"\" flavor = \"label\" _attrs = ('name', 'elidable',", "def processRules(rules, location, glyphNames): \"\"\"Apply these rules at this location", "variableFonts = [] discreteAxes = [] rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] =", "``instanceDescriptor`` to :attr:`instances`.\"\"\" self.instances.append(instanceDescriptor) def addInstanceDescriptor(self, **kwargs): \"\"\"Instantiate a new", "conditions. - Each condition is a dict with ``name``, ``minimum``", "= next(numbers) minor = next(numbers, 0) return (major, minor) def", "Some might be registered at the `OpenType specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__. Privately-defined", "a2.map = [(0, 0), (1, -11)] a2.axisOrdering = 2 a2.axisLabels", "of this source. Though this data can be extracted from", "key == XML_LANG: styleMapFamilyName = styleMapFamilyNameElement.text instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang) designLocation, userLocation", "substitutions. - Each substitution is stored as tuples of glyphnames,", "is not None and self.documentObject.path is not None: instancePath =", "to ``path``.\"\"\" if hasattr(path, \"__fspath__\"): # support os.PathLike objects path", "self.variableFonts: return self.variableFonts variableFonts = [] discreteAxes = [] rangeAxisSubsets:", "= dict(font=fontSourceName, location=designLocation, glyphName=masterGlyphName) if glyphSources is None: glyphSources =", "In case the document contains no axis definitions, self.log.warning(\"Location with", "not None: labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue) self._addLabelNames(labelElement, label.labelNames) axisElement.append(labelElement) def _addLabelNames(self,", "condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.", "= ``math.inf``) \"\"\" class ValueAxisSubsetDescriptor(SimpleDescriptor): \"\"\"Single value of a discrete", "is not None: if sourceObject.name.find(\"temp_master\") != 0: # do not", "if valueStr is None: raise DesignSpaceDocumentError(\"label element must have a", "+ VarLib. \"\"\" self.path = path \"\"\"The absolute path, calculated", "value as the full axis. 
(default = ``-math.inf``) \"\"\" self.userDefault:", "False): attributes = {\"processing\": \"last\"} else: attributes = {} self.root.append(ET.Element(\"rules\",", "self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or [] \"\"\"Axis subsets to", "(format 2).\"\"\" self.name: str = name \"\"\"Label for this axis", "entries for Axis Value Tables format 1, 2, 3. See:", "the :class:`AxisDescriptor` to subset.\"\"\" self.userMinimum: float = userMinimum \"\"\"New minimum", "Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_ .. versionadded:: 5.0 \"\"\" class AxisDescriptor(AbstractAxisDescriptor): \"\"\" Simple", "the file is relative to the document. Can't guess why", "[] \"\"\"List of this document's STAT format 4 labels. ..", "one required positional argument, the source.path, and an optional list", "= self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name) default = self.normalizeLocation({axis.name:", "code. \"\"\" @property def defaultName(self) -> str: \"\"\"Return the English", "updateFilenameFromPath(self, masters=True, instances=True, force=False): \"\"\"Set a descriptor filename attr from", "dimensionElement in locationElement.findall(\".dimension\"): dimName = dimensionElement.attrib.get(\"name\") if self._strictAxisNames and dimName", "if rulesElement is not None: processingValue = rulesElement.attrib.get(\"processing\", \"first\") if", "self.styleName = styleName \"\"\"string. Style name of this source. Though", "<rules> <rule name=\"vertical.bars\"> <conditionset> <condition minimum=\"250.000000\" maximum=\"750.000000\" name=\"weight\"/> <condition minimum=\"100\"", "vf.axisSubsets: subsetElement = ET.Element('axis-subset') subsetElement.attrib['name'] = subset.name if isinstance(subset, RangeAxisSubsetDescriptor):", "instanceElement.attrib.get(\"name\") if name is not None: instanceObject.name = name familyname", "5.0 \"\"\" flavor = \"label\" _attrs = ('name', 'elidable', 'olderSibling',", "instanceObject.name if instanceObject.locationLabel is not None: instanceElement.attrib['location'] = instanceObject.locationLabel if", "( \": %r\" % self.obj if self.obj is not None", "``math.inf``) \"\"\" class ValueAxisSubsetDescriptor(SimpleDescriptor): \"\"\"Single value of a discrete or", "self.formatVersion: Optional[str] = None \"\"\"Format version for this document, as", "from axis mapping's output to input. Returns value unchanged if", "] doc.addAxis(a2) .. versionadded:: 5.0 \"\"\" flavor = \"axis\" _attrs", "location as the LocationLabel. .. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded::", "elementLocation def readLocationElement(self, locationElement): \"\"\"Read a ``<location>`` element. .. 
versionchanged::", "zip(discreteAxes, values)]) variableFonts.append(VariableFontDescriptor( name=f\"{basename}{axisNames}\", axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value) for", "% (sourceCount) sourceObject = self.sourceDescriptorClass() sourceObject.path = sourcePath # absolute", "= cond.get('name') if cond.get('minimum') is not None: conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum'))", "\" \".join(self.intOrFloat(v) for v in axisObject.values) axisElement.attrib['default'] = self.intOrFloat(axisObject.default) if", "ruleName is not None else \"\")) cds.append(cd) return cds def", "value = posix(value) setattr(self, private_name, value) return property(getter, setter) class", "\" \"expected 'first' or 'last'\" % processingValue) self.documentObject.rulesProcessingLast = processingValue", "of substitutions stored as tuples of glyphnames (\"a\", \"a.alt\") self.subs", "in the sources list. \"\"\" # we load fonts with", "sourceObject.path = sourcePath # absolute path to the ufo source", "for variable fonts, sub-spaces of the Designspace. Use-cases: - From", "if key == XML_LANG: familyName = familyNameElement.text instanceObject.setFamilyName(familyName, lang) for", "sourceElement = ET.Element(\"source\") if sourceObject.filename is not None: sourceElement.attrib['filename'] =", "glyphElement in instanceElement.findall('.glyphs/glyph'): self.readGlyphElement(glyphElement, instanceObject) for infoElement in instanceElement.findall(\"info\"): self.readInfoElement(infoElement,", "VarLib. \"\"\" self.filename: str = filename \"\"\"string, optional. Relative path", "to :attr:`rules`. \"\"\" rule = self.writerClass.ruleDescriptorClass(**kwargs) self.addRule(rule) return rule def", "processingValue) self.documentObject.rulesProcessingLast = processingValue == \"last\" for ruleElement in self.root.findall(\".rules/rule\"):", "Note: for discrete axes, each value must have its mapping", "Indicated if this instance needs the interpolating font.info calculated. ..", "if familyname is not None: instanceObject.familyName = familyname stylename =", "the same path are only loaded once and shared among", "isinstance(value, list): value = [ v.asdict() if hasattr(v, \"asdict\") else", "None: raise DesignSpaceDocumentError(\"axis-subset element must have a name attribute.\") userMinimum", "libElement = ET.Element('lib') libElement.append(plistlib.totree(data, indent_level=indent_level)) parentElement.append(libElement) def _writeGlyphElement(self, instanceElement, instanceObject,", "bool. Indicates if this instance needs its kerning calculated. MutatorMath.", "\"\"\"string. Absolute path to the instance file, calculated from the", "b = subElement.attrib['with'] ruleObject.subs.append((a, b)) rules.append(ruleObject) self.documentObject.rules = rules def", "that need to be muted in the instances. MutatorMath only.", "if evaluateRule(rule, location): for name in glyphNames: swap = False", "\"\"\" instance = self.writerClass.instanceDescriptorClass(**kwargs) self.addInstance(instance) return instance def addAxis(self, axisDescriptor:", "reference between the original and the copy. .. versionadded:: 5.0", "True: after. Default is False. For new projects, you probably", "fonts.append(source.font) return fonts @property def formatTuple(self): \"\"\"Return the formatVersion as", "contains. 
\"\"\" minVersion = self.documentObject.formatTuple if ( any( isinstance(axis, DiscreteAxisDescriptor)", "doc.addInstance(i2) \"\"\" flavor = \"instance\" _defaultLanguageCode = \"en\" _attrs =", "gets updated and relativized descriptor.filename = self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor:", "i2.familyName = \"InstanceFamilyName\" i2.styleName = \"InstanceStyleName\" i2.name = \"instance.ufo2\" #", "is not None: mastersElement = ET.Element(\"masters\") for m in data.get('masters'):", "Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_ .. versionadded:: 5.0 \"\"\" self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] =", "MutatorMath + Varlib. \"\"\" # names for UI purposes, if", "self.readLocationElement(locationElement) break return elementLocation def readLocationElement(self, locationElement): \"\"\"Read a ``<location>``", "using the given ``kwargs`` and add it to :attr:`variableFonts`. ..", "'conditionSets', 'subs'] # what do we need here def __init__(self,", "'layerName', 'location', 'copyLib', 'copyGroups', 'copyFeatures', 'muteKerning', 'muteInfo', 'mutedGlyphNames', 'familyName', 'styleName',", "coordinates are at the default location for that axis. When", "a :class:`LocationLabelDescriptor`. If provided, the instance should have the same", "axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum) elif isinstance(axisObject, DiscreteAxisDescriptor): axisElement.attrib['values'] = \" \".join(self.intOrFloat(v)", "if glyphElement.attrib.get('mute') == '1': sourceObject.mutedGlyphNames.append(glyphName) for kerningElement in sourceElement.findall(\".kerning\"): if", "values=None, default=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__( tag=tag, name=name,", "maximum values, do not add the rule. ruleElement = ET.Element('rule')", "= ET.Element(\"location\") if name is not None: locElement.attrib['name'] = name", "in rule\" + (\" '%s'\" % ruleName if ruleName is", "return source def addInstance(self, instanceDescriptor: InstanceDescriptor): \"\"\"Add the given ``instanceDescriptor``", "if the named label can't be found. .. versionadded:: 5.0", "dimensionElement.attrib.get('xvalue') if xValue is not None: xValue = float(xValue) except", "not None: labelElement.attrib['userminimum'] = self.intOrFloat(label.userMinimum) if label.userMaximum is not None:", "designLocation=sourceObject.location) else: # Pre-version 5.0 code was validating and filling", "no such axis exists.\"\"\" for axisDescriptor in self.axes: if axisDescriptor.name", "return list of fonts. Takes a callable which initializes a", "sourceObject.mutedGlyphNames.append(glyphName) for kerningElement in sourceElement.findall(\".kerning\"): if kerningElement.attrib.get('mute') == '1': sourceObject.muteKerning", "and encoding.lower() == \"unicode\" ): f = StringIO() xml_declaration =", "self.root.attrib['format'] = \".\".join(str(i) for i in self.effectiveFormatTuple) if self.documentObject.axes or", "and 4: filename gets updated and relativized descriptor.filename = self._posixRelativePath(descriptor.path)", "userMinimum=None, userMaximum=None, elidable=False, olderSibling=False, linkedUserValue=None, labelNames=None, ): self.userMinimum: Optional[float] =", "is not None: lib = plistlib.fromtree(libElement[0]) variableFont = self.variableFontsDescriptorClass( name=name,", "a conditionset. 
\" \"Wrapped them in a new conditionset.\" )", "sourceElement.attrib.get(\"stylename\") if styleName is not None: sourceObject.styleName = styleName for", "\"4.0\" \"\"\" self.elidedFallbackName: Optional[str] = None \"\"\"STAT Style Attributes Header", "return cls.sourceDescriptorClass() @classmethod def getInstanceDescriptor(cls): return cls.instanceDescriptorClass() @classmethod def getRuleDescriptor(cls):", "strings, keyed by language code. If present, will be used", "= 400 a1.name = \"weight\" a1.tag = \"wght\" a1.labelNames['fa-IR'] =", "= locationLabel instanceObject.userLocation = userLocation or {} instanceObject.designLocation = designLocation", "= ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels'] def", "parentElement.append(locElement) def _addInstance(self, instanceObject): instanceElement = ET.Element('instance') if instanceObject.name is", "label.olderSibling: labelElement.attrib['oldersibling'] = \"true\" self._addLabelNames(labelElement, label.labelNames) self._addLocationElement(labelElement, userLocation=label.userLocation) parentElement.append(labelElement) def", "+ \"*\" * (4 - len(name)) else: tag = name[:4]", "== \"utf-8\": f = BytesIO() encoding = \"UTF-8\" xml_declaration =", "sourceElement.findall(\".glyph\"): glyphName = glyphElement.attrib.get('name') if glyphName is None: continue if", "<note> This is an instance from an anisotropic interpolation. </note>", "{} \"\"\"Location in user coordinates along each axis. If an", "given ``kwargs`` and add it to ``doc.sources``. \"\"\" source =", "lang) designLocation, userLocation = self.locationFromElement(instanceElement) locationLabel = instanceElement.attrib.get('location') if (designLocation", "sourceDescriptor return None def normalizeLocation(self, location): \"\"\"Return a dict with", "<lib> <dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib> \"\"\" self.sources: List[SourceDescriptor] =", "location(self): \"\"\"dict. Axis values for this instance. MutatorMath + Varlib.", "Varlib. \"\"\" self.axisOrdering = axisOrdering \"\"\"STAT table field ``axisOrdering``. See:", "import math import os import posixpath from io import BytesIO,", "for this axis location, STAT field ``valueNameID``.\"\"\" self.elidable: bool =", "doc.map_backward(self.getFullDesignLocation(doc)) def tagForAxisName(name): # try to find or make a", "with 2 stops, Roman and Italic, that are not compatible.", "localisedFamilyName=None, localisedStyleName=None, localisedStyleMapFamilyName=None, localisedStyleMapStyleName=None, glyphs=None, kerning=True, info=True, lib=None, ): self.filename", "subset.userMinimum != -math.inf: subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum) if subset.userMaximum != math.inf:", "filename attr should not be touched. case 3. 
descriptor.filename ==", "or self.documentObject.elidedFallbackName is not None: axesElement = ET.Element(\"axes\") if self.documentObject.elidedFallbackName", "names: return names[name.lower()] if len(name) < 4: tag = name", "font.\"\"\" class RangeAxisSubsetDescriptor(SimpleDescriptor): \"\"\"Subset of a continuous axis to include", "if label.olderSibling: labelElement.attrib['oldersibling'] = \"true\" if label.linkedUserValue is not None:", "\"\"\" def serialize(self): # output to a dict, used in", "languageCodes: if code == \"en\": continue # already stored in", "instanceElement in instanceElements: self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) def _readSingleInstanceElement(self, instanceElement,", "processingValue == \"last\" for ruleElement in self.root.findall(\".rules/rule\"): ruleObject = self.ruleDescriptorClass()", "the full path that was given to :meth:`read` or :meth:`fromfile`.", "LocationLabelDescriptor(SimpleDescriptor): \"\"\"Container for location label data. Analogue of OpenType's STAT", "Indicates if this instance needs its kerning calculated. MutatorMath. ..", "may or may not exist. MutatorMath + VarLib. \"\"\" self.path", "in rule.conditionSets: newConditions = [] for cond in conditions: if", "= plistlib.fromtree(libElement[0]) variableFont = self.variableFontsDescriptorClass( name=name, filename=filename, axisSubsets=axisSubsets, lib=lib, )", "= RuleDescriptor() r1.name = \"unique.rule.name\" r1.conditionSets.append([dict(name=\"weight\", minimum=-10, maximum=10), dict(...)]) r1.conditionSets.append([dict(...),", "locationLabelDescriptorClass = LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontDescriptorClass", "``com.github.fonttools.varLib.featureVarsFeatureTag`` .. code:: xml <lib> <dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib>", "with the whole document. Use reverse-DNS notation to identify your", "**kwargs): \"\"\"Instantiate a new :class:`InstanceDescriptor` using the given ``kwargs`` and", "and self.path is not None: sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename)) else:", "minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name) default =", "name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.minimum = minimum", "cdMax = conditionElement.attrib.get(\"maximum\") if cdMax is not None: cd['maximum'] =", "in locationObject.items(): if axisName in validatedLocation: # only accept values", "value = [ v.asdict() if hasattr(v, \"asdict\") else v for", "in instanceElements: self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True,", ":attr:`axes`.\"\"\" self.axes.append(axisDescriptor) def addAxisDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`AxisDescriptor` using", "dict while writing it out, as preserved below. 
locationElement, sourceObject.location", "first, before other text shaping/OpenType layout, as they are part", "self.locationLabel is None: return None label = doc.getLocationLabel(self.locationLabel) if label", "is not None: noteElement = ET.Element('note') noteElement.text = data.get('note') glyphElement.append(noteElement)", "None libElement = variableFontElement.find(\".lib\") if libElement is not None: lib", "self.intOrFloat(label.userMaximum) labelElement.attrib['name'] = label.name if label.elidable: labelElement.attrib['elidable'] = \"true\" if", "Privately-defined axis tags must begin with an uppercase letter and", "write here an indication of a possible \"good\" filename, in", "d = {} for attr, value in self.__dict__.items(): if attr.startswith(\"_\"):", "\"\"\"string. Style name of this instance. MutatorMath + Varlib. \"\"\"", "in glyphData['masters']: glyphMaster['location'] = self.normalizeLocation(glyphMaster['location']) item.location = self.normalizeLocation(item.location) # the", "glyphElement, instanceObject): \"\"\" Read the glyph element, which could look", "= glyphElement.attrib.get('name') if glyphName is None: continue if glyphElement.attrib.get('mute') ==", "path to the ufo source sourceObject.filename = filename # path", "for sourceDescriptor in self.sources: if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation: self.default =", "'axisOrdering', 'axisLabels'] def __init__( self, *, tag=None, name=None, labelNames=None, minimum=None,", "<https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__ If you want to use a different feature altogether,", "explicit user location and default axis values. .. versionadded:: 5.0", "that holds a path always using forward slashes.\"\"\" def getter(self):", "Without OrderedDict, output XML would be non-deterministic. # https://github.com/LettError/designSpaceDocument/issues/10 loc", "Fonts with the same path are only loaded once and", "self.writerClass = BaseDocWriter @classmethod def fromfile(cls, path, readerClass=None, writerClass=None): \"\"\"Read", "= sourceElement.attrib.get('filename') if filename is not None and self.path is", "at its default location. .. seealso:: This may be only", "- False: before - True: after. Default is False. For", "not None or axis.axisLabels for axis in self.documentObject.axes ) or", "stops, Roman and Italic, that are not compatible. The axis", "self.rangeAxisSubsetDescriptorClass( name=name, userMinimum=float(userMinimum), userDefault=float(userDefault), userMaximum=float(userMaximum), ) if all(v is None", "tag name for this axis name names = { 'weight':", "what do we need here def __init__(self, *, name=None, conditionSets=None,", "as it is stored in the document name = instanceElement.attrib.get(\"name\")", "substitution rules --> <rules> <rule name=\"vertical.bars\"> <conditionset> <condition minimum=\"250.000000\" maximum=\"750.000000\"", "locationLabel=None, designLocation=None, userLocation=None, familyName=None, styleName=None, postScriptFontName=None, styleMapFamilyName=None, styleMapStyleName=None, localisedFamilyName=None, localisedStyleName=None,", "this axis. 
Some might be registered at the `OpenType specification", "localisedFamilyNameElement.text = sourceObject.getFamilyName(code) sourceElement.append(localisedFamilyNameElement) if sourceObject.copyLib: libElement = ET.Element('lib') libElement.attrib['copy']", ":class:`LocationLabelDescriptor` using the given ``kwargs`` and add it to :attr:`locationLabels`.", "self.intOrFloat(axisObject.default) if axisObject.hidden: axisElement.attrib['hidden'] = \"1\" self.root.findall('.axes')[0].append(axisElement) def _addAxisLabel(self, axisElement:", "self.documentObject.locationLabels or any( source.localisedFamilyName for source in self.documentObject.sources ) or", "i2.designLocation = dict(weight=500, width=(400,300)) i2.postScriptFontName = \"InstancePostscriptName\" i2.styleMapFamilyName = \"InstanceStyleMapFamilyName\"", "if newMap: axis.map = newMap # finally the axis values", "not None and axis.name in designLocation: dimElement = ET.Element('dimension') dimElement.attrib['name']", "of lists of dict(name='aaaa', minimum=0, maximum=1000) self.conditionSets = conditionSets or", "linkedValue = float(linkedValueStr) if linkedValueStr is not None else None", "axes. Before version 5, you would have needed 1 DesignSpace", "\"1\" sourceElement.append(featuresElement) if sourceObject.copyInfo or sourceObject.muteInfo: infoElement = ET.Element('info') if", "getSourceDescriptor(cls): return cls.sourceDescriptorClass() @classmethod def getInstanceDescriptor(cls): return cls.instanceDescriptorClass() @classmethod def", "filename, in case one wants to save the file somewhere.", "name for this axis name names = { 'weight': ('wght',", "all `default` values in user space of all axes. This", "if axisObject.axisOrdering is not None: labelsElement.attrib['ordering'] = str(axisObject.axisOrdering) for label", "instanceObject.familyName is not None: instanceElement.attrib['familyname'] = instanceObject.familyName if instanceObject.styleName is", "\"\"\" self.path = path \"\"\"The absolute path, calculated from filename.\"\"\"", "to the document by creating such descriptor objects, filling them", "except AssertionError: print(\"failed attribute\", attr, getattr(self, attr), \"!=\", getattr(other, attr))", "if self.documentObject.sources: self.root.append(ET.Element(\"sources\")) for sourceObject in self.documentObject.sources: self._addSource(sourceObject) if self.documentObject.variableFonts:", "from fontTools.misc.loggingTools import LogMixin from fontTools.misc.textTools import tobytes, tostr \"\"\"", "outside of a condition set. rules = [] rulesElement =", "Return a tuple of (designLocation, userLocation) \"\"\" elementLocation = (None,", "libElement in self.root.findall(\".lib\"): self.documentObject.lib = plistlib.fromtree(libElement[0]) class DesignSpaceDocument(LogMixin, AsDictMixin): \"\"\"The", "be anisotropic, only the xvalue is set. .. versionadded:: 5.0", "and mappings, and top-level location labels. The source of truth", "condition set externalConditions = self._readConditionElements( ruleElement, ruleName, ) if externalConditions:", "parts of the document, and also as a filename in", "code. If present, will be used to build localized names", "swap = False for a, b in rule.subs: if name", "given location. - If a condition has no minimum, check", "ruleObject.subs: subElement = ET.Element('sub') subElement.attrib['name'] = sub[0] subElement.attrib['with'] = sub[1]", "in a variable font. .. 
versionadded:: 5.0 \"\"\" flavor =", "None: return None label = doc.getLocationLabel(self.locationLabel) if label is None:", "class BaseDocReader(LogMixin): axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass =", "if userValue is not None: userValue = float(userValue) except ValueError:", "to be None, assume axis.minimum cd['minimum'] = None cdMax =", "a2.values = [0, 1] a2.name = \"Italic\" a2.tag = \"ITAL\"", "in locations self.name = name \"\"\"string. Name of the axis", "as they have the same attributes. Reader and Writer objects", "= tostr(styleName) def getStyleName(self, languageCode=\"en\"): return self.localisedStyleName.get(languageCode) def setFamilyName(self, familyName,", "else: axisObject = self.axisDescriptorClass() axisObject.minimum = float(axisElement.attrib.get(\"minimum\")) axisObject.maximum = float(axisElement.attrib.get(\"maximum\"))", "the `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_. See ref:`rules-element` §", "location=\"location-token-ccc\" source=\"master-token-aaa3\"/> <note> This is an instance from an anisotropic", "\"%d\" % num return (\"%f\" % num).rstrip('0').rstrip('.') def _addRule(self, ruleObject):", "'{http://www.w3.org/XML/1998/namespace}lang' } return self.axisLabelDescriptorClass( name=name, userValue=value, userMinimum=minimum, userMaximum=maximum, elidable=elidable, olderSibling=olderSibling,", "if mute == \"1\": glyphData['mute'] = True # unicode unicodes", "\"\"\"string. StyleMap familyname for this instance. MutatorMath + Varlib. \"\"\"", "from fontTools.varLib.models import piecewiseLinearMap if isinstance(v, tuple): v = v[0]", "if sourceObject.localisedFamilyName: languageCodes = list(sourceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes:", "it to :attr:`variableFonts`. .. versionadded:: 5.0 \"\"\" variableFont = self.writerClass.variableFontDescriptorClass(**kwargs)", "point to that. \"\"\" self.name = name \"\"\"string. Optional. Unique", "not None: return 3 if self.userMinimum is not None or", "axis.default)) for axis in self.axes } def map_backward(self, designLocation: AnisotropicLocationDict)", "self.linkedUserValue is not None: return 3 if self.userMinimum is not", "default location. .. seealso:: This may be only part of", "from this source needs to be muted (i.e. 
not be", "this document's axes.\"\"\" self.locationLabels: List[LocationLabelDescriptor] = [] \"\"\"List of this", "= value return d class SimpleDescriptor(AsDictMixin): \"\"\" Containers for a", "not None: labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum) labelElement.attrib['name'] = label.name if label.elidable:", "instanceElement.append(locationElement) if instanceObject.filename is not None: instanceElement.attrib['filename'] = instanceObject.filename if", "FontTools binary fonts, including extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args: opener", "itertools import math import os import posixpath from io import", "ET.Element('condition') conditionElement.attrib['name'] = cond.get('name') if cond.get('minimum') is not None: conditionElement.attrib['minimum']", "reads the \"xml:lang\" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' for key,", "<https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__ `fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__ If you want to use a different", "the font attribute is already not None, it is not", "✅ ❌ ❌ ❌ 2 ✅ ✅ ✅ ❌ 3", "otherwise. \"\"\" if \"values\" in kwargs: axis = self.writerClass.discreteAxisDescriptorClass(**kwargs) else:", "as a basename for the file. \"\"\" self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]]", "'localisedFamilyName', 'localisedStyleName', 'localisedStyleMapFamilyName', 'localisedStyleMapStyleName', 'glyphs', 'kerning', 'info', 'lib'] filename =", "that can describe a warp of user space to design", "= list(instanceObject.localisedStyleName.keys()) languageCodes.sort() for code in languageCodes: if code ==", "and still only exists in memory, the producing script can", "= [float(s) for s in axisElement.attrib[\"values\"].split(\" \")] else: axisObject =", "only include some axes and freeze other axes at a", "unicode unicodes = glyphElement.attrib.get('unicode') if unicodes is not None: try:", "+ Varlib. \"\"\" def serialize(self): # output to a dict,", "not in self.axisDefaults: # In case the document contains no", "3). All values are user values. See: `OTSpec STAT Axis", "self._addLib(vfElement, vf.lib, 4) parentElement.append(vfElement) def _addLib(self, parentElement: ET.Element, data: Any,", "if designLocation: raise DesignSpaceDocumentError(f'<label> element \"{name}\" must only have user", "lang in familyNameElement.items(): if key == XML_LANG: familyName = familyNameElement.text", "instanceElement.findall('.glyphs')[0] for glyphName, data in sorted(instanceObject.glyphs.items()): glyphElement = self._writeGlyphElement(instanceElement, instanceObject,", "os.path.basename(path) reader = self.readerClass(path, self) reader.read() if self.sources: self.findDefault() def", "axisObject.values = [float(s) for s in axisElement.attrib[\"values\"].split(\" \")] else: axisObject", ":attr:`axes`. 
The axis will be and instance of :class:`DiscreteAxisDescriptor` if", "user location of this label, by combining data from the", "parentElement, labelNames): for languageCode, labelName in sorted(labelNames.items()): languageElement = ET.Element('labelname')", "is stored in the document name = instanceElement.attrib.get(\"name\") if name", "give easier access to the localised names.\"\"\" self.localisedStyleName[languageCode] = tostr(styleName)", "= tostr(styleMapFamilyName) def getStyleMapFamilyName(self, languageCode=\"en\"): return self.localisedStyleMapFamilyName.get(languageCode) def clearLocation(self, axisName:", "before other text shaping/OpenType layout, as they are part of", "The above transformation loses leading slashes of UNC path mounts", "k for k, v in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor): \"\"\"Container for", "given location.\"\"\" return any(evaluateConditions(c, location) for c in rule.conditionSets) def", "used in the location dicts. MutatorMath + Varlib. \"\"\" #", "and set them in the axis.minimum axis.minimum = minimum axis.maximum", "\"\"\"New maximum value of the axis in the target variable", "will be and instance of :class:`DiscreteAxisDescriptor` if the ``kwargs`` provide", "userLocation: SimpleLocationDict = None ): locElement = ET.Element(\"location\") for axis", "= [] discreteAxes = [] rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = []", "os.PathLike objects path = path.__fspath__() self.path = path self.filename =", "self.sources: List[SourceDescriptor] = [] \"\"\"List of this document's sources.\"\"\" self.variableFonts:", "in glyphNames: swap = False for a, b in rule.subs:", "None: raise DesignSpaceDocumentError(\"Glyph object without name attribute\") mute = glyphElement.attrib.get(\"mute\")", "return next( (label for label in self.locationLabels if label.userLocation ==", "familyName=None, styleName=None, postScriptFontName=None, styleMapFamilyName=None, styleMapStyleName=None, localisedFamilyName=None, localisedStyleName=None, localisedStyleMapFamilyName=None, localisedStyleMapStyleName=None, glyphs=None,", "self.designLocation: AnisotropicLocationDict = designLocation if designLocation is not None else", "self.axes } def findDefault(self): \"\"\"Set and return SourceDescriptor at the", "def __init__( self, *, name, userLocation, elidable=False, olderSibling=False, labelNames=None, ):", "loaded in memory, as a Python object (e.g. 
a ``defcon.Font``", ") or self.documentObject.locationLabels or any( source.localisedFamilyName for source in self.documentObject.sources", "rules are applied first, before other text shaping/OpenType layout, as", "= variableFontElement.find(\".lib\") if libElement is not None: lib = plistlib.fromtree(libElement[0])", "dimElement.attrib['name'] = dimensionName if type(dimensionValue) == tuple: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0])", "newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name) newMap.append((inputValue, newOutputValue)) if newMap: axis.map =", "`OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.linkedUserValue: Optional[float] = linkedUserValue \"\"\"STAT", "float(maximumStr) if maximumStr is not None else None linkedValueStr =", "designLocation is not None: glyphData['instanceLocation'] = designLocation glyphSources = None", "Union[float, Tuple[float, float]]] SimpleLocationDict = Dict[str, float] class InstanceDescriptor(SimpleDescriptor): \"\"\"Simple", "this instance. MutatorMath + Varlib. \"\"\" self.styleName = styleName \"\"\"string.", "for path to be correct and update filename. \"\"\" assert", "conditionsetElement = ET.Element('conditionset') for cond in conditions: if cond.get('minimum') is", "= sourceObject.styleName if sourceObject.layerName is not None: sourceElement.attrib['layer'] = sourceObject.layerName", "with the given ``name``, or ``None`` if no such label", "location=None, designLocation=None, layerName=None, familyName=None, styleName=None, localisedFamilyName=None, copyLib=False, copyInfo=False, copyGroups=False, copyFeatures=False,", "SourceDescriptor.path, and sets the SourceDescriptor.font attribute. If the font attribute", "TTFont objects. The :attr:`font` attribute is shared by reference between", "the same order as defined in the document.\"\"\" names =", "not None: ruleObject.conditionSets.append(conditionSet) for subElement in ruleElement.findall('.sub'): a = subElement.attrib['name']", "not specified, assume the same maximum value as the full", "for location dimension \"{dimName}\"') if yValue is not None: if", "def formatTuple(self): \"\"\"Return the formatVersion as a tuple of (major,", "See the following issues for more information: `fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__ `fontTools#2050", "producing script can write here an indication of a possible", "-> AnisotropicLocationDict: \"\"\"Get the complete design location of this source,", "wholesale, a user should first clear all the fields, then", "after other glyph substitution features. 
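Because the descriptors are plain containers, a whole document can be assembled programmatically. A minimal sketch, assuming a fontTools recent enough for the format 5.0 ``add*Descriptor`` helpers; the file and family names are hypothetical:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        name="Weight", tag="wght", minimum=100, default=400, maximum=900,
    )
    doc.addSourceDescriptor(
        filename="masters/Demo-Regular.ufo",  # hypothetical master
        designLocation={"Weight": 400},
        familyName="Demo", styleName="Regular",
    )
    doc.addSourceDescriptor(
        filename="masters/Demo-Black.ufo",  # hypothetical master
        designLocation={"Weight": 900},
        familyName="Demo", styleName="Black",
    )
    doc.addInstanceDescriptor(
        filename="instances/Demo-Medium.ufo",
        userLocation={"Weight": 500},
        familyName="Demo", styleName="Medium",
    )
    doc.write("Demo.designspace")

Each ``add*Descriptor`` helper instantiates the matching descriptor class with the given ``kwargs``, appends it to the document and returns it, so the same code keeps working when the document or descriptor classes are subclassed.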
**Axes.** An :class:`AxisDescriptor` is a simple container for a continuous axis: a ``tag`` and a ``name``, ``minimum``, ``default`` and ``maximum`` values, and an optional ``map`` that can describe a warp of user space to design space coordinates. If no map values are present, it is assumed user space is the same as design space. Some tags might be registered at the `OpenType specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__; custom axis tags must begin with an uppercase letter and use only uppercase letters or digits, and a tag can only contain ASCII characters. Further fields: ``hidden`` (whether the axis should be hidden in UIs), ``labelNames`` (user-facing translations of the axis name, keyed by xml:lang code), and the STAT-related ``axisOrdering`` and ``axisLabels`` (see below). :meth:`map_forward` maps a value from the axis mapping's input (user) to its output (design) and :meth:`map_backward` does the reverse; both return the value unchanged if no mapping entry is found.

.. code:: python

    a1 = AxisDescriptor()
    a1.minimum = 1
    a1.maximum = 1000
    a1.default = 400
    a1.name = "weight"
    a1.tag = "wght"
    a1.labelNames['en'] = "Wéíght"
    a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
    a1.axisOrdering = 1
    a1.axisLabels = [
        AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
    ]
    doc.addAxis(a1)

A :class:`DiscreteAxisDescriptor` is a container for discrete axis data. The main difference from a continuous axis is that a continuous axis has a ``minimum`` and a ``maximum``, while a discrete axis has a list of ``values``: only the values in this list can be taken by the axis, nothing in-between. The ``default`` is the value this axis will get in user space; however, this default value is less important than in continuous axes:

- it doesn't define the "neutral" version of this axis, as this axis does not interpolate;
- it doesn't provide the reference glyph set for the designspace.

A discrete axis can still be defined in the STAT table, but it can't become a variation axis in a variable font; instead, there can be as many variable fonts as there are locations of the discrete axes (see :meth:`getVariableFonts` below). For discrete axes, :meth:`map_forward` and :meth:`map_backward` also return the value unchanged if no mapping entry is found, so each value must have its own mapping entry if you intend that value to be mapped. Example: an Italic axis with 2 stops, Roman and Italic, that are not compatible; the axis still allows to bind together the full design space in one document.

.. code:: python

    a2 = DiscreteAxisDescriptor()
    a2.values = [0, 1]
    a2.default = 0
    a2.name = "Italic"
    a2.tag = "ITAL"
    a2.labelNames['fr'] = "Italique"
    a2.axisLabels = [
        AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
    ]
    doc.addAxis(a2)

.. versionadded:: 5.0

The module-level helper ``tagForAxisName(name)`` tries to find or make a tag name for an axis name: the registered names ``weight``, ``width``, ``optical``, ``slant`` and ``italic`` map to ``wght``, ``wdth``, ``opsz``, ``slnt`` and ``ital`` together with an English label; any other name is padded with ``*`` or truncated to four characters.
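A short sketch of the per-axis mapping behaviour; the numbers are illustrative, and ``piecewiseLinearMap`` from ``fontTools.varLib.models`` does the actual interpolation under the hood:

.. code:: python

    from fontTools.designspaceLib import AxisDescriptor, DiscreteAxisDescriptor

    wght = AxisDescriptor()
    wght.name, wght.tag = "Weight", "wght"
    wght.minimum, wght.default, wght.maximum = 100, 400, 900
    wght.map = [(100, 20), (400, 80), (900, 160)]  # (user, design) pairs

    # map_forward() warps user values into design space piecewise
    # linearly; map_backward() inverts the warp.
    assert wght.map_forward(650) == 120.0
    assert wght.map_backward(120.0) == 650.0

    # A discrete axis without a map passes values through unchanged.
    ital = DiscreteAxisDescriptor(name="Italic", tag="ital", values=[0, 1], default=0)
    assert ital.map_forward(1) == 1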
\"\"\" # name", "not None: instanceElement.attrib['stylename'] = instanceObject.styleName # add localisations if instanceObject.localisedStyleName:", "same data as the other for attr in self._attrs: try:", "{} \"\"\"User defined, custom data associated with the whole document.", "return self.labelNames.get(\"en\") or self.name def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:", ".. versionadded:: 5.0 \"\"\" class AxisDescriptor(AbstractAxisDescriptor): \"\"\" Simple container for", "in rule.subs: if name == a: swap = True break", "self.documentObject.locationLabels.append(locationLabel) def readVariableFonts(self): if self.documentObject.formatTuple < (5, 0): return xml_attrs", "self.documentObject.variableFonts or any( instance.locationLabel or instance.userLocation for instance in self.documentObject.instances", "-> None: labelElement = ET.Element('label') labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue) if label.userMinimum", "designLocation is not None and axis.name in designLocation: dimElement =", "def addRule(self, ruleDescriptor: RuleDescriptor): \"\"\"Add the given ``ruleDescriptor`` to :attr:`rules`.\"\"\"", "= filename \"\"\"string, optional. Relative path to the variable font", "document name = instanceElement.attrib.get(\"name\") if name is not None: instanceObject.name", "elsewhere in the document. \"\"\" self.locationLabel = locationLabel \"\"\"Name of", "userLocation) \"\"\" if self._strictAxisNames and not self.documentObject.axes: raise DesignSpaceDocumentError(\"No axes", "for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'): for key, lang in styleMapStyleNameElement.items(): if", "exist. MutatorMath + VarLib. \"\"\" self.path = path \"\"\"string. Absolute", "not None: sourceElement.attrib['layer'] = sourceObject.layerName if sourceObject.localisedFamilyName: languageCodes = list(sourceObject.localisedFamilyName.keys())", "reader = self.readerClass.fromstring(string, self) reader.read() if self.sources: self.findDefault() return self", "``kwargs`` and add it to ``doc.sources``. \"\"\" source = self.writerClass.sourceDescriptorClass(**kwargs)", "else: dimElement.attrib['xvalue'] = self.intOrFloat(value) locElement.append(dimElement) elif userLocation is not None", "\"\"\"The DesignSpaceDocument object can read and write ``.designspace`` data. It", "axis in discreteAxes]) for values in valueCombinations: basename = None", "ET.Element('dimension') dimElement.attrib['name'] = dimensionName if type(dimensionValue) == tuple: dimElement.attrib['xvalue'] =", "userLocation = self.locationFromElement(masterElement) if userLocation: raise DesignSpaceDocumentError(f'<master> element \"{fontSourceName}\" must", "['name', 'conditionSets', 'subs'] # what do we need here def", "instance should have the same location as the LocationLabel. ..", "copied to the instances. MutatorMath. .. deprecated:: 5.0 \"\"\" self.copyGroups", "with the default location in design space coordinates.\"\"\" # Without", "= name validatedLocation = self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items():", "[float(s) for s in axisElement.attrib[\"values\"].split(\" \")] else: axisObject = self.axisDescriptorClass()", "axis defaults. .. 
versionadded:: 5.0 \"\"\" result: AnisotropicLocationDict = {}", "self, *, tag=None, name=None, labelNames=None, values=None, default=None, hidden=False, map=None, axisOrdering=None,", "styleName, languageCode=\"en\"): \"\"\"These methods give easier access to the localised", ".. seealso:: :meth:`getFullDesignLocation` .. versionadded:: 5.0 \"\"\" return doc.map_backward(self.getFullDesignLocation(doc)) def", "is not None: designLoc[dimName] = xValue else: userLoc[dimName] = userValue", "\\\"%s\\\".\", dimName) continue userValue = xValue = yValue = None", "self.intOrFloat(label.userMinimum) if label.userMaximum is not None: labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum) labelElement.attrib['name']", "= name \"\"\"string. Optional. Unique identifier name for this source.", "lang in styleNameElement.items(): if key == XML_LANG: styleName = styleNameElement.text", "noteElement.text break designLocation, userLocation = self.locationFromElement(glyphElement) if userLocation: raise DesignSpaceDocumentError(f'<glyph>", "situations: In each descriptor, we have to do the right", "only if isinstance(value, tuple): value = value[0] triple = [", "value to use to encode this label. =========== ========= ===========", "other): # test if this object contains the same data", "SimpleLocationDict: \"\"\"Get the complete user location for this instance. ..", "is the same as the matching STAT format 4 label.", "import normalizeValue new = {} for axis in self.axes: if", "] # ElementTree allows to find namespace-prefixed elements, but not", ":meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\" self.familyName = familyName \"\"\"string. Family", "('filename', 'axisSubsets', 'lib') filename = posixpath_property(\"_filename\") def __init__(self, *, name,", "= familyNameElement.text sourceObject.setFamilyName(familyName, lang) designLocation, userLocation = self.locationFromElement(sourceElement) if userLocation:", "BaseDocWriter @classmethod def fromfile(cls, path, readerClass=None, writerClass=None): \"\"\"Read a designspace", "if self.userLocation is None: self.userLocation = {} if axisName in", "font.info needs to be copied to the instances. MutatorMath. ..", "glyphs. If glyphs need special masters (to record the results", "including the warp map. axesElement = self.root.find(\".axes\") if axesElement is", "label.userLocation == userLocation), None ) def updateFilenameFromPath(self, masters=True, instances=True, force=False):", "= posixpath_property(\"_filename\") def __init__(self, *, name, filename=None, axisSubsets=None, lib=None): self.name:", "more information: `fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__ `fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__ If you want to", "axis names, in the same order as defined in the", "return axis def addRule(self, ruleDescriptor: RuleDescriptor): \"\"\"Add the given ``ruleDescriptor``", "list can be taken by the axis, nothing in-between. \"\"\"", "field ``valueNameID``.\"\"\" self.userLocation: SimpleLocationDict = userLocation or {} \"\"\"Location in", "in label_name.items() if attr == XML_LANG # Note: elementtree reads", "in the same order as defined in the document.\"\"\" names", "for example). MutatorMath. .. deprecated:: 5.0 Use rules or sparse", "instances. MutatorMath. .. deprecated:: 5.0 \"\"\" self.copyGroups = copyGroups \"\"\"bool.", "= muteInfo \"\"\"bool. 
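A minimal sketch of rule construction and evaluation; the glyph and axis names are illustrative, not from any particular font:

.. code:: python

    from fontTools.designspaceLib import (
        DesignSpaceDocument, RuleDescriptor, processRules,
    )

    doc = DesignSpaceDocument()
    doc.rulesProcessingLast = True  # apply after other substitutions

    rule = RuleDescriptor()
    rule.name = "BoldSymbols"  # hypothetical rule name
    rule.conditionSets.append([dict(name="weight", minimum=250, maximum=750)])
    rule.subs.append(("dollar", "dollar.alt"))
    doc.addRule(rule)

    # processRules() returns a new list of glyph names with the
    # substitutions applied; rule order matters.
    assert processRules(doc.rules, {"weight": 400},
                        ["dollar", "cent"]) == ["dollar.alt", "cent"]
    # Outside the conditionset nothing is substituted.
    assert processRules(doc.rules, {"weight": 100},
                        ["dollar", "cent"]) == ["dollar", "cent"]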
**Sources.** A :class:`SourceDescriptor` is a simple container for data related to a single source (master):

- ``filename``: string. A relative path to the source file, **as it is in the document**. MutatorMath + VarLib.
- ``path``: string. The absolute path, calculated from ``filename`` and the document path. MutatorMath + VarLib.
- ``font``: optional. Points to a representation of this source font that is loaded in memory, as a Python object (e.g. a ``defcon.Font`` or a ``fontTools.ttLib.TTFont``).
- ``name``: string, optional. Unique identifier name for this source, used to identify it during the build process and from other parts of the document.
- ``designLocation``: dict. Axis values for this source, in design space coordinates. The ``location`` attribute is the older alias.
- ``layerName``: string. The name of the layer in the source to look for outline data.
- ``familyName``, ``styleName`` and ``localisedFamilyName``: names for UI purposes.
- ``copyLib``, ``copyInfo``, ``copyGroups``, ``copyFeatures``: bools indicating whether the font.lib, the non-interpolating font.info, the groups and the features need to be copied to the instances. MutatorMath only (deprecated in 5.0).
- ``muteKerning``, ``muteInfo``: bools indicating whether the kerning or the interpolating font.info data for this source needs to be muted. MutatorMath only (deprecated in 5.0).
- ``mutedGlyphNames``: list of glyphnames that need to be muted in the instances. MutatorMath only (deprecated in 5.0).

.. code:: python

    s1 = SourceDescriptor()
    s1.path = masterPath1
    s1.name = "master.ufo1"
    s1.location = dict(weight=0)
    s1.familyName = "MasterFamilyName"
    s1.styleName = "MasterStyleNameOne"
    s1.localisedFamilyName = dict(fr="Caractère")
    doc.addSource(s1)

:meth:`findDefault` sets and returns the :class:`SourceDescriptor` at the default location, or None. The default location is the set of all ``default`` values in user space of all axes; this function updates the document's :attr:`default` attribute, and is called automatically when a document with sources is read.
**Instances.** An :class:`InstanceDescriptor` is a simple container for data related to a single instance. Its attributes are ``filename``, ``path``, ``name``, ``locationLabel``, ``designLocation``, ``userLocation``, ``familyName``, ``styleName``, ``postScriptFontName``, ``styleMapFamilyName``, ``styleMapStyleName``, the localised variants of those names, ``glyphs``, ``kerning``, ``info`` and ``lib``:

- ``filename``: string. Relative path to the instance file, calculated from the document path and the string in the filename attr.
- ``path``: string. Absolute path to the instance.
- ``name``: string. Unique identifier name of the instance, used to identify it if it needs to be referenced from elsewhere in the document.
- ``locationLabel``: name of a :class:`LocationLabelDescriptor`. If provided, the instance should have the same location as the LocationLabel (format 5.0).
- ``designLocation``: dict. Axis values for this instance, in design space coordinates; a value may be an anisotropic ``(x, y)`` tuple (format 5.0).
- ``userLocation``: dict. Axis values for this instance, in user space coordinates (format 5.0).
- ``familyName``, ``styleName``, ``postScriptFontName``, ``styleMapFamilyName``, ``styleMapStyleName``: naming data for this instance. MutatorMath + Varlib. The localised dicts (e.g. ``localisedStyleName``) are keyed by language code, and setter/getter pairs such as :meth:`setStyleName` / :meth:`getStyleName` give easier access to the localised names.
- ``glyphs``: dict for special master definitions for glyphs, if glyphs need special masters (to record the results of anisotropic interpolation, for example). MutatorMath only; deprecated in 5.0, use rules or sparse sources instead.
- ``kerning``, ``info``: bools indicating whether this instance needs its kerning calculated and whether it needs the interpolating font.info. MutatorMath only (deprecated in 5.0).
- ``lib``: custom data associated with this instance.

A glyph element in an instance may look like either one of these:

.. code-block:: xml

    <glyph name="b" unicode="0x62"/>
    <glyph name="b"/>
    <glyph name="b">
        <master location="location-token-bbb" source="master-token-aaa2"/>
        <master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/>
        <note>
            This is an instance from an anisotropic interpolation.
        </note>
    </glyph>

.. code:: python

    i2 = InstanceDescriptor()
    i2.path = instancePath2
    i2.familyName = "InstanceFamilyName"
    i2.styleName = "InstanceStyleName"
    i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
    i2.styleMapStyleName = "InstanceStyleMapStyleName"
    i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
    doc.addInstance(i2)

:meth:`getFullDesignLocation` gets the complete design location of this instance, by combining data from the various location fields, the axis mappings and the axis defaults. The source of truth for this instance's location is determined for each axis separately; if an axis is not mentioned, it is assumed to be at its default location:

- ``locationLabel``: the location along this axis is the same as the matching STAT format 4 label. No anisotropy.
- ``designLocation[axisName]``: the explicit design location along this axis, possibly anisotropic.
- ``userLocation[axisName]``: the explicit user location along this axis, mapped forward. No anisotropy.
- ``axis.default``: default axis value. No anisotropy.

:meth:`getFullUserLocation` gets the complete user location for this instance by mapping the full design location backward; :meth:`getLocationLabelDescriptor` returns the matching :class:`LocationLabelDescriptor` and raises :class:`DesignSpaceDocumentError` for an unknown label name. :meth:`clearLocation` clears all location-related fields and ensures that :attr:`designLocation` and :attr:`userLocation` are dictionaries (possibly empty). In order to update the location of this instance wholesale, a user should first clear all the fields, then set the ones for which they have data:

.. code:: python

    instance.clearLocation()
    instance.designLocation = {'Weight': (34, 36.5)}

In order to update a single axis location, the user should only clear that axis, then edit the values:

.. code:: python

    instance.clearLocation('Weight')
    instance.designLocation['Weight'] = (34, 36.5)
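A minimal sketch of instance location resolution across a continuous mapped axis and a discrete axis; the values are illustrative:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(name="Weight", tag="wght", minimum=100, default=400,
                          maximum=900, map=[(100, 20), (400, 80), (900, 160)])
    doc.addAxisDescriptor(name="Italic", tag="ital", values=[0, 1], default=0)

    inst = doc.addInstanceDescriptor(styleName="Medium",
                                     userLocation={"Weight": 500})

    # The user value is mapped forward; the unmentioned Italic axis is
    # filled in at its default.
    print(inst.getFullDesignLocation(doc))  # {'Weight': 96.0, 'Italic': 0}
    print(inst.getFullUserLocation(doc))    # {'Weight': 500.0, 'Italic': 0}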
**STAT labels.** An :class:`AxisLabelDescriptor` is a container for axis label data: the analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3). All values are user values. See the OTSpec STAT Axis Value Tables.

- ``userMinimum``: STAT field ``rangeMinValue`` (format 2).
- ``userValue``: STAT field ``value`` (formats 1 and 3) or ``nominalValue`` (format 2).
- ``userMaximum``: STAT field ``rangeMaxValue`` (format 2).
- ``name``: label for this axis location, STAT field ``valueNameID``.
- ``labelNames``: user-facing translations of this label, keyed by xml:lang code.
- ``elidable``: STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
- ``olderSibling``: STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
- ``linkedUserValue``: STAT field ``linkedValue`` (format 3).

:meth:`getFormat` determines which format of STAT Axis value to use to encode this label:

=========== ========= =========== =========== ===============
STAT Format userValue userMinimum userMaximum linkedUserValue
=========== ========= =========== =========== ===============
1           ✅        ❌          ❌          ❌
2           ✅        ✅          ✅          ❌
3           ✅        ❌          ❌          ✅
=========== ========= =========== =========== ===============

A :class:`LocationLabelDescriptor` is the analogue of OpenType's STAT data for a free-floating location (format 4). All values are user values. See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_ It carries a ``name``, a ``userLocation``, the ``elidable`` and ``olderSibling`` flags and ``labelNames``; the ``defaultName`` property returns the English name from :attr:`labelNames` or the :attr:`name`. :meth:`LocationLabelDescriptor.getFullUserLocation` gets the complete user location of this label, by combining data from the explicit user location and the default axis values.

.. versionadded:: 5.0

On the document, :meth:`getLocationLabel` returns the location label with the given ``name``, or ``None`` if no such label exists, and :meth:`getLabelForUserLocation` returns the :class:`LocationLabel` that matches the given ``userLocation``, or None. The document-level ``elidedFallbackName`` provides the STAT fallback name to use when all labels of a location are elided.
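A minimal sketch that attaches STAT labels to an axis and adds a format 4 location label; the label names and values are illustrative:

.. code:: python

    from fontTools.designspaceLib import (
        AxisLabelDescriptor, DesignSpaceDocument, LocationLabelDescriptor,
    )

    doc = DesignSpaceDocument()
    weight = doc.addAxisDescriptor(name="Weight", tag="wght",
                                   minimum=100, default=400, maximum=900)
    weight.axisLabels = [
        # linkedUserValue makes Regular carry the style-linking bold value.
        AxisLabelDescriptor(name="Regular", userValue=400,
                            elidable=True, linkedUserValue=700),
        AxisLabelDescriptor(name="Bold", userValue=700),
    ]
    print([label.getFormat() for label in weight.axisLabels])  # [3, 1]

    doc.addLocationLabel(LocationLabelDescriptor(
        name="Display Black",  # hypothetical style name
        userLocation={"Weight": 900},
    ))
    label = doc.getLocationLabel("Display Black")
    print(label.getFullUserLocation(doc))  # {'Weight': 900}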
**Variable fonts.** A :class:`VariableFontDescriptor` describes a variable font that can be built from (a subset of) this designspace. Before version 5 of the format, you would have needed 1 DesignSpace per variable font: the whole document implicitly described a single one. With variable font descriptors, a document can declare variable fonts that only include some axes and freeze other axes at a given value.

- ``name``: string, required. Name of this variable font, used to identify it during the build process and from other parts of the document, and also as a basename for the file in case the ``filename`` property is empty. VarLib.
- ``filename``: string, optional. Relative path to the variable font file, **as it is in the document**. The file may or may not exist yet.
- ``axisSubsets``: list of axis subsets to include in this variable font. If an axis is not mentioned, assume that we only want the default location of that axis.
- ``lib``: custom data associated with this variable font.

.. versionadded:: 5.0

A :class:`RangeAxisSubsetDescriptor` keeps an interpolating axis, optionally restricted: ``userMinimum`` is the new minimum value of the axis in the target variable font (if not specified, assume the same minimum value as the full axis, default ``-math.inf``); ``userDefault`` and ``userMaximum`` behave the same way for the default and the maximum (default ``math.inf``). A :class:`ValueAxisSubsetDescriptor` "snapshots" or "freezes" the named :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor` at ``userValue``, a value in user coordinates.

:meth:`getVariableFonts` returns all variable fonts defined in this document, or implicit variable fonts that can be built from the document's continuous axes when none are declared explicitly. In the implicit case, one variable font is generated per combination of values of the discrete axes: each gets the full range of every continuous axis, the discrete axes frozen at one value each, and a name of the form ``f"{basename}{axisNames}"``, where ``axisNames`` is ``"".join(f"-{axis.tag}{value}" ...)`` over the discrete axes and ``basename`` is derived from the document filename, falling back to ``"VF"``.
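A minimal sketch that declares a variable font keeping the Weight range while freezing a discrete Italic axis; the names are illustrative:

.. code:: python

    from fontTools.designspaceLib import (
        DesignSpaceDocument, RangeAxisSubsetDescriptor,
        ValueAxisSubsetDescriptor, VariableFontDescriptor,
    )

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(name="Weight", tag="wght",
                          minimum=100, default=400, maximum=900)
    doc.addAxisDescriptor(name="Italic", tag="ital", values=[0, 1], default=0)

    doc.addVariableFont(VariableFontDescriptor(
        name="MyFamily-Roman",  # hypothetical
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="Weight"),  # keep the full range
            ValueAxisSubsetDescriptor(name="Italic", userValue=0),  # freeze
        ],
    ))
    print([vf.name for vf in doc.getVariableFonts()])  # ['MyFamily-Roman']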
class SourceDescriptor(SimpleDescriptor):
    """Simple container for data related to the source

    .. code:: python

        doc = DesignSpaceDocument()
        s1 = SourceDescriptor()
        s1.path = masterPath1
        s1.name = "master.ufo1"
        s1.font = defcon.Font("master.ufo1")
        s1.location = dict(weight=0)
        s1.familyName = "MasterFamilyName"
        s1.styleName = "MasterStyleNameOne"
        s1.localisedFamilyName = dict(fr="Caractère")
        s1.mutedGlyphNames.append("A")
        s1.mutedGlyphNames.append("Z")
        doc.addSource(s1)
    """
    flavor = "source"
    _attrs = ['filename', 'path', 'name', 'layerName',
              'locationLabel', 'designLocation',
              'copyLib', 'copyInfo', 'copyGroups', 'copyFeatures',
              'muteKerning', 'muteInfo', 'mutedGlyphNames',
              'familyName', 'styleName', 'localisedFamilyName']

    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    def __init__(self, *, filename=None, path=None, font=None, name=None,
                 location=None, locationLabel=None, designLocation=None,
                 layerName=None, familyName=None, styleName=None,
                 localisedFamilyName=None,
                 copyLib=False, copyInfo=False, copyGroups=False,
                 copyFeatures=False,
                 muteKerning=False, muteInfo=False, mutedGlyphNames=None):
        self.filename = filename
        """string. A relative path to the source file, **as it is in the
        document**. The file may or may not exist.

        MutatorMath + VarLib.
        """
        self.path = path
        """The absolute path, calculated from filename."""
        self.font = font
        """Any Python object. Optional. Points to a representation of this
        source font that is loaded in memory, as a Python object
        (e.g. a ``defcon.Font`` or a ``fontTools.ttFont.TTFont``).

        The default document reader will not fill-in this attribute, and the
        default writer will not use this attribute. It is up to the user of
        ``designspaceLib`` to either load the resource identified by
        ``filename`` and store it in this field, or write the contents of
        this field to the disk and make ``filename`` point to that.
        """
        self.name = name
        """string. Optional. Unique identifier name for this source, used to
        identify it if it needs to be referenced from elsewhere in the
        document.
        """
        self.designLocation = designLocation if designLocation is not None else location or {}
        """dict. Axis values for this source, in design space coordinates.

        MutatorMath + Varlib.

        .. seealso:: This may be only part of the full location. See:
           :meth:`getFullDesignLocation`

        .. versionadded:: 5.0
        """
        self.locationLabel = locationLabel
        """string. Name of a :class:`LocationLabelDescriptor`. If provided,
        the source should have the same location as the label.

        .. versionadded:: 5.0
        """
        self.layerName = layerName
        """string. The name of the layer in the source to look for outline
        data. Default ``None`` which means ``foreground``.
        """
        self.familyName = familyName
        """string. Family name of this source. Though this data can be
        extracted from the font, it can be efficient to have it right here.

        Varlib.
        """
        self.styleName = styleName
        """string. Style name of this source."""
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name strings, keyed by
        language code. If present, will be used to build localized names
        for all instances.

        .. versionadded:: 5.0
        """
        self.copyLib = copyLib
        """bool. Indicates if the contents of the font.lib need to be copied
        to the instances. MutatorMath. .. deprecated:: 5.0"""
        self.copyInfo = copyInfo
        """bool. Indicates if the non-interpolating font.info needs to be
        copied to the instances. MutatorMath. .. deprecated:: 5.0"""
        self.copyGroups = copyGroups
        """bool. Indicates if the groups need to be copied to the instances.
        MutatorMath. .. deprecated:: 5.0"""
        self.copyFeatures = copyFeatures
        """bool. Indicates if the feature text needs to be copied to the
        instances. MutatorMath. .. deprecated:: 5.0"""
        self.muteKerning = muteKerning
        """bool. Indicates if the kerning data from this source needs to be
        muted (i.e. not be part of the calculations). MutatorMath only."""
        self.muteInfo = muteInfo
        """bool. Indicates if the interpolating font.info needs to be muted.
        MutatorMath only."""
        self.mutedGlyphNames = mutedGlyphNames or []
        """list. Glyphnames that need to be muted in the instances.
        MutatorMath only."""

    @property
    def location(self):
        """dict. Axis values for this source, in design space coordinates.

        MutatorMath + Varlib.

        .. deprecated:: 5.0
           Use the more explicit alias for this property :attr:`designLocation`.
        """
        return self.designLocation

    @location.setter
    def location(self, location: Optional[AnisotropicLocationDict]):
        self.designLocation = location or {}

    def setFamilyName(self, familyName, languageCode="en"):
        """Setter for :attr:`localisedFamilyName`

        .. versionadded:: 5.0
        """
        self.localisedFamilyName[languageCode] = tostr(familyName)

    def getFamilyName(self, languageCode="en"):
        """Getter for :attr:`localisedFamilyName`

        .. versionadded:: 5.0
        """
        return self.localisedFamilyName.get(languageCode)

    def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
        """Get the complete design location of this source, from its
        :attr:`designLocation` and the document's axis defaults.

        .. versionadded:: 5.0
        """
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            else:
                result[axis.name] = axis.map_forward(axis.default)
        return result
class RuleDescriptor(SimpleDescriptor):
    """Represents the rule descriptor element: a set of glyph substitutions
    to trigger in some parts of the designspace.

    .. code:: python

        r1 = RuleDescriptor()
        r1.name = "unique.rule.name"
        r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10)])
        r1.subs.append(("a", "a.alt"))

    .. code-block:: xml

        <rule name="vertical.bars">
            <conditionset>
                <condition minimum="250.000000" maximum="750.000000" name="weight"/>
                <condition minimum="100" name="width"/>
                <condition minimum="10" maximum="40" name="optical"/>
            </conditionset>
            <sub name="cent" with="cent.alt"/>
        </rule>
    """
    _attrs = ['name', 'conditionSets', 'subs']  # what do we need here

    def __init__(self, *, name=None, conditionSets=None, subs=None):
        self.name = name
        """string. Unique name for this rule. Can be used to reference this
        rule data."""
        self.conditionSets = conditionSets or []
        """a list of conditionsets.

        - Each conditionset is a list of conditions.
        - Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
        """
        self.subs = subs or []
        """list of substitutions.

        - Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt").
        - Note: By default, rules are applied first, before other text
          shaping/OpenType layout, as they are part of the
          `Required Variation Alternates OpenType feature
          <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_.
          See ref:`rules-element` § Attributes.

        If you want to use a different feature altogether, e.g. ``calt``,
        use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``:

        .. code-block:: xml

            <lib>
                <dict>
                    <key>com.github.fonttools.varLib.featureVarsFeatureTag</key>
                    <string>calt</string>
                </dict>
            </lib>
        """


def evaluateRule(rule, location):
    """Return True if any of the rule's conditionsets matches the given
    location."""
    return any(evaluateConditions(c, location) for c in rule.conditionSets)


def evaluateConditions(conditions, location):
    """Return True if all the conditions match the given location.

    - If a condition has no minimum, check for < maximum.
    - If a condition has no maximum, check for > minimum.
    """
    for cd in conditions:
        value = location[cd['name']]
        if cd.get('minimum') is None:
            if value > cd['maximum']:
                return False
        elif cd.get('maximum') is None:
            if cd['minimum'] > value:
                return False
        elif not cd['minimum'] <= value <= cd['maximum']:
            return False
    return True


def processRules(rules, location, glyphNames):
    """Apply these rules at this location to these glyphnames.

    Return a new list of glyphNames with substitutions applied.

    - rule order matters
    """
    newNames = []
    for rule in rules:
        if evaluateRule(rule, location):
            for name in glyphNames:
                swap = False
                for a, b in rule.subs:
                    if name == a:
                        swap = True
                        break
                if swap:
                    newNames.append(b)
                else:
                    newNames.append(name)
            glyphNames = newNames
            newNames = []
    return glyphNames


AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]
SimpleLocationDict = Dict[str, float]
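# Illustrative sketch (the helper name `_example_rules` is ours): at a
# matching location the substitution kicks in; outside of it the glyph
# list comes back unchanged.
def _example_rules():
    r1 = RuleDescriptor(
        name="unique.rule.name",
        conditionSets=[[dict(name="weight", minimum=-10, maximum=10)]],
        subs=[("a", "a.alt")],
    )
    assert processRules([r1], dict(weight=0), ["a", "b"]) == ["a.alt", "b"]
    assert processRules([r1], dict(weight=2000), ["a", "b"]) == ["a", "b"]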
\"\"\" @property def defaultName(self) -> str: \"\"\"Return", "of a possible \"good\" filename, in case one wants to", "be mapped. \"\"\" return next((v for k, v in self.map", ".. deprecated:: 5.0 \"\"\" self.copyInfo = copyInfo \"\"\"bool. Indicates if", ".. code:: python instance.clearLocation() instance.designLocation = {'Weight': (34, 36.5), 'Width':", "\"\"\" minVersion = self.documentObject.formatTuple if ( any( isinstance(axis, DiscreteAxisDescriptor) or", "axisLabels=None, ): super().__init__( tag=tag, name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels,", "master. This attribute is updated by the :meth:`findDefault` \"\"\" if", "Designspace documents before version 5, the whole document was implicitly", "= axis.name value = userLocation[axis.name] dimElement.attrib['uservalue'] = self.intOrFloat(value) locElement.append(dimElement) if", "and the path. So we know where the file is", "to the instances. MutatorMath. .. deprecated:: 5.0 \"\"\" self.copyFeatures =", "= [ AxisLabelDescriptor(name=\"Regular\", userValue=400, elidable=True) ] doc.addAxis(a1) \"\"\" _attrs =", "label_name.items() if attr == XML_LANG # Note: elementtree reads the", "record the results of executed rules for example). MutatorMath. ..", "self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True):", "None: instanceObject.name = name familyname = instanceElement.attrib.get('familyname') if familyname is", "language code. \"\"\" self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {} \"\"\"A dictionary", "elidable=True) ] doc.addAxis(a2) .. versionadded:: 5.0 \"\"\" flavor = \"axis\"", "self.effectiveFormatTuple = self._getEffectiveFormatTuple() self.root = ET.Element(\"designspace\") def write(self, pretty=True, encoding=\"UTF-8\",", "\"Wrapped them in a new conditionset.\" ) # read the", "u in unicodes.split(\" \")] glyphData['unicodes'] = unicodes except ValueError: raise", "axisObject.axisOrdering = int(labelElement.attrib[\"ordering\"]) for label in labelElement.findall(\".label\"): axisObject.axisLabels.append(self.readAxisLabel(label)) self.documentObject.axes.append(axisObject) self.axisDefaults[axisObject.name]", "and write ``.designspace`` data. It imports the axes, sources, variable", "self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name) default = self.normalizeLocation({axis.name: axis.default}).get(axis.name)", "set(labelElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"Label element contains unknown", "= instanceElement.attrib.get('stylemapfamilyname') if styleMapFamilyName is not None: instanceObject.styleMapFamilyName = styleMapFamilyName", "part of the full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded::", "(format 1, 3) or ``nominalValue`` (format 2).\"\"\" self.userMaximum: Optional[float] =", "value[0] triple = [ axis.map_forward(v) for v in (axis.minimum, axis.default,", "`OpenType specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__. Privately-defined axis tags must begin with an", "the rule. ruleElement = ET.Element('rule') if ruleObject.name is not None:", "be touched. case 3. 
descriptor.filename == None descriptor.path == \"~/absolute/path/there\"", "self.axes: loc[axisDescriptor.name] = axisDescriptor.map_forward( axisDescriptor.default ) return loc def labelForUserLocation(self,", "filename = variableFontElement.get(\"filename\") axisSubsetsElement = variableFontElement.find(\".axis-subsets\") if axisSubsetsElement is None:", "axis. If an axis is not mentioned, it is assumed", "is a conflict between the given filename, and the path.", "userLocation = self.locationFromElement(sourceElement) if userLocation: raise DesignSpaceDocumentError(f'<source> element \"{sourceName}\" must", "for languageCode, labelName in sorted(labelNames.items()): languageElement = ET.Element('labelname') languageElement.attrib[XML_LANG] =", "None: if value > cd['maximum']: return False elif cd.get('maximum') is", "a dict, used in testing return dict( tag=self.tag, name=self.name, labelNames=self.labelNames,", "instanceObject.name = name familyname = instanceElement.attrib.get('familyname') if familyname is not", "instance for _, glyphData in item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) for", "userValue): self.name: str = name \"\"\"Name of the :class:`AxisDescriptor` or", "-> SimpleLocationDict: \"\"\"Get the complete user location of this label,", "from elsewhere in the document. \"\"\" self.locationLabel = locationLabel \"\"\"Name", "None or axis.axisLabels for axis in self.documentObject.axes ) or self.documentObject.locationLabels", "if vf.axisSubsets: subsetsElement = ET.Element('axis-subsets') for subset in vf.axisSubsets: subsetElement", "kerningElement in sourceElement.findall(\".kerning\"): if kerningElement.attrib.get('mute') == '1': sourceObject.muteKerning = True", "name): \"\"\"Return the axis with the given ``name``, or ``None``", "if cd['minimum'] > value: return False elif not cd['minimum'] <=", "value = value[0] return next((k for k, v in self.map", "name=\"b\" unicode=\"0x62\"/> <glyph name=\"b\"/> <glyph name=\"b\"> <master location=\"location-token-bbb\" source=\"master-token-aaa2\"/> <master", "Simple container for the axis data. Add more localisations? ..", "class DesignSpaceDocumentError(Exception): def __init__(self, msg, obj=None): self.msg = msg self.obj", "self.intOrFloat(axisObject.minimum) axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum) elif isinstance(axisObject, DiscreteAxisDescriptor): axisElement.attrib['values'] = \"", "this instance's location is determined for each axis independently by", "elidable=elidable, olderSibling=olderSibling, labelNames=labelNames, ) self.documentObject.locationLabels.append(locationLabel) def readVariableFonts(self): if self.documentObject.formatTuple <", "respond to the following situations: In each descriptor, we have", "``element``. .. versionchanged:: 5.0 Return a tuple of (designLocation, userLocation)", "== getattr(other, attr)) except AssertionError: print(\"failed attribute\", attr, getattr(self, attr),", "as it is used in the location dicts. MutatorMath +", "use a different feature altogether, e.g. 
``calt``, use the lib", "None maximumStr = element.get(\"usermaximum\") maximum = float(maximumStr) if maximumStr is", "output to a dict, used in testing return dict( tag=self.tag,", "descriptor.path == \"~/absolute/path/there\" -- action: there is a conflict between", "return names def getAxis(self, name): \"\"\"Return the axis with the", "readLocation userValue %3.3f\", userValue) try: xValue = dimensionElement.attrib.get('xvalue') if xValue", "cond.get('maximum') is not None: maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name']) else: maximum", "See ref:`rules-element` § Attributes. \"\"\" def evaluateRule(rule, location): \"\"\"Return True", "return next((v for k, v in self.map if k ==", "layerName=None, familyName=None, styleName=None, localisedFamilyName=None, copyLib=False, copyInfo=False, copyGroups=False, copyFeatures=False, muteKerning=False, muteInfo=False,", "not None and axis.name in userLocation: dimElement = ET.Element('dimension') dimElement.attrib['name']", "localised style map stylename strings, keyed by language code. \"\"\"", "disk, this is the full path that was given to", "to continuous axes, only the values in this list can", "given ``kwargs`` and add it to :attr:`locationLabels`. .. versionadded:: 5.0", "-> Optional[LocationLabelDescriptor]: \"\"\"Return the top-level location label with the given", "is used. .. versionadded:: 5.0 \"\"\" return { axis.name: (", "of # assert, it should simply return True/False def compare(self,", "to a user location. Assume that missing coordinates are at", "= DesignSpaceDocument() s1 = SourceDescriptor() s1.path = masterPath1 s1.name =", "If present, will be used to build localized names for", "mentioned, it is assumed to be at its default location.", "readerClass else: self.readerClass = BaseDocReader if writerClass is not None:", "\"\"\" # masters for item in self.sources: item.location = self.normalizeLocation(item.location)", "write designspace files \"\"\" __all__ = [ 'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor',", "for variableFont in self.documentObject.variableFonts: self._addVariableFont(variableFontsElement, variableFont) self.root.append(variableFontsElement) if self.documentObject.instances: self.root.append(ET.Element(\"instances\"))", "width=(400,300)) i2.postScriptFontName = \"InstancePostscriptName\" i2.styleMapFamilyName = \"InstanceStyleMapFamilyName\" i2.styleMapStyleName = \"InstanceStyleMapStyleName\"", "be None, assume axis.minimum cd['minimum'] = None cdMax = conditionElement.attrib.get(\"maximum\")", "= sourcePath # absolute path to the ufo source sourceObject.filename", "self.root = ET.Element(\"designspace\") def write(self, pretty=True, encoding=\"UTF-8\", xml_declaration=True): self.root.attrib['format'] =", "don't add this condition continue conditionElement = ET.Element('condition') conditionElement.attrib['name'] =", "sourceElement.append(groupsElement) if sourceObject.copyFeatures: featuresElement = ET.Element('features') featuresElement.attrib['copy'] = \"1\" sourceElement.append(featuresElement)", "with yvalue=\"{yValue}\"') designLoc[dimName] = (xValue, yValue) elif xValue is not", "== '../somewhere' descriptor.path == \"~/absolute/path/there\" -- action: there is a", "instanceObject.locationLabel = locationLabel instanceObject.userLocation = userLocation or {} instanceObject.designLocation =", "this axis location, STAT field ``valueNameID``.\"\"\" self.elidable: bool = elidable", "= self._getEffectiveFormatTuple() self.root = 
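# Illustrative sketch (the helper name `_example_full_location` is ours, and
# the axis values are made up): axes missing from the partial locations fall
# back to their mapped defaults when computing the full location.
def _example_full_location():
    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(name="Weight", tag="wght",
                          minimum=100, default=400, maximum=900)
    doc.addAxisDescriptor(name="Width", tag="wdth",
                          minimum=50, default=100, maximum=200)
    instance = InstanceDescriptor(userLocation=dict(Weight=700))
    assert instance.getFullDesignLocation(doc) == {"Weight": 700, "Width": 100}
    assert instance.getFullUserLocation(doc) == {"Weight": 700, "Width": 100}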
def tagForAxisName(name):
    # try to find or make a tag name for this axis name
    names = {
        'weight': ('wght', dict(en='Weight')),
        'width': ('wdth', dict(en='Width')),
        'optical': ('opsz', dict(en='Optical')),
        'slant': ('slnt', dict(en='Slant')),
        'italic': ('ital', dict(en='Italic')),
    }
    if name.lower() in names:
        return names[name.lower()]
    if len(name) < 4:
        tag = name + "*" * (4 - len(name))
    else:
        tag = name[:4]
    return tag, dict(en=name)


class AbstractAxisDescriptor(SimpleDescriptor):
    flavor = "axis"

    def __init__(self, *, tag=None, name=None, labelNames=None, hidden=False,
                 map=None, axisOrdering=None, axisLabels=None):
        # opentype tag for this axis
        self.tag = tag
        """string. Four letter tag for this axis. Some might be registered
        at the `OpenType specification
        <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__.
        Privately-defined axis tags must begin with an uppercase letter and
        use only uppercase letters or digits.
        """
        # name of the axis used in locations
        self.name = name
        """string. Name of the axis as it is used in the location dicts.

        MutatorMath + Varlib.
        """
        # names for UI purposes, if this is not a standard axis,
        self.labelNames = labelNames or {}
        """dict. When defining a non-registered axis, it will be necessary
        to define user-facing readable names for the axis. Keyed by xml:lang
        code. Values are required to be ``unicode`` strings, even if they
        only contain ASCII characters.
        """
        self.hidden = hidden
        """bool. Whether this axis should be hidden in user interfaces."""
        self.map = map or []
        """list of input / output values that can describe a warp of user
        space to design space coordinates. If no map values are present, it
        is assumed user space is the same as design space, as in
        [(minimum, minimum), (maximum, maximum)].

        Varlib.
        """
        self.axisOrdering = axisOrdering
        """STAT table field ``axisOrdering``.

        .. versionadded:: 5.0
        """
        self.axisLabels: List[AxisLabelDescriptor] = axisLabels or []
        """STAT table entries for Axis Value Tables format 1, 2, 3.

        .. versionadded:: 5.0
        """


class AxisDescriptor(AbstractAxisDescriptor):
    """Simple container for the axis data.

    Add more localisations?

    .. code:: python

        a1 = AxisDescriptor()
        a1.minimum = 1
        a1.maximum = 1000
        a1.default = 400
        a1.name = "weight"
        a1.tag = "wght"
        a1.labelNames['fa-IR'] = "قطر"
        a1.labelNames['en'] = "Wéíght"
        a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
        a1.axisOrdering = 1
        a1.axisLabels = [
            AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
        ]
        doc.addAxis(a1)
    """
    _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map',
              'axisOrdering', 'axisLabels']

    def __init__(self, *, tag=None, name=None, labelNames=None, minimum=None,
                 default=None, maximum=None, hidden=False, map=None,
                 axisOrdering=None, axisLabels=None):
        super().__init__(
            tag=tag, name=name, labelNames=labelNames, hidden=hidden,
            map=map, axisOrdering=axisOrdering, axisLabels=axisLabels,
        )
        self.minimum = minimum
        """number. The minimum value for this axis in user space.

        MutatorMath + Varlib.
        """
        self.maximum = maximum
        """number. The maximum value for this axis in user space.

        MutatorMath + Varlib.
        """
        self.default = default
        """number. The default value for this axis, i.e. when a new location
        is created, this is the value this axis will get in user space.

        MutatorMath + Varlib.
        """

    def serialize(self):
        # output to a dict, used in testing
        return dict(
            tag=self.tag,
            name=self.name,
            labelNames=self.labelNames,
            maximum=self.maximum,
            minimum=self.minimum,
            default=self.default,
            hidden=self.hidden,
            map=self.map,
            axisOrdering=self.axisOrdering,
            axisLabels=self.axisLabels,
        )

    def map_forward(self, v):
        """Maps value from axis mapping's input (user) to output (design)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if not self.map:
            return v
        return piecewiseLinearMap(v, {k: v for k, v in self.map})

    def map_backward(self, v):
        """Maps value from axis mapping's output (design) to input (user)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if isinstance(v, tuple):
            v = v[0]
        if not self.map:
            return v
        return piecewiseLinearMap(v, {v: k for k, v in self.map})


class DiscreteAxisDescriptor(AbstractAxisDescriptor):
    """Container for discrete axis data.

    Use this for axes that do not interpolate. The main difference from a
    continuous axis is that a continuous axis has a ``minimum`` and
    ``maximum``, while a discrete axis has a list of ``values``.

    Example: an Italic axis with 2 stops, Roman and Italic, that don't
    interpolate; the designer can provide a source for each value of the
    axis, and the fonts at each value can have different glyph sets.

    .. code:: python

        a2 = DiscreteAxisDescriptor()
        a2.values = [0, 1]
        a2.default = 0
        a2.name = "Italic"
        a2.tag = "ITAL"
        a2.map = [(0, 0), (1, -11)]
        a2.axisOrdering = 2
        a2.axisLabels = [
            AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
        ]
        doc.addAxis(a2)

    .. versionadded:: 5.0
    """
    flavor = "axis"
    _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels')

    def __init__(self, *, tag=None, name=None, labelNames=None, values=None,
                 default=None, hidden=False, map=None, axisOrdering=None,
                 axisLabels=None):
        super().__init__(
            tag=tag, name=name, labelNames=labelNames, hidden=hidden,
            map=map, axisOrdering=axisOrdering, axisLabels=axisLabels,
        )
        self.default: float = default
        """The default value for this axis, i.e. when a new location is
        created, this is the value this axis will get in user space.

        However, this default value is less important than in continuous axes:

        - it doesn't define the "neutral" version of outlines from which
          deltas would apply, as this axis does not interpolate.
        - it doesn't provide the reference glyph set for the designspace, as
          fonts at each value can have different glyph sets.
        """
        self.values: List[float] = values or []
        """List of possible values for this axis. Contrary to continuous
        axes, only the values in this list can be taken by the axis, nothing
        in-between.
        """

    def map_forward(self, value):
        """Maps value from axis mapping's input to output.

        Returns value unchanged if no mapping entry is found.

        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        return next((v for k, v in self.map if k == value), value)

    def map_backward(self, value):
        """Maps value from axis mapping's output to input.

        Returns value unchanged if no mapping entry is found.

        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        if isinstance(value, tuple):
            value = value[0]
        return next((k for k, v in self.map if v == value), value)


class AxisLabelDescriptor(SimpleDescriptor):
    """Container for axis label data.

    Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
    All values are user values.

    The STAT format of the label depends on which fields are filled-in,
    see :meth:`getFormat`

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable',
              'olderSibling', 'linkedUserValue', 'labelNames')

    def __init__(self, *, name, userValue, userMinimum=None, userMaximum=None,
                 elidable=False, olderSibling=False, linkedUserValue=None,
                 labelNames=None):
        self.userMinimum: Optional[float] = userMinimum
        """STAT field ``rangeMinValue`` (format 2)."""
        self.userValue: float = userValue
        """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2)."""
        self.userMaximum: Optional[float] = userMaximum
        """STAT field ``rangeMaxValue`` (format 2)."""
        self.name: str = name
        """Label for this axis location, STAT field ``valueNameID``."""
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``."""
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.linkedUserValue: Optional[float] = linkedUserValue
        """STAT field ``linkedValue`` (format 3)."""
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        ``xml:lang`` code.
        """

    def getFormat(self) -> int:
        """Determine which format of STAT Axis value to use to encode this
        label.

        =========== ========= =========== =========== ===============
        STAT Format userValue userMinimum userMaximum linkedUserValue
        =========== ========= =========== =========== ===============
        1           ✅        ❌          ❌          ❌
        2           ✅        ✅          ✅          ❌
        3           ✅        ❌          ❌          ✅
        =========== ========= =========== =========== ===============
        """
        if self.linkedUserValue is not None:
            return 3
        if self.userMinimum is not None or self.userMaximum is not None:
            return 2
        return 1

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name


class LocationLabelDescriptor(SimpleDescriptor):
    """Container for location label data.

    Analogue of OpenType's STAT data for a free-floating location (format 4).
    All values are user values.

    See: `OTSpec STAT Axis value table, format 4
    <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')

    def __init__(self, *, name, userLocation, elidable=False,
                 olderSibling=False, labelNames=None):
        self.name: str = name
        """Label for this named location, STAT field ``valueNameID``."""
        self.userLocation: SimpleLocationDict = userLocation or {}
        """Location in user coordinates along each axis.

        If an axis is not mentioned, it is assumed to be at its default
        location.

        .. seealso:: This may be only part of the full location. See:
           :meth:`getFullUserLocation`
        """
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``."""
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``."""
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        xml:lang code.
        """

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name

    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location of this label, by combining data
        from the explicit user location and default axis values.

        .. versionadded:: 5.0
        """
        return {
            axis.name: self.userLocation.get(axis.name, axis.default)
            for axis in doc.axes
        }
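# Illustrative sketch (the helper name `_example_label_formats` is ours, and
# the values are made up): the STAT format is inferred from which fields of
# the label are filled in.
def _example_label_formats():
    bold = AxisLabelDescriptor(name="Bold", userValue=700)
    regular = AxisLabelDescriptor(name="Regular", userValue=400,
                                  elidable=True, linkedUserValue=700)
    narrow = AxisLabelDescriptor(name="Narrow", userValue=85,
                                 userMinimum=75, userMaximum=92.5)
    assert [label.getFormat() for label in (bold, regular, narrow)] == [1, 3, 2]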
class RangeAxisSubsetDescriptor(SimpleDescriptor):
    """Subset of a continuous axis to include in a variable font.

    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')

    def __init__(self, *, name, userMinimum=-math.inf, userDefault=None,
                 userMaximum=math.inf):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` to subset."""
        self.userMinimum: float = userMinimum
        """New minimum value of the axis in the target variable font.
        If not specified, assume the same minimum value as the full axis.
        (default = ``-math.inf``)
        """
        self.userDefault: Optional[float] = userDefault
        """New default value of the axis in the target variable font.
        If not specified, assume the same default value as the full axis.
        (default = ``None``)
        """
        self.userMaximum: float = userMaximum
        """New maximum value of the axis in the target variable font.
        If not specified, assume the same maximum value as the full axis.
        (default = ``math.inf``)
        """


class ValueAxisSubsetDescriptor(SimpleDescriptor):
    """Single value of a discrete or continuous axis to use in a variable
    font.

    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userValue')

    def __init__(self, *, name, userValue):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
        to "snapshot" or "freeze".
        """
        self.userValue: float = userValue
        """Value in user coordinates at which to freeze the given axis."""


class VariableFontDescriptor(SimpleDescriptor):
    """Container for variable fonts, sub-spaces of the Designspace.

    Use-cases:

    - From a single DesignSpace with discrete axes, define 1 variable font
      per value on the discrete axes. Before version 5, you would have
      needed 1 DesignSpace per such variable font, and a lot of data
      duplication.
    - From a big variable font with many axes, define subsets of that
      variable font that only include some axes and freeze other axes at a
      given location.

    .. versionadded:: 5.0
    """
    flavor = "variable-font"
    _attrs = ('filename', 'axisSubsets', 'lib')

    filename = posixpath_property("_filename")

    def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
        self.name: str = name
        """string, required. Name of this variable font to identify it
        during the build process and from other parts of the document, and
        also as a filename in case the filename property is empty.
        """
        self.filename: str = filename
        """string, optional. Relative path to the variable font file, **as
        it is in the document**. The file may or may not exist.
        """
        self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []
        """Axis subsets to include in this variable font.

        If an axis is not mentioned, assume that we only want the default
        location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
        """
        self.lib: MutableMapping[str, Any] = lib or {}
        """Custom data associated with this variable font."""
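# Illustrative sketch (the helper name `_example_variable_font` is ours, and
# the axis and font names are made up): freeze a hypothetical Italic axis at
# 0 and restrict Weight to the 400-700 range.
def _example_variable_font(doc: 'DesignSpaceDocument'):
    doc.addVariableFont(VariableFontDescriptor(
        name="MyFontVF-Upright",
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="Weight", userMinimum=400,
                                      userDefault=400, userMaximum=700),
            ValueAxisSubsetDescriptor(name="Italic", userValue=0),
        ],
    ))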
class BaseDocWriter(object):
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontDescriptorClass = VariableFontDescriptor
    valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
    rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
    instanceDescriptorClass = InstanceDescriptor

    @classmethod
    def getAxisDecriptor(cls):
        return cls.axisDescriptorClass()

    @classmethod
    def getSourceDescriptor(cls):
        return cls.sourceDescriptorClass()

    @classmethod
    def getInstanceDescriptor(cls):
        return cls.instanceDescriptorClass()

    @classmethod
    def getRuleDescriptor(cls):
        return cls.ruleDescriptorClass()

    def __init__(self, documentPath, documentObject: DesignSpaceDocument):
        self.path = documentPath
        self.documentObject = documentObject
        self.effectiveFormatTuple = self._getEffectiveFormatTuple()
        self.root = ET.Element("designspace")

    def _getEffectiveFormatTuple(self):
        """Try to use the version specified in the document, or a
        sufficiently recent version to be able to encode what the document
        contains.
        """
        minVersion = self.documentObject.formatTuple
        if (
            any(
                isinstance(axis, DiscreteAxisDescriptor)
                or axis.axisOrdering is not None
                or axis.axisLabels
                for axis in self.documentObject.axes
            )
            or self.documentObject.locationLabels
            or any(source.localisedFamilyName for source in self.documentObject.sources)
            or self.documentObject.variableFonts
            or any(
                instance.locationLabel or instance.userLocation
                for instance in self.documentObject.instances
            )
        ):
            if minVersion < (5, 0):
                minVersion = (5, 0)
        return minVersion

    def write(self, pretty=True, encoding="UTF-8", xml_declaration=True):
        self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple)

        if (
            self.documentObject.axes
            or self.documentObject.elidedFallbackName is not None
        ):
            axesElement = ET.Element("axes")
            if self.documentObject.elidedFallbackName is not None:
                axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName
            self.root.append(axesElement)
        for axisObject in self.documentObject.axes:
            self._addAxis(axisObject)

        if self.documentObject.locationLabels:
            labelsElement = ET.Element("labels")
            for labelObject in self.documentObject.locationLabels:
                self._addLocationLabel(labelsElement, labelObject)
            self.root.append(labelsElement)

        if self.documentObject.rules:
            if getattr(self.documentObject, "rulesProcessingLast", False):
                attributes = {"processing": "last"}
            else:
                attributes = {}
            self.root.append(ET.Element("rules", attributes))
        for ruleObject in self.documentObject.rules:
            self._addRule(ruleObject)

        if self.documentObject.sources:
            self.root.append(ET.Element("sources"))
        for sourceObject in self.documentObject.sources:
            self._addSource(sourceObject)

        if self.documentObject.variableFonts:
            variableFontsElement = ET.Element("variable-fonts")
            for variableFont in self.documentObject.variableFonts:
                self._addVariableFont(variableFontsElement, variableFont)
            self.root.append(variableFontsElement)

        if self.documentObject.instances:
            self.root.append(ET.Element("instances"))
        for instanceObject in self.documentObject.instances:
            self._addInstance(instanceObject)

        if self.documentObject.lib:
            self._addLib(self.root, self.documentObject.lib, 2)

        tree = ET.ElementTree(self.root)
        tree.write(
            self.path,
            encoding=encoding,
            method='xml',
            xml_declaration=xml_declaration,
            pretty_print=pretty,
        )

    def intOrFloat(self, num):
        if int(num) == num:
            return "%d" % num
        return ("%f" % num).rstrip('0').rstrip('.')

    ...  # The individual _addAxis, _addAxisLabel, _addLocationLabel,
         # _addLocationElement, _addRule, _addSource, _addVariableFont,
         # _addInstance, _addLib and _writeGlyphElement helpers build the
         # corresponding XML elements.


class BaseDocReader(LogMixin):
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontDescriptorClass = VariableFontDescriptor
    valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
    rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
    instanceDescriptorClass = InstanceDescriptor

    def __init__(self, documentPath, documentObject):
        self.path = documentPath
        self.documentObject = documentObject
        tree = ET.parse(self.path)
        self.root = tree.getroot()
        self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
        self._axes = []
        self.rules = []
        self.sources = []
        self.instances = []
        self.axisDefaults = {}
        self._strictAxisNames = True

    @classmethod
    def fromstring(cls, string, documentObject):
        f = BytesIO(tobytes(string, encoding="utf-8"))
        self = cls(f, documentObject)
        self.path = None
        return self

    def read(self):
        self.readAxes()
        self.readLabels()
        self.readRules()
        self.readVariableFonts()
        self.readSources()
        self.readInstances()
        self.readLib()

    ...  # The individual read* methods walk the XML tree and fill in the
         # descriptor objects. readRules() additionally wraps stray rule
         # conditions found outside a conditionset into a new conditionset
         # and logs "Found stray rule conditions outside a conditionset.
         # Wrapped them in a new conditionset."
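# Illustrative sketch (MySourceDescriptor, MyDocReader and MyDocWriter are
# made-up names): the reader and writer pick their descriptor classes from
# class attributes, so subclassing swaps in custom descriptors document-wide.
class MySourceDescriptor(SourceDescriptor):
    pass


class MyDocReader(BaseDocReader):
    sourceDescriptorClass = MySourceDescriptor


class MyDocWriter(BaseDocWriter):
    sourceDescriptorClass = MySourceDescriptor


# Used as: DesignSpaceDocument(readerClass=MyDocReader, writerClass=MyDocWriter)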
\"\"\" for cd in", "/ output values that can describe a warp of user", "= next(numbers, 0) return (major, minor) def getVariableFonts(self) -> List[VariableFontDescriptor]:", "= {} fonts = [] for source in self.sources: if", "axis.maximum}).get(axis.name) default = self.normalizeLocation({axis.name: axis.default}).get(axis.name) # and set them in", "if userLocation: raise DesignSpaceDocumentError(f'<glyph> element \"{glyphName}\" must only have design", "5.0 \"\"\" self.locationLabels.append(locationLabelDescriptor) def addLocationLabelDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`LocationLabelDescriptor`", "for axis in self.axes: # scale the map first newMap", "axisObject.axisOrdering or axisObject.axisLabels: labelsElement = ET.Element('labels') if axisObject.axisOrdering is not", "m.get('font') if m.get('location') is not None: locationElement, m['location'] = self._makeLocationElement(m.get('location'))", "these to be None, assume axis.maximum cd['maximum'] = None cd['name']", "True self.documentObject.sources.append(sourceObject) def locationFromElement(self, element): \"\"\"Read a nested ``<location>`` element", "either load the resource identified by ``filename`` and store it", "3) or ``nominalValue`` (format 2).\"\"\" self.userMaximum: Optional[float] = userMaximum \"\"\"STAT", "self.obj if self.obj is not None else \"\") class AsDictMixin(object):", "familyName \"\"\"string. Family name of this source. Though this data", "in ruleObject.subs: subElement = ET.Element('sub') subElement.attrib['name'] = sub[0] subElement.attrib['with'] =", "continuous axis is that a continuous axis has a ``minimum``", "Varlib. \"\"\" self.postScriptFontName = postScriptFontName \"\"\"string. Postscript fontname for this", "label_name in element.findall(\"labelname\") for attr, lang in label_name.items() if attr", ".. versionadded:: 5.0 \"\"\" label = self.getLocationLabelDescriptor(doc) if label is", "designspace: - scale all the locations of all masters and", "self.values: List[float] = values or [] \"\"\"List of possible values", "if glyphSources is None: glyphSources = [] glyphSources.append(d) if glyphSources", "the full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\"", "= AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor locationLabelDescriptorClass =", "not None: sourceObject.familyName = familyName styleName = sourceElement.attrib.get(\"stylename\") if styleName", "path=None, font=None, name=None, location=None, locationLabel=None, designLocation=None, userLocation=None, familyName=None, styleName=None, postScriptFontName=None,", "\"\"\"dict. A dictionary of localised stylename strings, keyed by language", "== \"en\": continue # already stored in the element attribute", "necessary to define user-facing readable names for the axis. Keyed", "from other parts of the document, and also as a", "glyphSources = [] glyphSources.append(d) if glyphSources is not None: glyphData['masters']", "simply return True/False def compare(self, other): # test if this", "location for that axis. 
Note: the output won't be anisotropic,", "\"{dimName}\"\" with yvalue=\"{yValue}\"') designLoc[dimName] = (xValue, yValue) elif xValue is", "</lib> \"\"\" self.sources: List[SourceDescriptor] = [] \"\"\"List of this document's", "parentElement.append(vfElement) def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) ->", "self) reader.read() if self.sources: self.findDefault() return self def tostring(self, encoding=None):", "or self.name def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict: \"\"\"Get the", "name, userValue): self.name: str = name \"\"\"Name of the :class:`AxisDescriptor`", "self.instances = [] self.axisDefaults = {} self._strictAxisNames = True @classmethod", "if element.get(\"elidable\") == \"true\" else False olderSibling = True if", "DiscreteAxisDescriptor() a2.values = [0, 1] a2.name = \"Italic\" a2.tag =", "location=\"...\" attribute or the nested location element') instanceObject.locationLabel = locationLabel", "element for the whole document.\"\"\" for libElement in self.root.findall(\".lib\"): self.documentObject.lib", "for instanceObject in self.documentObject.instances: self._addInstance(instanceObject) if self.documentObject.lib: self._addLib(self.root, self.documentObject.lib, 2)", "+ Varlib. \"\"\" self.maximum = maximum \"\"\"number. The maximum value", "filename = instanceElement.attrib.get('filename') if filename is not None and self.documentObject.path", "needs its kerning calculated. MutatorMath. .. deprecated:: 5.0 \"\"\" self.info", "output (design) to input (user).\"\"\" from fontTools.varLib.models import piecewiseLinearMap if", "path self.filename = os.path.basename(path) reader = self.readerClass(path, self) reader.read() if", "axisLabelDescriptorClass = AxisLabelDescriptor locationLabelDescriptorClass = LocationLabelDescriptor ruleDescriptorClass = RuleDescriptor sourceDescriptorClass", "self.documentObject.sources: self.root.append(ET.Element(\"sources\")) for sourceObject in self.documentObject.sources: self._addSource(sourceObject) if self.documentObject.variableFonts: variableFontsElement", "instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} instance.userLocation = {'Opsz':", "UI purposes, if this is not a standard axis, self.labelNames", "any( instance.locationLabel or instance.userLocation for instance in self.documentObject.instances ) ):", "*, name, filename=None, axisSubsets=None, lib=None): self.name: str = name \"\"\"string,", "import collections import copy import itertools import math import os", "if unicodes is not None: try: unicodes = [int(u, 16)", "ruleElement, ruleName, ) if externalConditions: ruleObject.conditionSets.append(externalConditions) self.log.info( \"Found stray rule", "not have a filename attr. useless, but no reason to", "self.instances: # glyph masters for this instance for _, glyphData", "discrete axes. .. seealso:: :func:`splitInterpolable` .. versionadded:: 5.0 \"\"\" if", "= self.writerClass(f, self) writer.write(encoding=encoding, xml_declaration=xml_declaration) return f.getvalue() def read(self, path):", "Each substitution is stored as tuples of glyphnames, e.g. 
(\"a\",", "given ``sourceDescriptor`` to ``doc.sources``.\"\"\" self.sources.append(sourceDescriptor) def addSourceDescriptor(self, **kwargs): \"\"\"Instantiate a", "valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) for values in", "shaping/OpenType layout, as they are part of the `Required Variation", "intOrFloat(self, num): if int(num) == num: return \"%d\" % num", "self._getEffectiveFormatTuple() self.root = ET.Element(\"designspace\") def write(self, pretty=True, encoding=\"UTF-8\", xml_declaration=True): self.root.attrib['format']", "we have masterGlyphName = glyphName d = dict(font=fontSourceName, location=designLocation, glyphName=masterGlyphName)", "if m.get('location') is not None: locationElement, m['location'] = self._makeLocationElement(m.get('location')) masterElement.append(locationElement)", "\"VF\" axisNames = \"\".join([f\"-{axis.tag}{value}\" for axis, value in zip(discreteAxes, values)])", "rule conditions outside a conditionset. \" \"Wrapped them in a", "glyphData['masters']: glyphMaster['location'] = self.normalizeLocation(glyphMaster['location']) item.location = self.normalizeLocation(item.location) # the axes", "font. .. versionadded:: 5.0 \"\"\" flavor = \"axis-subset\" _attrs =", "raise ValueError(\"unsupported encoding: '%s'\" % encoding) writer = self.writerClass(f, self)", "before we save we need to identify and respond to", "\"قطر\" a1.labelNames['en'] = \"Wéíght\" a1.map = [(1.0, 10.0), (400.0, 66.0),", "getFamilyName(self, languageCode=\"en\"): return self.localisedFamilyName.get(languageCode) def setStyleMapStyleName(self, styleMapStyleName, languageCode=\"en\"): self.localisedStyleMapStyleName[languageCode] =", "doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] elif axis.name", "self.readLabels() self.readRules() self.readVariableFonts() self.readSources() self.readInstances() self.readLib() def readRules(self): # we", "if name is None: raise DesignSpaceDocumentError(\"axis-subset element must have a", "should be fine case 4. descriptor.filename == '../somewhere' descriptor.path ==", "conditions: if cond.get('minimum') is not None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name'])", "instancePath = None instanceObject = self.instanceDescriptorClass() instanceObject.path = instancePath #", "= instanceObject.getStyleMapStyleName(code) instanceElement.append(localisedStyleMapStyleNameElement) if instanceObject.localisedStyleMapFamilyName: languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) languageCodes.sort() for", "locationLabel = instanceElement.attrib.get('location') if (designLocation or userLocation) and locationLabel is", "and the default writer will not use this attribute. It", "some parts of the designspace. .. code:: python r1 =", "mute = glyphElement.attrib.get(\"mute\") if mute == \"1\": glyphData['mute'] = True", "the document**. The file may or may not exist. If", "In each descriptor, we have to do the right thing", "self.copyInfo = copyInfo \"\"\"bool. Indicates if the non-interpolating font.info needs", "= '/' + new_path elif path.startswith(r'\\\\'): # The above transformation", ".. code:: python from fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument.fromfile(\"some/path/to/my.designspace\")", "read and write ``.designspace`` data. 
It imports the axes, sources, variable fonts and instances to very basic descriptor objects. Data is added to the document by creating such descriptor objects, filling them with data and adding them to the document.
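A minimal sketch of loading a document and walking its descriptors (the path is a placeholder):

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
    for axis in doc.axes:
        print(axis.name, axis.tag)
    for source in doc.sources:
        print(source.filename, source.location)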
Raises if the named label can't be found.
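A sketch of that failure mode (``instance`` and ``doc`` are assumed to exist):

.. code:: python

    try:
        label = instance.getLocationLabelDescriptor(doc)
    except DesignSpaceDocumentError:
        # the instance's locationLabel names no known top-level label
        raise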
If not specified, the :attr:`name` will be used as a basename for the file.
The default value for this axis, i.e. when a new location is created, this is the value this axis will get in user space.
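A minimal sketch, mirroring the axis example used elsewhere in this module:

.. code:: python

    a1 = AxisDescriptor()
    a1.minimum = 1
    a1.maximum = 1000
    a1.default = 400  # new locations start here, in user space
    a1.name = "weight"
    a1.tag = "wght"
    doc.addAxis(a1)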
\"\"\" if masters: for descriptor", "def serialize(self): # output to a dict, used in testing", "or \"\" for label_name in labelElement.findall(\"labelname\") for attr, lang in", "== '1': sourceObject.mutedGlyphNames.append(glyphName) for kerningElement in sourceElement.findall(\".kerning\"): if kerningElement.attrib.get('mute') ==", "location to these glyphnames. Return a new list of glyphNames", "reader = self.readerClass(path, self) reader.read() if self.sources: self.findDefault() def write(self,", "default=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__( tag=tag, name=name, labelNames=labelNames,", "coordinates. MutatorMath + Varlib. .. seealso:: This may be only", "languageCodes = list(instanceObject.localisedStyleMapStyleName.keys()) languageCodes.sort() for code in languageCodes: if code", "be as many variable fonts as there are locations on", "axis, i.e. when a new location is created, this is", "location in design space coordinates.\"\"\" # Without OrderedDict, output XML", "fields, then change the field(s) for which they have data.", "paths using forward slash to work also on Windows.\"\"\" new_path", "a discrete subset must have a uservalue attribute.\" ) userValue", "if encoding is str or ( encoding is not None", "in axisElements: if self.documentObject.formatTuple >= (5, 0) and \"values\" in", "evaluateConditions(conditions, location): \"\"\"Return True if all the conditions matches the", "b)) for labelNameElement in axisElement.findall('labelname'): # Note: elementtree reads the", "@classmethod def getInstanceDescriptor(cls): return cls.instanceDescriptorClass() @classmethod def getRuleDescriptor(cls): return cls.ruleDescriptorClass()", "self.mutedGlyphNames = mutedGlyphNames or [] \"\"\"list. Glyphnames that need to", "When the document is produced by a Python script and", "a, b in rule.subs: if name == a: swap =", "== name: return axisDescriptor return None def getLocationLabel(self, name: str)", ":attr:`name` will be used as a basename for the file.", "self.path = None \"\"\"String, optional. When the document is read", "whole space. In version 5 and above documents, there can", "axes, define 1 variable font per value on the discrete", "if ruleName is not None else \"\")) cds.append(cd) return cds", "if instanceObject.glyphs: if instanceElement.findall('.glyphs') == []: glyphsElement = ET.Element('glyphs') instanceElement.append(glyphsElement)", "be taken by the axis, nothing in-between. \"\"\" def map_forward(self,", "versionchanged:: 5.0 Return a tuple of (designLocation, userLocation) \"\"\" elementLocation", "it if it needs to be referenced from elsewhere in", "self.default = None # Convert the default location from user", "= \"ITAL\" a2.labelNames['fr'] = \"Italique\" a2.map = [(0, 0), (1,", "= RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontsDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass =", ".. versionadded:: 5.0 \"\"\" return { axis.name: ( axis.map_backward(designLocation[axis.name]) if", "to be muted (i.e. not be part of the calculations).", "return self.labelNames.get(\"en\") or self.name class LocationLabelDescriptor(SimpleDescriptor): \"\"\"Container for location label", "ruleObject = self.ruleDescriptorClass() ruleName = ruleObject.name = ruleElement.attrib.get(\"name\") # read", "table field ``axisOrdering``. See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_ ..", "tuples of glyphnames, e.g. (\"a\", \"a.alt\"). 
- Note: By default, rules are applied first, before other text shaping/OpenType layout, as they are part of the `Required Variation Alternates` OpenType feature.
        .. versionadded:: 5.0
        """
        return {
            axis.name: self.userLocation.get(axis.name, axis.default)
            for axis in doc.axes
        }
For new projects, you probably want True.
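A one-line sketch of opting in:

.. code:: python

    # Ask compilers to apply the rules after other substitution features.
    doc.rulesProcessingLast = True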
descriptor.filename == None
descriptor.path == "~/absolute/path/there"
-- action:
    calculate the relative path for filename.
    We're not overwriting some other value for filename, it should be fine.
The default location is the set of all `default` values in user space of all axes.
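A minimal sketch (re-using ``DesignSpaceDocumentError`` as the error type is an assumption):

.. code:: python

    # findDefault() compares each source's design location against the
    # mapped axis defaults and records the match in doc.default.
    default_source = doc.findDefault()
    if default_source is None:
        raise DesignSpaceDocumentError("no source at the default location")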
\"\"\" @property def location(self): \"\"\"dict.", "force: continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path)", "None, assume axis.minimum cd['minimum'] = None cdMax = conditionElement.attrib.get(\"maximum\") if", "for label in self.locationLabels if label.userLocation == userLocation), None )", "Returns: List of font objects in the order they appear", "sourceObject.location = self._makeLocationElement(sourceObject.location) sourceElement.append(locationElement) self.root.findall('.sources')[0].append(sourceElement) def _addVariableFont(self, parentElement: ET.Element, vf:", "True for featuresElement in sourceElement.findall(\".features\"): if featuresElement.attrib.get('copy') == '1': sourceObject.copyFeatures", "DesignSpaceDocumentError( \"condition missing required minimum or maximum in rule\" +", "\"\"\"Read the lib element for the whole document.\"\"\" for libElement", "https://github.com/LettError/designSpaceDocument/issues/10 loc = collections.OrderedDict() for axisDescriptor in self.axes: loc[axisDescriptor.name] =", "dimensionElement.attrib.get('uservalue') if userValue is not None: userValue = float(userValue) except", "related to the source .. code:: python doc = DesignSpaceDocument()", "is not None: sourceObject.layerName = layerName for libElement in sourceElement.findall('.lib'):", "dict(en = 'Optical Size')), 'slant': ('slnt', dict(en = 'Slant')), 'italic':", ":attr:`name`.\"\"\" return self.labelNames.get(\"en\") or self.name class LocationLabelDescriptor(SimpleDescriptor): \"\"\"Container for location", "float(valueStr) minimumStr = element.get(\"userminimum\") minimum = float(minimumStr) if minimumStr is", "-> SimpleLocationDict: \"\"\"Get the complete user location for this instance.", "keyed by language code. \"\"\" self.glyphs = glyphs or {}", "field ``value`` (format 1, 3) or ``nominalValue`` (format 2).\"\"\" self.userMaximum:", "Optional[float] = userMaximum \"\"\"STAT field ``rangeMaxValue`` (format 2).\"\"\" self.name: str", "field ``linkedValue`` (format 3).\"\"\" self.labelNames: MutableMapping[str, str] = labelNames or", "cls.instanceDescriptorClass() @classmethod def getRuleDescriptor(cls): return cls.ruleDescriptorClass() def __init__(self, documentPath, documentObject:", "xml_attrs = {'name', 'uservalue'} unknown_attrs = set(element.attrib) - xml_attrs if", "using the given ``kwargs`` and add it to :attr:`rules`. \"\"\"", "in ruleElement.findall('.conditionset'): conditionSet = self._readConditionElements( conditionSetElement, ruleName, ) if conditionSet", "newAxisDescriptor(self): \"\"\"Ask the writer class to make us a new", "code localisedFamilyNameElement.text = instanceObject.getFamilyName(code) instanceElement.append(localisedFamilyNameElement) if instanceObject.localisedStyleMapStyleName: languageCodes = list(instanceObject.localisedStyleMapStyleName.keys())", "_makeLocationElement(self, locationObject, name=None): \"\"\" Convert Location dict to a locationElement.\"\"\"", "designspace, as fonts at each value can have different glyph", "- it doesn't define the \"neutral\" version of outlines from", "= \"1\" self.root.findall('.axes')[0].append(axisElement) def _addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor) ->", "element.get(\"linkeduservalue\") linkedValue = float(linkedValueStr) if linkedValueStr is not None else", "\"\"\"string, optional. 
Relative path to the variable font file, **as it is in the document**.
        MutatorMath + VarLib.
        """
Assume that missing coordinates are at the default location for that axis.
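A sketch with hypothetical axes "Weight" (default 400) and "Width" (default 100):

.. code:: python

    design = doc.map_forward({"Weight": 700})
    # the omitted "Width" axis comes back at the design-space
    # equivalent of its user default, 100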
axesElement = self.root.find(\".axes\")", "specified in the document, or a sufficiently recent version to", "axis = self.writerClass.axisDescriptorClass(**kwargs) self.addAxis(axis) return axis def addRule(self, ruleDescriptor: RuleDescriptor):", "= styleMapStyleNameElement.text instanceObject.setStyleMapStyleName(styleMapStyleName, lang) for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'): for key,", "\"\"\"Add the given ``variableFontDescriptor`` to :attr:`variableFonts`. .. versionadded:: 5.0 \"\"\"", "\"\"\"Instantiate a new :class:`RuleDescriptor` using the given ``kwargs`` and add", "_addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor) -> None: labelElement = ET.Element('label')", "code:: python doc = DesignSpaceDocument() s1 = SourceDescriptor() s1.path =", "element.attrib: xml_attrs = {'name', 'uservalue'} unknown_attrs = set(element.attrib) - xml_attrs", "OpenType's STAT data for a free-floating location (format 4). All", "glyphData['note'] = noteElement.text break designLocation, userLocation = self.locationFromElement(glyphElement) if userLocation:", "= {} for dimensionElement in locationElement.findall(\".dimension\"): dimName = dimensionElement.attrib.get(\"name\") if", "xValue = float(xValue) except ValueError: self.log.warning(\"ValueError in readLocation xValue %3.3f\",", "values, and they are assumed to be the default. See", "stored as tuples of glyphnames (\"a\", \"a.alt\") self.subs = subs", "if key == XML_LANG: familyName = familyNameElement.text sourceObject.setFamilyName(familyName, lang) designLocation,", "validatedLocation = self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items(): if axisName", "same path are only loaded once and shared among SourceDescriptors.", "XML_LANG: familyName = familyNameElement.text instanceObject.setFamilyName(familyName, lang) for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'):", "raise DesignSpaceDocumentError( 'InstanceDescriptor.getLocationLabelDescriptor(): ' f'unknown location label `{self.locationLabel}` in instance", "styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname') if styleMapFamilyName is not None: instanceObject.styleMapFamilyName =", "r1.subs.append((\"a\", \"a.alt\")) .. code:: xml <!-- optional: list of substitution", "of (designLocation, userLocation) \"\"\" if self._strictAxisNames and not self.documentObject.axes: raise", "default source to not specify some of the axis values,", "if processingValue not in {\"first\", \"last\"}: raise DesignSpaceDocumentError( \"<rules> processing", "Indicated if the interpolating font.info data for this source needs", "localisedStyleMapFamilyName=None, localisedStyleMapStyleName=None, glyphs=None, kerning=True, info=True, lib=None, ): self.filename = filename", "= 1000 a1.default = 400 a1.name = \"weight\" a1.tag =", "for this named location, STAT field ``valueNameID``.\"\"\" self.userLocation: SimpleLocationDict =", "forward slashes.\"\"\" def getter(self): # Normal getter return getattr(self, private_name)", "@classmethod def getSourceDescriptor(cls): return cls.sourceDescriptorClass() @classmethod def getInstanceDescriptor(cls): return cls.instanceDescriptorClass()", "for the STAT table, however it can't become a variation", "of glyphnames (\"a\", \"a.alt\") self.subs = subs or [] \"\"\"list", "STAT format 4 labels. .. versionadded:: 5.0\"\"\" self.rules: List[RuleDescriptor] =", "\"\"\" self.muteKerning = muteKerning \"\"\"bool. 
Indicates if the kerning data from this source needs to be muted (i.e. not be part of the calculations). MutatorMath only.
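A minimal sketch (``masterPath2`` is a placeholder):

.. code:: python

    s2 = SourceDescriptor()
    s2.path = masterPath2           # hypothetical path variable
    s2.muteKerning = True           # leave this master's kerning out
    s2.mutedGlyphNames.append("Z")  # and mute this glyph entirely
    doc.addSource(s2)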
If provided, the instance should", "etree as ET from fontTools.misc import plistlib from fontTools.misc.loggingTools import", "as a string. Default encoding ``utf-8``.\"\"\" if encoding is str", "= \" \".join([hex(u) for u in data.get('unicodes')]) if data.get('instanceLocation') is", "path = path.__fspath__() self.path = path self.filename = os.path.basename(path) self.updatePaths()", "d = dict(font=fontSourceName, location=designLocation, glyphName=masterGlyphName) if glyphSources is None: glyphSources", "self) reader.read() if self.sources: self.findDefault() def write(self, path): \"\"\"Write this", "@property def formatTuple(self): \"\"\"Return the formatVersion as a tuple of", "against the SourceDescriptor locations (always in design space). defaultDesignLocation =", "in this list: - ``locationLabel``: the location along this axis", "the axes, sources, variable fonts and instances to very basic", "inputValue, outputValue in axis.map: newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name) newMap.append((inputValue, newOutputValue))", "[ AxisLabelDescriptor(name=\"Regular\", userValue=400, elidable=True) ] doc.addAxis(a1) \"\"\" _attrs = ['tag',", "['filename', 'path', 'name', 'layerName', 'location', 'copyLib', 'copyGroups', 'copyFeatures', 'muteKerning', 'muteInfo',", "ET.Element('glyph') if data.get('mute'): glyphElement.attrib['mute'] = \"1\" if data.get('unicodes') is not", "name \"\"\"string. Unique identifier name of the instance, used to", "minimum, check for < maximum. - If a condition has", "instance.locationLabel or instance.userLocation for instance in self.documentObject.instances ) ): if", "variable font file, **as it is in the document**. The", "data: return libElement = ET.Element('lib') libElement.append(plistlib.totree(data, indent_level=indent_level)) parentElement.append(libElement) def _writeGlyphElement(self,", "Optional[str] = None \"\"\"Format version for this document, as a", "updated and relativized descriptor.filename = self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor: SourceDescriptor):", "dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0]) dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1]) else: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue)", "not self.documentObject.axes: raise DesignSpaceDocumentError(\"No axes defined\") userLoc = {} designLoc", "other glyph substitution features. - False: before - True: after.", "versionadded:: 5.0 \"\"\" self.locationLabels.append(locationLabelDescriptor) def addLocationLabelDescriptor(self, **kwargs): \"\"\"Instantiate a new", "the axis values, and they are assumed to be the", "again. Fonts with the same path are only loaded once", "then change the field(s) for which they have data. ..", "None: conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum')) if cond.get('maximum') is not None: conditionElement.attrib['maximum']", "MutableMapping[str, Any] = lib or {} \"\"\"Custom data associated with", "# only accept values we know validatedLocation[axisName] = axisValue for", "for axis in doc.axes } class VariableFontDescriptor(SimpleDescriptor): \"\"\"Container for variable", "values for this source, in design space coordinates. MutatorMath +", "continuous axis has a ``minimum`` and ``maximum``, while a discrete", "the filename attribute. :: case 1. 
descriptor.filename == None descriptor.path", "userValue = float(userValue) except ValueError: self.log.warning(\"ValueError in readLocation userValue %3.3f\",", "for axisName, axisValue in locationObject.items(): if axisName in validatedLocation: #", "else: if self.designLocation is None: self.designLocation = {} if axisName", "copied to the instances. MutatorMath. .. deprecated:: 5.0 \"\"\" self.copyInfo", "dimElement = ET.Element('dimension') dimElement.attrib['name'] = dimensionName if type(dimensionValue) == tuple:", "nothing in-between. \"\"\" def map_forward(self, value): \"\"\"Maps value from axis", "this label, by combining data from the explicit user location", "property :attr:`designLocation`. \"\"\" return self.designLocation @location.setter def location(self, location: Optional[AnisotropicLocationDict]):", "by combining data from the explicit user location and default", "yValue) if userValue is None == xValue is None: raise", "here, and instead of # assert, it should simply return", "and axis.name in userLocation: dimElement = ET.Element('dimension') dimElement.attrib['name'] = axis.name", "else None maximumStr = element.get(\"usermaximum\") maximum = float(maximumStr) if maximumStr", "documentPath self.documentObject = documentObject tree = ET.parse(self.path) self.root = tree.getroot()", "glyphElement.attrib.get(\"mute\") if mute == \"1\": glyphData['mute'] = True # unicode", "attrs = indent('\\n'.join(attrs), ' ') return f\"{self.__class__.__name__}(\\n{attrs}\\n)\" class SourceDescriptor(SimpleDescriptor): \"\"\"Simple", "= olderSibling \"\"\"STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_", "default location is the set of all `default` values in", "= font return res finally: for source, font in zip(self.sources,", "same source.path only once loaded = {} fonts = []", "default location of that axis (same as a :class:`ValueAxisSubsetDescriptor`). \"\"\"", "and they are assumed to be the default. See :meth:`SourceDescriptor.getFullDesignLocation()`", "each axis independently by taking the first not-None field in", "if self.obj is not None else \"\") class AsDictMixin(object): def", "instances: for descriptor in self.instances: if descriptor.filename is not None", "*, name=None, conditionSets=None, subs=None): self.name = name \"\"\"string. Unique name", "a VF. .. code:: python a2 = DiscreteAxisDescriptor() a2.values =", "in the axis.minimum axis.minimum = minimum axis.maximum = maximum axis.default", "name + \"*\" * (4 - len(name)) else: tag =", "\"\"\" self.labelNames: Dict[str, str] = labelNames or {} \"\"\"User-facing translations", "(default = ``None``) \"\"\" self.userMaximum: float = userMaximum \"\"\"New maximum", "= self.root.findall('.instances/instance') for instanceElement in instanceElements: self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo)", "to save the file somewhere. \"\"\" self.formatVersion: Optional[str] = None", "k, v in self.map}) def map_backward(self, v): \"\"\"Maps value from", "writer class to make us a new instanceDescriptor.\"\"\" return self.writerClass.getInstanceDescriptor()", "MutatorMath only. \"\"\" self.muteInfo = muteInfo \"\"\"bool. 
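

# --- Usage sketch (an illustrative addition, not part of the original
# module): building a minimal two-master document with the convenience
# ``add*Descriptor`` methods defined on DesignSpaceDocument further below.
# The UFO file names and the family name are hypothetical placeholders.
def _example_build_designspace():
    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        name="weight", tag="wght", minimum=300, default=300, maximum=700)
    doc.addSourceDescriptor(
        filename="masters/Light.ufo", name="light",
        location=dict(weight=300), familyName="Example")
    doc.addSourceDescriptor(
        filename="masters/Bold.ufo", name="bold",
        location=dict(weight=700), familyName="Example")
    doc.addInstanceDescriptor(
        filename="instances/Medium.ufo", styleName="Medium",
        userLocation=dict(weight=500), familyName="Example")
    # doc.write("Example.designspace") would serialize the document to XML.
    return doc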
class RuleDescriptor(SimpleDescriptor):
    """Represents the rule descriptor element: a set of glyph substitutions
    to trigger conditionally in some parts of the designspace.

    .. code:: python

        r1 = RuleDescriptor()
        r1.name = "unique.rule.name"
        r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10)])
        r1.subs.append(("a", "a.alt"))

    .. code:: xml

        <!-- optional: list of substitution rules -->
        <rules>
            <rule name="vertical.bars">
                <conditionset>
                    <condition minimum="250" maximum="750" name="weight"/>
                </conditionset>
                <sub name="dollar" with="dollar.alt"/>
            </rule>
        </rules>
    """
    _attrs = ['name', 'conditionSets', 'subs']

    def __init__(self, *, name=None, conditionSets=None, subs=None):
        self.name = name
        """string. Unique name for this rule. Can be used to reference this
        rule data."""
        self.conditionSets = conditionSets or []
        """a list of conditionsets.

        - Each conditionset is a list of conditions.
        - Each condition is a dict with ``name``, ``minimum`` and ``maximum``
          keys.
        """
        self.subs = subs or []
        """list of substitutions.

        - Substitutions are stored as tuples of glyphnames ("a", "a.alt").
        """


def evaluateRule(rule, location):
    """Return True if any of the rule's conditionsets matches the given
    location."""
    return any(evaluateConditions(c, location) for c in rule.conditionSets)


def evaluateConditions(conditions, location):
    """Return True if all the conditions matched with the given location.

    - If a condition has no minimum, check for < maximum.
    - If a condition has no maximum, check for > minimum.
    """
    for cd in conditions:
        value = location[cd['name']]
        if cd.get('minimum') is None:
            if value > cd['maximum']:
                return False
        elif cd.get('maximum') is None:
            if value < cd['minimum']:
                return False
        elif not cd['minimum'] <= value <= cd['maximum']:
            return False
    return True


def processRules(rules, location, glyphNames):
    """Apply these rules at this location to these glyphnames.

    Return a new list of glyphNames with substitutions applied.

    - rule order matters
    """
    newNames = []
    for rule in rules:
        if evaluateRule(rule, location):
            for name in glyphNames:
                swap = False
                for a, b in rule.subs:
                    if name == a:
                        swap = True
                        break
                if swap:
                    newNames.append(b)
                else:
                    newNames.append(name)
            glyphNames = newNames
            newNames = []
    return glyphNames
def tagForAxisName(name):
    # try to find or make a tag name for this axis name
    names = {
        'weight':   ('wght', dict(en='Weight')),
        'width':    ('wdth', dict(en='Width')),
        'optical':  ('opsz', dict(en='Optical Size')),
        'slant':    ('slnt', dict(en='Slant')),
        'italic':   ('ital', dict(en='Italic')),
    }
    if name.lower() in names:
        return names[name.lower()]
    if len(name) < 4:
        tag = name + "*" * (4 - len(name))
    else:
        tag = name[:4]
    return tag, dict(en=name)


class AbstractAxisDescriptor(SimpleDescriptor):
    flavor = "axis"

    def __init__(self, *, tag=None, name=None, labelNames=None, hidden=False,
                 map=None, axisOrdering=None, axisLabels=None):
        # opentype tag for this axis
        self.tag = tag
        """string. Four letter tag for this axis. Some might be registered
        at the OpenType specification. Privately-defined axis tags must
        begin with an uppercase letter and use only uppercase letters or
        digits.
        """
        # name of the axis used in locations
        self.name = name
        """string. Name of the axis as it is used in the location dicts.

        MutatorMath + Varlib.
        """
        # names for UI purposes, if this is not a standard axis,
        self.labelNames = labelNames or {}
        """dict. When defining a non-registered axis, it will be necessary
        to define user-facing readable names for the axis. Keyed by
        xml:lang code.
        """
        self.hidden = hidden
        """bool. Whether this axis should be hidden in user interfaces."""
        self.map = map or []
        """list of input / output values that can describe a warp of user
        space to design space coordinates. If no map values are present, it
        is assumed user space is the same as design space, as in
        [(minimum, minimum), (maximum, maximum)].
        """
        self.axisOrdering = axisOrdering
        """STAT table field ``axisOrdering``.

        See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_

        .. versionadded:: 5.0
        """
        self.axisLabels: List[AxisLabelDescriptor] = axisLabels or []
        """STAT table entries for Axis Value Tables format 1, 2, 3.

        See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_

        .. versionadded:: 5.0
        """


class AxisDescriptor(AbstractAxisDescriptor):
    """Simple container for the axis data.

    .. code:: python

        a1 = AxisDescriptor()
        a1.minimum = 1
        a1.maximum = 1000
        a1.default = 400
        a1.name = "weight"
        a1.tag = "wght"
        a1.labelNames['fa-IR'] = "قطر"
        a1.labelNames['en'] = "Wéíght"
        a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
        a1.axisOrdering = 1
        a1.axisLabels = [
            AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
        ]
        doc.addAxis(a1)
    """
    _attrs = ['tag', 'name', 'minimum', 'maximum', 'default', 'map',
              'axisOrdering', 'axisLabels']

    def __init__(self, *, tag=None, name=None, labelNames=None, minimum=None,
                 default=None, maximum=None, hidden=False, map=None,
                 axisOrdering=None, axisLabels=None):
        super().__init__(
            tag=tag,
            name=name,
            labelNames=labelNames,
            hidden=hidden,
            map=map,
            axisOrdering=axisOrdering,
            axisLabels=axisLabels,
        )
        self.minimum = minimum
        """number. The minimum value for this axis in user space.

        MutatorMath + Varlib.
        """
        self.maximum = maximum
        """number. The maximum value for this axis in user space.

        MutatorMath + Varlib.
        """
        self.default = default
        """number. The default value for this axis, i.e. when a new location
        is created, this is the value this axis will get in user space.

        MutatorMath + Varlib.
        """

    def map_forward(self, v):
        """Maps value from axis mapping's input (user) to output (design)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if not self.map:
            return v
        return piecewiseLinearMap(v, {k: v for k, v in self.map})

    def map_backward(self, v):
        """Maps value from axis mapping's output (design) to input (user)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if isinstance(v, tuple):
            v = v[0]
        if not self.map:
            return v
        return piecewiseLinearMap(v, {v: k for k, v in self.map})


class DiscreteAxisDescriptor(AbstractAxisDescriptor):
    """Container for discrete axis data.

    Use this for axes that do not interpolate. The main difference from a
    continuous axis is that a continuous axis has a ``minimum`` and
    ``maximum``, while a discrete axis has a list of ``values``.

    Example: an Italic axis with 2 stops, Roman and Italic, that are not
    compatible. The axis still allows to bind together the full font family,
    which is useful for the STAT table, however it can't become a variation
    axis in a VF.

    .. code:: python

        a2 = DiscreteAxisDescriptor()
        a2.values = [0, 1]
        a2.default = 0
        a2.name = "italic"
        a2.tag = "ITAL"
        a2.labelNames['fr'] = "Italique"
        a2.map = [(0, 0), (1, -11)]
        a2.axisOrdering = 2
        a2.axisLabels = [
            AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
        ]
        doc.addAxis(a2)

    .. versionadded:: 5.0
    """
    flavor = "axis"
    _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering',
              'axisLabels')

    def __init__(self, *, tag=None, name=None, labelNames=None, values=None,
                 default=None, hidden=False, map=None, axisOrdering=None,
                 axisLabels=None):
        super().__init__(
            tag=tag,
            name=name,
            labelNames=labelNames,
            hidden=hidden,
            map=map,
            axisOrdering=axisOrdering,
            axisLabels=axisLabels,
        )
        self.default: float = default
        """The default value for this axis, i.e. when a new location is
        created, this is the value this axis will get in user space.

        However, this default value is less important than in continuous
        axes:

        - it doesn't define the "neutral" version of outlines from which
          deltas would apply, as this axis does not interpolate.
        - it doesn't provide the reference glyph set for the designspace, as
          fonts at each value can have different glyph sets.
        """
        self.values: List[float] = values or []
        """List of possible values for this axis. Contrary to continuous
        axes, only the values in this list can be taken by the designspace
        location for this axis; there is nothing in-between.
        """

    def map_forward(self, value):
        """Maps value from axis mapping's input to output.

        Returns value unchanged if no mapping entry is found.

        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        return next((v for k, v in self.map if k == value), value)

    def map_backward(self, value):
        """Maps value from axis mapping's output to input.

        Returns value unchanged if no mapping entry is found.
        """
        if isinstance(value, tuple):
            value = value[0]
        return next((k for k, v in self.map if v == value), value)


class AxisLabelDescriptor(SimpleDescriptor):
    """Container for axis label data.

    Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
    All values are user values.
    See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_

    The STAT format of the Axis value depends on which field are filled-in,
    see :meth:`getFormat`

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable',
              'olderSibling', 'linkedUserValue', 'labelNames')

    def __init__(self, *, name, userValue, userMinimum=None, userMaximum=None,
                 elidable=False, olderSibling=False, linkedUserValue=None,
                 labelNames=None):
        self.userMinimum: Optional[float] = userMinimum
        """STAT field ``rangeMinValue`` (format 2)."""
        self.userValue: float = userValue
        """Value in user coordinates for this label."""
        self.userMaximum: Optional[float] = userMaximum
        """STAT field ``rangeMaxValue`` (format 2)."""
        self.name: str = name
        """Label for this axis location, STAT field ``valueNameID``."""
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.linkedUserValue: Optional[float] = linkedUserValue
        """STAT field ``linkedValue`` (format 3)."""
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's name. Keyed by
        ``xml:lang`` code.
        """

    def getFormat(self) -> int:
        """Determine which format of STAT Axis value to use to encode this
        label.

        =========== ========= =========== =========== ===============
        STAT Format userValue userMinimum userMaximum linkedUserValue
        =========== ========= =========== =========== ===============
        1           ✅        ❌          ❌          ❌
        2           ✅        ✅          ✅          ❌
        3           ✅        ❌          ❌          ✅
        =========== ========= =========== =========== ===============
        """
        if self.linkedUserValue is not None:
            return 3
        if self.userMinimum is not None or self.userMaximum is not None:
            return 2
        return 1

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the
        :attr:`name`."""
        return self.labelNames.get("en") or self.name


class LocationLabelDescriptor(SimpleDescriptor):
    """Container for location label data.

    Analogue of OpenType's STAT data for a free-floating location (format 4).
    All values are user values.

    See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')

    def __init__(self, *, name, userLocation, elidable=False,
                 olderSibling=False, labelNames=None):
        self.name: str = name
        """Label for this named location, STAT field ``valueNameID``."""
        self.userLocation: SimpleLocationDict = userLocation or {}
        """Location in user coordinates along the document's axes.

        Note: axes not mentioned are assumed to be at their default
        location.
        """
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``."""
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``."""
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's name. Keyed by
        xml:lang code.
        """

    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location of this label, by combining data
        from the explicit user location and default axis values.

        .. versionadded:: 5.0
        """
        return {
            axis.name: self.userLocation.get(axis.name, axis.default)
            for axis in doc.axes
        }
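

# --- Usage sketch (an illustrative addition): user<->design mapping on a
# single axis, and document-level normalization to the -1/0/+1 space used
# by variation math. The map values mirror the a1 example above.
def _example_axis_math():
    doc = DesignSpaceDocument()
    axis = doc.addAxisDescriptor(
        name="weight", tag="wght", minimum=1, default=400, maximum=1000,
        map=[(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)])
    assert axis.map_forward(400.0) == 66.0   # user -> design
    assert axis.map_backward(66.0) == 400.0  # design -> user
    # normalizeLocation() takes a design-space location; the axis extremes
    # are mapped through axis.map before normalizing.
    return doc.normalizeLocation({"weight": 990.0})  # {'weight': 1.0}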
\"\"\" self.muteInfo = muteInfo", "and relativized descriptor.filename = self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor: SourceDescriptor): \"\"\"Add", "for key, lang in familyNameElement.items(): if key == XML_LANG: familyName", "filename, it should be fine case 4. descriptor.filename == '../somewhere'", "a2.labelNames['fr'] = \"Italique\" a2.map = [(0, 0), (1, -11)] a2.axisOrdering", "value) def map_backward(self, value): \"\"\"Maps value from axis mapping's output", "if xValue is not None: xValue = float(xValue) except ValueError:", "user values. See: `OTSpec STAT Axis value table, format 1,", "filename is not None and self.documentObject.path is not None: instancePath", "{} \"\"\"dict. Axis values for this source, in design space", "is not None else (location or {}) \"\"\"dict. Axis values", "= glyphName if data.get('note') is not None: noteElement = ET.Element('note')", "\"asdict\"): value = value.asdict() elif isinstance(value, list): value = [", "read(self): self.readAxes() self.readLabels() self.readRules() self.readVariableFonts() self.readSources() self.readInstances() self.readLib() def readRules(self):", "this document's STAT format 4 labels. .. versionadded:: 5.0\"\"\" self.rules:", "\"\"\"Setter for :attr:`localisedFamilyName` .. versionadded:: 5.0 \"\"\" self.localisedFamilyName[languageCode] = tostr(familyName)", "this axis in user space. MutatorMath + Varlib. \"\"\" self.maximum", "import indent from typing import Any, Dict, List, MutableMapping, Optional,", "not None else None elidable = True if element.get(\"elidable\") ==", "attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' } return self.axisLabelDescriptorClass( name=name, userValue=value,", "axis.minimum = minimum axis.maximum = maximum axis.default = default #", "the order they appear in the sources list. \"\"\" #", "if m.get('glyphName') is not None: masterElement.attrib['glyphname'] = m.get('glyphName') if m.get('font')", "of conditionsets. - Each conditionset is a list of conditions.", "for i in self.effectiveFormatTuple) if self.documentObject.axes or self.documentObject.elidedFallbackName is not", "label.linkedUserValue is not None: labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue) self._addLabelNames(labelElement, label.labelNames) axisElement.append(labelElement)", "= ET.Element('note') noteElement.text = data.get('note') glyphElement.append(noteElement) if data.get('masters') is not", "if groupsElement.attrib.get('copy') == '1': sourceObject.copyGroups = True for infoElement in", "user space to design space before comparing # it against", "'userDefault', 'userMaximum') def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf): self.name:", "us a new instanceDescriptor.\"\"\" return self.writerClass.getInstanceDescriptor() def getAxisOrder(self): \"\"\"Return a", "\"\"\" return next( (label for label in self.locationLabels if label.userLocation", "return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for axis in self.axes }", "getInstanceDescriptor(cls): return cls.instanceDescriptorClass() @classmethod def getRuleDescriptor(cls): return cls.ruleDescriptorClass() def __init__(self,", "``kwargs`` and add it to :attr:`locationLabels`. .. 
versionadded:: 5.0 \"\"\"", "self._addLib(instanceElement, instanceObject.lib, 4) self.root.findall('.instances')[0].append(instanceElement) def _addSource(self, sourceObject): sourceElement = ET.Element(\"source\")", "return locationLabel def newDefaultLocation(self): \"\"\"Return a dict with the default", "plistlib.fromtree(libElement[0]) def readInfoElement(self, infoElement, instanceObject): \"\"\" Read the info element.\"\"\"", "``sourceDescriptor`` to ``doc.sources``.\"\"\" self.sources.append(sourceDescriptor) def addSourceDescriptor(self, **kwargs): \"\"\"Instantiate a new", "return result class RuleDescriptor(SimpleDescriptor): \"\"\"Represents the rule descriptor element: a", "v): \"\"\"Maps value from axis mapping's output (design) to input", "is not None and 'elidedfallbackname' in axesElement.attrib: self.documentObject.elidedFallbackName = axesElement.attrib['elidedfallbackname']", "can't be found. .. versionadded:: 5.0 \"\"\" if self.locationLabel is", "self.writerClass.instanceDescriptorClass(**kwargs) self.addInstance(instance) return instance def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): \"\"\"Add", "document's STAT format 4 labels. .. versionadded:: 5.0\"\"\" self.rules: List[RuleDescriptor]", "XML would be non-deterministic. # https://github.com/LettError/designSpaceDocument/issues/10 loc = collections.OrderedDict() for", "document. This makes it easy to integrate this object in", "not attributes # so we have to do it ourselves", "else: xml_attrs = {'name', 'userminimum', 'userdefault', 'usermaximum'} unknown_attrs = set(element.attrib)", "= userLocation[axis.name] dimElement.attrib['uservalue'] = self.intOrFloat(value) locElement.append(dimElement) if len(locElement) > 0:", "if cond.get('minimum') is not None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else:", "specified, assume the same maximum value as the full axis.", "instance .. code:: python i2 = InstanceDescriptor() i2.path = instancePath2", "axis with 2 stops, Roman and Italic, that are not", "= self._posixRelativePath(descriptor.path) if instances: for descriptor in self.instances: if descriptor.filename", "in self.userLocation: result[axis.name] = axis.map_forward(self.userLocation[axis.name]) else: result[axis.name] = axis.map_forward(axis.default) return", "if axisName is None: self.designLocation = {} self.userLocation = {}", "that are outside of a condition set. rules = []", "= self.writerClass.axisDescriptorClass(**kwargs) self.addAxis(axis) return axis def addRule(self, ruleDescriptor: RuleDescriptor): \"\"\"Add", "(location or {}) \"\"\"dict. Axis values for this instance, in", "else None linkedValueStr = element.get(\"linkeduservalue\") linkedValue = float(linkedValueStr) if linkedValueStr", "XML_LANG = XML_NS + \"lang\" def posix(path): \"\"\"Normalize paths using", "right thing for the filename attribute. :: case 1. descriptor.filename", "the kerning data from this source needs to be muted", "the default source to not specify some of the axis", "discrete axes. Before version 5, you would have needed 1", "= path self.filename = os.path.basename(path) self.updatePaths() writer = self.writerClass(path, self)", "\"Wéíght\" a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)] a1.axisOrdering", "be able to encode what the document contains. 
\"\"\" minVersion", "with an uppercase letter and use only uppercase letters or", "if cond.get('maximum') is not None: conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum')) conditionsetElement.append(conditionElement) if", "= \" \".join(self.intOrFloat(v) for v in axisObject.values) axisElement.attrib['default'] = self.intOrFloat(axisObject.default)", "path): \"\"\"Write this designspace to ``path``.\"\"\" if hasattr(path, \"__fspath__\"): #", "for axis in discreteAxes]) for values in valueCombinations: basename =", "AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor locationLabelDescriptorClass = LocationLabelDescriptor", "set for the designspace, as fonts at each value can", "axes. .. seealso:: :func:`splitInterpolable` .. versionadded:: 5.0 \"\"\" if self.variableFonts:", "self.documentObject.variableFonts: variableFontsElement = ET.Element(\"variable-fonts\") for variableFont in self.documentObject.variableFonts: self._addVariableFont(variableFontsElement, variableFont)", ".. versionadded:: 5.0 \"\"\" return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for", "'RuleDescriptor', 'BaseDocReader', 'BaseDocWriter' ] # ElementTree allows to find namespace-prefixed", "exist. MutatorMath. \"\"\" self.font = font \"\"\"Same as :attr:`SourceDescriptor.font` ..", "the SourceDescriptor locations (always in design space). defaultDesignLocation = self.newDefaultLocation()", "fontTools.misc.loggingTools import LogMixin from fontTools.misc.textTools import tobytes, tostr \"\"\" designSpaceDocument", "condition continue conditionElement = ET.Element('condition') conditionElement.attrib['name'] = cond.get('name') if cond.get('minimum')", "coordinates at which to freeze the given axis.\"\"\" class BaseDocWriter(object):", "= ET.Element('condition') conditionElement.attrib['name'] = cond.get('name') if cond.get('minimum') is not None:", "attribute.\") designLocation, userLocation = self.locationFromElement(labelElement) if designLocation: raise DesignSpaceDocumentError(f'<label> element", "= variableFontElement.get(\"filename\") axisSubsetsElement = variableFontElement.find(\".axis-subsets\") if axisSubsetsElement is None: raise", "sourceElement.attrib.get('name') if sourceName is None: # add a temporary source", "if isinstance(v, tuple): v = v[0] if not self.map: return", "version for this document, as a string. E.g. \"4.0\" \"\"\"", "name=\"dollar\" with=\"dollar.alt\"/> </rule> </rules> \"\"\" _attrs = ['name', 'conditionSets', 'subs']", "{} for attr, value in self.__dict__.items(): if attr.startswith(\"_\"): continue if", "is None: raise DesignSpaceDocumentError(\"variable-font element must contain an axis-subsets element.\")", "descriptor.path == None -- action: write as is, descriptors will", "self.userLocation[axisName] def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]: \"\"\"Get the :class:`LocationLabelDescriptor`", "Varlib. \"\"\" self.localisedFamilyName = localisedFamilyName or {} \"\"\"dict. A dictionary", "the explicit user location along this axis. No anisotropy. -", "or \"freeze\". \"\"\" self.userValue: float = userValue \"\"\"Value in user", "float = userMaximum \"\"\"New maximum value of the axis in", "axis.map_forward(axis.default) return result class RuleDescriptor(SimpleDescriptor): \"\"\"Represents the rule descriptor element:", "be applied before or after other glyph substitution features. 
-", "None: if sourceObject.name.find(\"temp_master\") != 0: # do not save temporary", "= DesignSpaceDocument.fromfile(\"some/path/to/my.designspace\") doc.formatVersion doc.elidedFallbackName doc.axes doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources doc.variableFonts", "if len(locElement) > 0: parentElement.append(locElement) def _addInstance(self, instanceObject): instanceElement =", "mapping entry, if you intend that value to be mapped.", "user space is the same as design space, as in", "a1.tag = \"wght\" a1.labelNames['fa-IR'] = \"قطر\" a1.labelNames['en'] = \"Wéíght\" a1.map", "If a condition has no maximum, check for > minimum.", "Raises if the named label can't be found. .. versionadded::", "None and axis.name in userLocation: dimElement = ET.Element('dimension') dimElement.attrib['name'] =", "sourceObject.familyName if sourceObject.styleName is not None: sourceElement.attrib['stylename'] = sourceObject.styleName if", "name == a: swap = True break if swap: newNames.append(b)", "raise DesignSpaceDocumentError(\"unicode values %s are not integers\" % unicodes) for", "self.documentObject.elidedFallbackName = axesElement.attrib['elidedfallbackname'] axisElements = self.root.findall(\".axes/axis\") if not axisElements: return", "1 @property def defaultName(self) -> str: \"\"\"Return the English name", "labelsElement = ET.Element(\"labels\") for labelObject in self.documentObject.locationLabels: self._addLocationLabel(labelsElement, labelObject) self.root.append(labelsElement)", "= lib or {} \"\"\"Custom data associated with this instance.\"\"\"", "descriptor.path == None -- action: write as is. The filename", "following situations: In each descriptor, we have to do the", "design location to a user location. Assume that missing coordinates", "readLocation xValue %3.3f\", xValue) try: yValue = dimensionElement.attrib.get('yvalue') if yValue", "self, *, filename=None, path=None, font=None, name=None, location=None, designLocation=None, layerName=None, familyName=None,", "infoElement in sourceElement.findall(\".info\"): if infoElement.attrib.get('copy') == '1': sourceObject.copyInfo = True", "axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor locationLabelDescriptorClass", "with this variable font.\"\"\" class RangeAxisSubsetDescriptor(SimpleDescriptor): \"\"\"Subset of a continuous", "def _readConditionElements(self, parentElement, ruleName=None): cds = [] for conditionElement in", "a locationElement.\"\"\" locElement = ET.Element(\"location\") if name is not None:", "filename. We're not overwriting some other value for filename, it", "case 3 and 4: filename gets updated and relativized descriptor.filename", "locationElement.findall(\".dimension\"): dimName = dimensionElement.attrib.get(\"name\") if self._strictAxisNames and dimName not in", "groupsElement in sourceElement.findall('.groups'): if groupsElement.attrib.get('copy') == '1': sourceObject.copyGroups = True", "family name strings, keyed by language code. \"\"\" self.localisedStyleName =", "None and axis.name in designLocation: dimElement = ET.Element('dimension') dimElement.attrib['name'] =", "locations (always in design space). defaultDesignLocation = self.newDefaultLocation() for sourceDescriptor", "not be part of the calculations). MutatorMath only. 
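

# --- Usage sketch (an illustrative addition): resolving an instance's full
# design and user locations against a document, mixing an explicit user
# location with the axis map defined above.
def _example_instance_locations():
    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        name="weight", tag="wght", minimum=100, default=400, maximum=900,
        map=[(100, 10), (400, 66), (900, 190)])
    instance = InstanceDescriptor()
    instance.userLocation = {"weight": 900}
    # The user location is mapped through the axis map into design space:
    full_design = instance.getFullDesignLocation(doc)  # {'weight': 190}
    full_user = instance.getFullUserLocation(doc)      # {'weight': 900}
    return full_design, full_user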
\"\"\" self.muteInfo", "if value > cd['maximum']: return False elif cd.get('maximum') is None:", "# opentype tag for this axis self.tag = tag \"\"\"string.", "locations (using xvalue=\"\").') sourceObject.location = designLocation layerName = sourceElement.attrib.get('layer') if", "find or make a tag name for this axis name", "= ET.Element(\"designspace\") def write(self, pretty=True, encoding=\"UTF-8\", xml_declaration=True): self.root.attrib['format'] = \".\".join(str(i)", "</glyph> \"\"\" glyphData = {} glyphName = glyphElement.attrib.get('name') if glyphName", "continuous axes, only the values in this list can be", "read a glyphname, use the one we have masterGlyphName =", "minimum or maximum values, do not add the rule. ruleElement", "continue if self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) def", "swap: newNames.append(b) else: newNames.append(name) glyphNames = newNames newNames = []", "= languageCode languageElement.text = labelName parentElement.append(languageElement) def _addLocationLabel(self, parentElement: ET.Element,", "this instance.\"\"\" @property def location(self): \"\"\"dict. Axis values for this", "list of conditions. - Each condition is a dict with", "ET.Element('info') instanceElement.append(infoElement) self._addLib(instanceElement, instanceObject.lib, 4) self.root.findall('.instances')[0].append(instanceElement) def _addSource(self, sourceObject): sourceElement", "cd.get('minimum') is None: if value > cd['maximum']: return False elif", "of the designspace. .. code:: python r1 = RuleDescriptor() r1.name", "python i2 = InstanceDescriptor() i2.path = instancePath2 i2.familyName = \"InstanceFamilyName\"", "the full font family, which is useful for the STAT", "= localisedStyleName or {} \"\"\"dict. A dictionary of localised stylename", "for code in languageCodes: if code == \"en\": continue localisedStyleMapStyleNameElement", "userLocation = self.locationFromElement(instanceElement) locationLabel = instanceElement.attrib.get('location') if (designLocation or userLocation)", "axisObject.default def readAxisLabel(self, element: ET.Element): xml_attrs = {'userminimum', 'uservalue', 'usermaximum',", "minimum. \"\"\" for cd in conditions: value = location[cd['name']] if", "in languageCodes: if code == \"en\": continue # already stored", "( axis.map_backward(designLocation[axis.name]) if axis.name in designLocation else axis.default ) for", "axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] else: result[axis.name] = axis.map_forward(axis.default)", "axis, value in zip(discreteAxes, values) ] )) return variableFonts def", "only want the default location of that axis (same as", "minimum = None if cond.get('maximum') is not None: maximum =", "return new def normalize(self): \"\"\" Normalise the geometry of this", "\"\"\"Set and return SourceDescriptor at the default location or None.", "LocationLabelDescriptor) -> None: labelElement = ET.Element('label') labelElement.attrib['name'] = label.name if", "name=None, location=None, designLocation=None, layerName=None, familyName=None, styleName=None, localisedFamilyName=None, copyLib=False, copyInfo=False, copyGroups=False,", "or any( instance.locationLabel or instance.userLocation for instance in self.documentObject.instances )", "\"\"\"string. Postscript fontname for this instance. MutatorMath + Varlib. \"\"\"", "Varlib. .. 
deprecated:: 5.0 Use the more explicit alias for", "versionadded:: 5.0 \"\"\" self.axisLabels: List[AxisLabelDescriptor] = axisLabels or [] \"\"\"STAT", "filename = sourceElement.attrib.get('filename') if filename is not None and self.path", "'glyphs', 'kerning', 'info', 'lib'] filename = posixpath_property(\"_filename\") path = posixpath_property(\"_path\")", "attributes = {} self.root.append(ET.Element(\"rules\", attributes)) for ruleObject in self.documentObject.rules: self._addRule(ruleObject)", "glyphnames (\"a\", \"a.alt\") self.subs = subs or [] \"\"\"list of", "in valueCombinations: basename = None if self.filename is not None:", "def _posixRelativePath(self, otherPath): relative = os.path.relpath(otherPath, os.path.dirname(self.path)) return posix(relative) def", "map values are present, it is assumed user space is", ")) return variableFonts def deepcopyExceptFonts(self): \"\"\"Allow deep-copying a DesignSpace document", "calculate the relative path for filename. We're not overwriting some", "Dict = {} \"\"\"User defined, custom data associated with the", "None if self.filename is not None: basename = os.path.splitext(self.filename)[0] +", "axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for axis in self.axes } def map_backward(self,", "if isinstance(axis, DiscreteAxisDescriptor): discreteAxes.append(axis) else: rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name)) valueCombinations = itertools.product(*[axis.values for", "path): \"\"\"Read a designspace file from ``path`` and populates the", "deprecated:: 5.0 \"\"\" self.copyInfo = copyInfo \"\"\"bool. Indicates if the", "else (location or {}) \"\"\"dict. Axis values for this instance,", "in {\"first\", \"last\"}: raise DesignSpaceDocumentError( \"<rules> processing attribute value is", "- Each condition is a dict with ``name``, ``minimum`` and", "interfere. case 2. descriptor.filename == \"../something\" descriptor.path == None --", "copy. .. versionadded:: 5.0 \"\"\" fonts = [source.font for source", "= ('name', 'userMinimum', 'userDefault', 'userMaximum') def __init__(self, *, name, userMinimum=-math.inf,", "'copyGroups', 'copyFeatures', 'muteKerning', 'muteInfo', 'mutedGlyphNames', 'familyName', 'styleName', 'localisedFamilyName'] filename =", "None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else: minimum = None if", "`Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_. See ref:`rules-element` § Attributes.", "and update filename. \"\"\" assert self.path is not None for", "table entries for Axis Value Tables format 1, 2, 3.", "= instanceObject.getStyleMapFamilyName(code) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple >= (5, 0): if instanceObject.locationLabel", "axis data. Use this for axes that do not interpolate.", "None: vfElement.attrib['filename'] = vf.filename if vf.axisSubsets: subsetsElement = ET.Element('axis-subsets') for", "def write(self, pretty=True, encoding=\"UTF-8\", xml_declaration=True): self.root.attrib['format'] = \".\".join(str(i) for i", "want the default location of that axis (same as a", "self.root.attrib.get(\"format\", \"3.0\") self._axes = [] self.rules = [] self.sources =", "\")] glyphData['unicodes'] = unicodes except ValueError: raise DesignSpaceDocumentError(\"unicode values %s", "last part of its path. 
When the document is produced", "format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format of the", "hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__( tag=tag, name=name, labelNames=labelNames, hidden=hidden,", "True if infoElement.attrib.get('mute') == '1': sourceObject.muteInfo = True for featuresElement", "Or to load masters as FontTools binary fonts, including extra", "InstanceDescriptor @classmethod def getAxisDecriptor(cls): return cls.axisDescriptorClass() @classmethod def getSourceDescriptor(cls): return", "the ``kwargs`` provide a ``value``, or a :class:`AxisDescriptor` otherwise. \"\"\"", "This attribute is updated by the :meth:`findDefault` \"\"\" if readerClass", "new location is created, this is the value this axis", "in styleNameElement.items(): if key == XML_LANG: styleName = styleNameElement.text instanceObject.setStyleName(styleName,", "if the contents of the font.lib need to be copied", "seealso:: :meth:`getFullDesignLocation` .. versionadded:: 5.0 \"\"\" return doc.map_backward(self.getFullDesignLocation(doc)) def tagForAxisName(name):", "code:: python a1 = AxisDescriptor() a1.minimum = 1 a1.maximum =", "None: # if we don't read a glyphname, use the", "None: instanceObject.postScriptFontName = postScriptFontName styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname') if styleMapFamilyName is", "action: calculate the relative path for filename. We're not overwriting", "= 'Weight')), 'width': ('wdth', dict(en = 'Width')), 'optical': ('opsz', dict(en", "-> None: vfElement = ET.Element('variable-font') vfElement.attrib['name'] = vf.name if vf.filename", "font per value on the discrete axes. Before version 5,", "for data related to the instance .. code:: python i2", "self.intOrFloat(cond.get('minimum')) if cond.get('maximum') is not None: conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum')) conditionsetElement.append(conditionElement)", "2) tree = ET.ElementTree(self.root) tree.write( self.path, encoding=encoding, method='xml', xml_declaration=xml_declaration, pretty_print=pretty,", "self.axisDescriptorClass() axisObject.minimum = float(axisElement.attrib.get(\"minimum\")) axisObject.maximum = float(axisElement.attrib.get(\"maximum\")) axisObject.default = float(axisElement.attrib.get(\"default\"))", "self.axisOrdering = axisOrdering \"\"\"STAT table field ``axisOrdering``. See: `OTSpec STAT", ".. versionadded:: 5.0\"\"\" self.rules: List[RuleDescriptor] = [] \"\"\"List of this", "of fonts. Takes a callable which initializes a new font", "✅ =========== ========= =========== =========== =============== \"\"\" if self.linkedUserValue is", "same default value as the full axis. (default = ``None``)", "= ET.Element('label') labelElement.attrib['name'] = label.name if label.elidable: labelElement.attrib['elidable'] = \"true\"", "the value this axis will get in user space. However,", "provided, the instance should have the same location as the", "try: yValue = dimensionElement.attrib.get('yvalue') if yValue is not None: yValue", "\".join([hex(u) for u in data.get('unicodes')]) if data.get('instanceLocation') is not None:", "def getRuleDescriptor(cls): return cls.ruleDescriptorClass() def __init__(self, documentPath, documentObject: DesignSpaceDocument): self.path", "to a design location. 
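

# --- Usage sketch (an illustrative addition): a discrete "italic" axis
# splits the space into one implied variable font per discrete value;
# getVariableFonts() on DesignSpaceDocument (below) synthesizes one
# descriptor per value when none are declared explicitly.
def _example_variable_fonts():
    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        name="weight", tag="wght", minimum=300, default=400, maximum=700)
    doc.addAxisDescriptor(
        name="italic", tag="ital", values=[0, 1], default=0)
    for vf in doc.getVariableFonts():
        # e.g. "VF-ital0" and "VF-ital1", each freezing the italic axis and
        # keeping the full weight range.
        print(vf.name, [subset.name for subset in vf.axisSubsets])
    return doc.getVariableFonts()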
Assume that missing coordinates are at", "data.get('instanceLocation') is not None: locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation')) glyphElement.append(locationElement) if", "in discreteAxes]) for values in valueCombinations: basename = None if", "postScriptFontName = instanceElement.attrib.get('postscriptfontname') if postScriptFontName is not None: instanceObject.postScriptFontName =", "'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor', 'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader', 'BaseDocWriter' ] #", "feature text needs to be copied to the instances. MutatorMath.", "own data. Respect the data stored by others. \"\"\" self.default:", "{} instanceObject.designLocation = designLocation or {} for glyphElement in instanceElement.findall('.glyphs/glyph'):", "source.font is not None: # font already loaded fonts.append(source.font) continue", "DesignSpaceDocumentError(f'Exactly one of uservalue=\"\" or xvalue=\"\" must be provided for", "element: a set of glyph substitutions to trigger conditionally in", "for this axis name names = { 'weight': ('wght', dict(en", "is not None: sourceObject.styleName = styleName for familyNameElement in sourceElement.findall('familyname'):", "= ET.Element('labelname') languageElement.attrib[XML_LANG] = languageCode languageElement.text = labelName parentElement.append(languageElement) def", "of the axis values, and they are assumed to be", "= styleMapStyleName # read localised names for styleNameElement in instanceElement.findall('stylename'):", "\"weight\" a1.tag = \"wght\" a1.labelNames['fa-IR'] = \"قطر\" a1.labelNames['en'] = \"Wéíght\"", "save temporary source names sourceElement.attrib['name'] = sourceObject.name if sourceObject.familyName is", "= {} self._strictAxisNames = True @classmethod def fromstring(cls, string, documentObject):", "sourceElement.append(libElement) if sourceObject.copyGroups: groupsElement = ET.Element('groups') groupsElement.attrib['copy'] = \"1\" sourceElement.append(groupsElement)", "(e.g. TTFont, or defcon.Font, etc.) from the SourceDescriptor.path, and sets", "userValue, userMinimum=None, userMaximum=None, elidable=False, olderSibling=False, linkedUserValue=None, labelNames=None, ): self.userMinimum: Optional[float]", "**kwargs): \"\"\"Instantiate a new :class:`LocationLabelDescriptor` using the given ``kwargs`` and", "None \"\"\"Format version for this document, as a string. E.g.", "-> Optional[LocationLabelDescriptor]: \"\"\"Get the :class:`LocationLabelDescriptor` instance that matches this instances's", "\"en\" _attrs = ['filename', 'path', 'name', 'locationLabel', 'designLocation', 'userLocation', 'familyName',", "' ') return f\"{self.__class__.__name__}(\\n{attrs}\\n)\" class SourceDescriptor(SimpleDescriptor): \"\"\"Simple container for data", "class BaseDocWriter(object): _whiteSpace = \" \" axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass", "location along this axis. No anisotropy. - ``axis.default``: default axis", "\"good\" filename, in case one wants to save the file", "self = cls(readerClass=readerClass, writerClass=writerClass) reader = self.readerClass.fromstring(string, self) reader.read() if", "interpolate. - it doesn't provide the reference glyph set for", "given location. .. 
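# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the library API): posix() keeps
# stored filenames portable by normalizing OS-specific separators, and
# posixpath_property() builds on it so that descriptor ``filename``/``path``
# attributes are rewritten to forward slashes on assignment.
#
#     >>> import os
#     >>> posix(os.path.join("masters", "Light.ufo"))
#     'masters/Light.ufo'
# ---------------------------------------------------------------------------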
class SourceDescriptor(SimpleDescriptor):
    """Simple container for data related to the source

    .. code:: python

        doc = DesignSpaceDocument()
        s1 = SourceDescriptor()
        s1.path = masterPath1
        s1.name = "master.ufo1"
        s1.font = defcon.Font("master.ufo1")
        s1.location = dict(weight=0)
        s1.familyName = "MasterFamilyName"
        s1.styleName = "MasterStyleNameOne"
        s1.localisedFamilyName = dict(fr="Caractère")
        s1.mutedGlyphNames.append("A")
        s1.mutedGlyphNames.append("Z")
        doc.addSource(s1)
    """
    flavor = "source"
    _attrs = ['filename', 'path', 'name', 'layerName',
              'location', 'copyLib', 'copyGroups', 'copyFeatures',
              'muteKerning', 'muteInfo', 'mutedGlyphNames',
              'familyName', 'styleName', 'localisedFamilyName']

    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    def __init__(self, *, filename=None, path=None, font=None, name=None,
                 location=None, designLocation=None, layerName=None,
                 familyName=None, styleName=None, localisedFamilyName=None,
                 copyLib=False, copyInfo=False, copyGroups=False,
                 copyFeatures=False, muteKerning=False, muteInfo=False,
                 mutedGlyphNames=None):
        self.filename = filename
        """string. A relative path to the source file, **as it is in the document**."""
        self.path = path
        """The absolute path, calculated from the document path and the
        string in the filename attr. The file may or may not exist."""
        self.font = font
        """Any Python object. Optional. Points to a representation of this
        source font that is loaded in memory, as a Python object (e.g. a
        ``defcon.Font`` or a ``fontTools.ttLib.TTFont``).

        The default document reader will not fill-in this attribute, and the
        default writer will not use this attribute. It is up to the user of
        ``designspaceLib`` to either load the resource identified by
        ``filename`` and store it in this field, or write the contents of
        this field to the disk and make ``filename`` point to that.
        """
        self.name = name
        """string. Optional. Unique identifier name for this source."""
        self.designLocation = designLocation if designLocation is not None else location or {}
        """dict. Axis values for this source, in design space coordinates.

        .. seealso:: This may be only part of the full design location.
           See :meth:`getFullDesignLocation()`
        """
        self.layerName = layerName
        """string. The name of the layer in the source to look for outline
        data. Default ``None`` which means ``foreground``."""
        self.familyName = familyName
        """string. Family name of this source. Though this data can be
        extracted from the font, it can be efficient to have it right here."""
        self.styleName = styleName
        """string. Style name of this source."""
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name strings, keyed by
        language code."""
        self.copyLib = copyLib
        """bool. Indicates if the contents of the font.lib need to be copied
        to the instances. MutatorMath.

        .. deprecated:: 5.0
        """
        self.copyInfo = copyInfo
        """bool. Indicates if the non-interpolating font.info needs to be
        copied to the instances. MutatorMath.

        .. deprecated:: 5.0
        """
        self.copyGroups = copyGroups
        """bool. Indicates if the groups need to be copied to the instances.
        MutatorMath.

        .. deprecated:: 5.0
        """
        self.copyFeatures = copyFeatures
        """bool. Indicates if the feature text needs to be copied to the
        instances. MutatorMath.

        .. deprecated:: 5.0
        """
        self.muteKerning = muteKerning
        """bool. Indicates if the kerning data from this source needs to be
        muted (i.e. not be part of the calculations). MutatorMath only."""
        self.muteInfo = muteInfo
        """bool. Indicates if the interpolating font.info data from this
        source needs to be muted. MutatorMath only."""
        self.mutedGlyphNames = mutedGlyphNames or []
        """list. Glyphnames that need to be muted in the instances.
        MutatorMath only."""

    @property
    def location(self):
        """dict. Axis values for this source, in design space coordinates.

        .. deprecated:: 5.0
           Use the more explicit alias for this property, :attr:`designLocation`.
        """
        return self.designLocation

    @location.setter
    def location(self, location: Optional[AnisotropicLocationDict]):
        self.designLocation = location or {}

    def setFamilyName(self, familyName, languageCode="en"):
        """Setter for :attr:`localisedFamilyName`."""
        self.localisedFamilyName[languageCode] = tostr(familyName)

    def getFamilyName(self, languageCode="en"):
        """Getter for :attr:`localisedFamilyName`."""
        return self.localisedFamilyName.get(languageCode)

    def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
        """Get the complete design location of this source, from its
        :attr:`designLocation` and the document's axis defaults.

        .. versionadded:: 5.0
        """
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            else:
                result[axis.name] = axis.map_forward(axis.default)
        return result


class RuleDescriptor(SimpleDescriptor):
    """Represents the rule descriptor element: a set of glyph substitutions
    to trigger conditionally in some parts of the designspace.

    - Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
    - Each substitution is a tuple of glyphnames, e.g. ("a", "a.alt").

    .. code:: python

        r1 = RuleDescriptor()
        r1.name = "unique.rule.name"
        r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10)])
        r1.subs.append(("a", "a.alt"))

    .. code:: xml

        <!-- optional: list of substitution rules -->
        <rules>
            <rule name="vertical.bars">
                <conditionset>
                    <condition minimum="50" maximum="100" name="weight"/>
                    <condition minimum="100" name="width"/>
                    <condition minimum="10" maximum="40" name="optical"/>
                </conditionset>
                <sub name="cent" with="cent.alt"/>
                <sub name="dollar" with="dollar.alt"/>
            </rule>
        </rules>

    Note: By default, rules are applied first, before other text shaping and
    OpenType layout, as they are part of the `Required Variation Alternates
    OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_.
    See :ref:`rules-element` § Attributes.
    """
    _attrs = ['name', 'conditionSets', 'subs']

    def __init__(self, *, name=None, conditionSets=None, subs=None):
        self.name = name
        """string. Unique name for this rule. Can be used to reference this
        rule data."""
        # list of lists of dict(name='aaaa', minimum=0, maximum=1000)
        self.conditionSets = conditionSets or []
        """a list of conditionsets.

        - Each conditionset is a list of conditions.
        """
        # list of substitutions, stored as tuples of glyphnames ("a", "a.alt")
        self.subs = subs or []
        """list of substitutions."""


def evaluateRule(rule, location):
    """Return True if any of the rule's conditionsets matches the given location."""
    return any(evaluateConditions(c, location) for c in rule.conditionSets)


def evaluateConditions(conditions, location):
    """Return True if all the conditions matched the given location.

    - If a condition has no minimum, check for < maximum.
    - If a condition has no maximum, check for > minimum.
    """
    for cd in conditions:
        value = location[cd['name']]
        if cd.get('minimum') is None:
            if value > cd['maximum']:
                return False
        elif cd.get('maximum') is None:
            if cd['minimum'] > value:
                return False
        elif not cd['minimum'] <= value <= cd['maximum']:
            return False
    return True


def processRules(rules, location, glyphNames):
    """Apply these rules at this location to these glyphnames.

    Return a new list of glyphNames with substitutions applied.

    - rule order matters
    """
    newNames = []
    for rule in rules:
        if evaluateRule(rule, location):
            for name in glyphNames:
                swap = False
                for a, b in rule.subs:
                    if name == a:
                        swap = True
                        break
                if swap:
                    newNames.append(b)
                else:
                    newNames.append(name)
            glyphNames = newNames
            newNames = []
    return glyphNames
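# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the rule, axis and glyph names are made up):
# evaluateRule() checks a location against a rule's conditionsets, and
# processRules() applies the substitutions of all matching rules in order.
#
#     rule = RuleDescriptor(name="BRACKET.dollar")
#     rule.conditionSets.append([dict(name="weight", minimum=500, maximum=1000)])
#     rule.subs.append(("dollar", "dollar.alt"))
#
#     processRules([rule], {"weight": 750}, ["dollar", "cent"])
#     # -> ['dollar.alt', 'cent']
#     processRules([rule], {"weight": 100}, ["dollar", "cent"])
#     # -> ['dollar', 'cent']
# ---------------------------------------------------------------------------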
class AbstractAxisDescriptor(SimpleDescriptor):
    flavor = "axis"

    def __init__(self, *, tag=None, name=None, labelNames=None, hidden=False,
                 map=None, axisOrdering=None, axisLabels=None):
        # opentype tag for this axis
        self.tag = tag
        """string. Four letter tag for this axis. Some might be registered at
        the `OpenType specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__.
        Privately-defined axis tags must begin with an uppercase letter and
        use only uppercase letters or digits.
        """
        # name of the axis used in locations
        self.name = name
        """string. Name of the axis as it is used in the location dicts."""
        # names for UI purposes, if this is not a standard axis
        self.labelNames = labelNames or {}
        """dict. When defining a non-registered axis, it will be necessary to
        define user-facing readable names for the axis. Keyed by xml:lang code.
        """
        self.hidden = hidden
        """bool. Whether this axis should be hidden in user interfaces."""
        self.map = map or []
        """list of input / output values that can describe a warp of user
        space to design space coordinates. If no map values are present, it
        is assumed user space is the same as design space, as in
        [(minimum, minimum), (maximum, maximum)].
        """
        self.axisOrdering = axisOrdering
        """STAT table field ``axisOrdering``.

        See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_

        .. versionadded:: 5.0
        """
        self.axisLabels: List[AxisLabelDescriptor] = axisLabels or []
        """STAT table entries for Axis Value Tables format 1, 2, 3.

        See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_

        .. versionadded:: 5.0
        """


class AxisDescriptor(AbstractAxisDescriptor):
    """Simple container for the axis data.

    .. code:: python

        a1 = AxisDescriptor()
        a1.minimum = 1
        a1.maximum = 1000
        a1.default = 400
        a1.name = "weight"
        a1.tag = "wght"
        a1.labelNames['fa-IR'] = "قطر"
        a1.labelNames['en'] = "Wéíght"
        a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
        a1.axisOrdering = 1
        a1.axisLabels = [
            AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
        ]
        doc.addAxis(a1)
    """
    _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map',
              'axisOrdering', 'axisLabels']

    def __init__(self, *, tag=None, name=None, labelNames=None, minimum=None,
                 default=None, maximum=None, hidden=False, map=None,
                 axisOrdering=None, axisLabels=None):
        super().__init__(
            tag=tag,
            name=name,
            labelNames=labelNames,
            hidden=hidden,
            map=map,
            axisOrdering=axisOrdering,
            axisLabels=axisLabels,
        )
        self.minimum = minimum
        """number. The minimum value for this axis in user space."""
        self.maximum = maximum
        """number. The maximum value for this axis in user space."""
        self.default = default
        """number. The default value for this axis, i.e. when a new location
        is created, this is the value this axis will get in user space."""

    def serialize(self):
        # output to a dict, used in testing
        return dict(
            tag=self.tag,
            name=self.name,
            labelNames=self.labelNames,
            maximum=self.maximum,
            minimum=self.minimum,
            default=self.default,
            hidden=self.hidden,
            map=self.map,
            axisOrdering=self.axisOrdering,
            axisLabels=self.axisLabels,
        )

    def map_forward(self, v):
        """Maps value from axis mapping's input (user) to output (design)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if not self.map:
            return v
        return piecewiseLinearMap(v, {k: v for k, v in self.map})

    def map_backward(self, v):
        """Maps value from axis mapping's output (design) to input (user)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if isinstance(v, tuple):
            v = v[0]
        if not self.map:
            return v
        return piecewiseLinearMap(v, {v: k for k, v in self.map})


class DiscreteAxisDescriptor(AbstractAxisDescriptor):
    """Container for discrete axis data.

    Use this for axes that do not interpolate. The main difference from a
    continuous axis is that a continuous axis has a ``minimum`` and
    ``maximum``, while a discrete axis has a list of ``values``.

    Example: an Italic axis with 2 stops, Roman and Italic, that are not
    compatible. The axis still allows to bind together the full font family,
    which is useful for the STAT table, however it can't become a variation
    axis in a VF.

    .. code:: python

        a2 = DiscreteAxisDescriptor()
        a2.values = [0, 1]
        a2.default = 0
        a2.name = "Italic"
        a2.tag = "ITAL"
        a2.map = [('Roman', 0), ('Italic', -11)]
        a2.axisOrdering = 2
        a2.axisLabels = [
            AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
        ]
        doc.addAxis(a2)

    .. versionadded:: 5.0
    """
    flavor = "axis"
    _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels')

    def __init__(self, *, tag=None, name=None, labelNames=None, values=None,
                 default=None, hidden=False, map=None, axisOrdering=None,
                 axisLabels=None):
        super().__init__(
            tag=tag,
            name=name,
            labelNames=labelNames,
            hidden=hidden,
            map=map,
            axisOrdering=axisOrdering,
            axisLabels=axisLabels,
        )
        self.default: float = default
        """The default value for this axis, i.e. when a new location is
        created, this is the value this axis will get in user space.

        However, this default value is less important than in continuous axes:

        - it doesn't define the "neutral" version of outlines from which
          deltas would apply in a variable font. That default master is at
          the default location of all continuous axes.
        - it doesn't provide the reference glyph set for the designspace, as
          fonts at each value can have different glyph sets.
        """
        self.values: List[float] = values or []
        """List of possible values for this axis. Contrary to continuous
        axes, only the values in this list can be taken by the axis, nothing
        in between.
        """

    def map_forward(self, value):
        """Maps value from axis mapping's input to output.

        Returns value unchanged if no mapping entry is found.

        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        return next((v for k, v in self.map if k == value), value)

    def map_backward(self, value):
        """Maps value from axis mapping's output to input.

        Returns value unchanged if no mapping entry is found.

        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        if isinstance(value, tuple):
            value = value[0]
        return next((k for k, v in self.map if v == value), value)


class AxisLabelDescriptor(SimpleDescriptor):
    """Container for axis label data.

    Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
    All values are user values.
    See: `OTSpec STAT Axis value table, format 1, 2, 3
    <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_

    The STAT format of the Axis value depends on which fields are filled-in,
    see :meth:`getFormat`

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable',
              'olderSibling', 'linkedUserValue', 'labelNames')

    def __init__(self, *, name, userValue, userMinimum=None, userMaximum=None,
                 elidable=False, olderSibling=False, linkedUserValue=None,
                 labelNames=None):
        self.userMinimum: Optional[float] = userMinimum
        """STAT field ``rangeMinValue`` (format 2)."""
        self.userValue: float = userValue
        """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2)."""
        self.userMaximum: Optional[float] = userMaximum
        """STAT field ``rangeMaxValue`` (format 2)."""
        self.name: str = name
        """Label for this axis location, STAT field ``valueNameID``."""
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``."""
        self.linkedUserValue: Optional[float] = linkedUserValue
        """STAT field ``linkedValue`` (format 3)."""
        self.labelNames: MutableMapping[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        ``xml:lang`` code."""

    def getFormat(self) -> int:
        """Determine which STAT format of the Axis value to use to encode
        this label.

        ===========  =========  ===========  ===========  ===============
        STAT Format  userValue  userMinimum  userMaximum  linkedUserValue
        ===========  =========  ===========  ===========  ===============
        1            ✅          ❌            ❌            ❌
        2            ✅          ✅            ✅            ❌
        3            ✅          ❌            ❌            ✅
        ===========  =========  ===========  ===========  ===============
        """
        if self.linkedUserValue is not None:
            return 3
        if self.userMinimum is not None or self.userMaximum is not None:
            return 2
        return 1

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name


class LocationLabelDescriptor(SimpleDescriptor):
    """Container for location label data.

    Analogue of OpenType's STAT data for a free-floating location (format 4).
    All values are user values.

    See: `OTSpec STAT Axis value table, format 4
    <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')

    def __init__(self, *, name, userLocation, elidable=False,
                 olderSibling=False, labelNames=None):
        self.name: str = name
        """Label for this named location, STAT field ``valueNameID``."""
        self.userLocation: SimpleLocationDict = userLocation or {}
        """Location in user coordinates along each axis.

        If an axis is not mentioned, it is assumed to be at its default location.

        .. seealso:: This may be only part of the full location.
           See: :meth:`getFullUserLocation`
        """
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``."""
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``."""
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        ``xml:lang`` code."""

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name

    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location of this label, by combining the
        explicit user location with default axis values.

        .. versionadded:: 5.0
        """
        return {
            axis.name: self.userLocation.get(axis.name, axis.default)
            for axis in doc.axes
        }
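# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the numbers are made up): the axis ``map``
# warps user values into design coordinates, and map_backward() inverts the
# warp piecewise-linearly.
#
#     axis = AxisDescriptor(name="Weight", tag="wght",
#                           minimum=100, default=400, maximum=900)
#     axis.map = [(100, 20), (400, 80), (900, 160)]
#
#     axis.map_forward(400)     # -> 80
#     axis.map_forward(650)     # -> 120.0 (linear between the 400/900 stops)
#     axis.map_backward(160)    # -> 900
# ---------------------------------------------------------------------------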
class RangeAxisSubsetDescriptor(SimpleDescriptor):
    """Subset of a continuous axis to include in a variable font.

    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')

    def __init__(self, *, name, userMinimum=-math.inf, userDefault=None,
                 userMaximum=math.inf):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` to subset."""
        self.userMinimum: float = userMinimum
        """New minimum value of the axis in the target variable font.
        If not specified, assume the same minimum value as the full axis.
        (default = ``-math.inf``)
        """
        self.userDefault: Optional[float] = userDefault
        """New default value of the axis in the target variable font.
        If not specified, assume the same default value as the full axis.
        (default = ``None``)
        """
        self.userMaximum: float = userMaximum
        """New maximum value of the axis in the target variable font.
        If not specified, assume the same maximum value as the full axis.
        (default = ``math.inf``)
        """


class ValueAxisSubsetDescriptor(SimpleDescriptor):
    """Single value of a discrete or continuous axis to use in a variable font.

    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userValue')

    def __init__(self, *, name, userValue):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
        to "snapshot" or "freeze".
        """
        self.userValue: float = userValue
        """Value in user coordinates at which to freeze the given axis."""


class VariableFontDescriptor(SimpleDescriptor):
    """Container for variable fonts, sub-spaces of the Designspace.

    Use-cases:

    - From a single DesignSpace with discrete axes, define 1 variable font
      per value on the discrete axes. Before version 5, you would have
      needed 1 DesignSpace per such variable font, and a lot of data
      duplication.
    - From a big variable font with many axes, define subsets of that
      variable font that only include some axes and freeze other axes at a
      given location.

    .. versionadded:: 5.0
    """
    flavor = "variable-font"
    _attrs = ('filename', 'axisSubsets', 'lib')

    filename = posixpath_property("_filename")

    def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
        self.name: str = name
        """string, required. Name of this variable font to identify it during
        the build process and from other parts of the document, and also as a
        basename for the file in case the filename property is empty."""
        self.filename: str = filename
        """string, optional. Relative path to the variable font file, **as it
        is in the document**. The file may or may not exist."""
        self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []
        """Axis subsets to include in this variable font.

        If an axis is not mentioned, assume that we only want the default
        location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
        """
        self.lib: MutableMapping[str, Any] = lib or {}
        """Custom data associated with this variable font."""


class InstanceDescriptor(SimpleDescriptor):
    """Simple container for data related to the instance

    .. code:: python

        i2 = InstanceDescriptor()
        i2.path = instancePath2
        i2.familyName = "InstanceFamilyName"
        i2.styleName = "InstanceStyleName"
        i2.name = "instance.ufo2"
        # anisotropic location
        i2.designLocation = dict(weight=500, width=(400,300))
        i2.postScriptFontName = "InstancePostscriptName"
        i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
        i2.styleMapStyleName = "InstanceStyleMapStyleName"
        doc.addInstance(i2)
    """
    flavor = "instance"
    _defaultLanguageCode = "en"
    _attrs = ['filename', 'path', 'name', 'locationLabel', 'designLocation',
              'userLocation', 'familyName', 'styleName', 'postScriptFontName',
              'styleMapFamilyName', 'styleMapStyleName', 'localisedFamilyName',
              'localisedStyleName', 'localisedStyleMapFamilyName',
              'localisedStyleMapStyleName', 'glyphs', 'kerning', 'info', 'lib']

    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    def __init__(self, *, filename=None, path=None, font=None, name=None,
                 location=None, locationLabel=None, designLocation=None,
                 userLocation=None, familyName=None, styleName=None,
                 postScriptFontName=None, styleMapFamilyName=None,
                 styleMapStyleName=None, localisedFamilyName=None,
                 localisedStyleName=None, localisedStyleMapFamilyName=None,
                 localisedStyleMapStyleName=None, glyphs=None, kerning=True,
                 info=True, lib=None):
        self.filename = filename
        """string. Relative path to the instance file, **as it is in the
        document**. The file may or may not exist."""
        self.path = path
        """string. Absolute path to the instance file, calculated from the
        document path and the string in the filename attr."""
        self.font = font
        """Same as :attr:`SourceDescriptor.font`."""
        self.name = name
        """string. Unique identifier name of the instance, used to identify
        it if it needs to be referenced from elsewhere in the document."""
        self.locationLabel = locationLabel
        """Name of a :class:`LocationLabelDescriptor`.

        If provided, the instance should have the same location as the label.

        .. seealso::
           :meth:`getFullDesignLocation`
           :meth:`getFullUserLocation`

        .. versionadded:: 5.0
        """
        self.designLocation: AnisotropicLocationDict = (
            designLocation if designLocation is not None else (location or {})
        )
        """dict. Axis values for this instance, in design space coordinates."""
        self.userLocation: SimpleLocationDict = userLocation or {}
        """dict. Axis values for this instance, in user space coordinates.

        .. versionadded:: 5.0
        """
        self.familyName = familyName
        """string. Family name of this instance."""
        self.styleName = styleName
        """string. Style name of this instance."""
        self.postScriptFontName = postScriptFontName
        """string. Postscript fontname for this instance."""
        self.styleMapFamilyName = styleMapFamilyName
        """string. StyleMap familyname for this instance."""
        self.styleMapStyleName = styleMapStyleName
        """string. StyleMap stylename for this instance."""
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name strings, keyed by
        language code."""
        self.localisedStyleName = localisedStyleName or {}
        """dict. A dictionary of localised stylename strings, keyed by
        language code."""
        self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {}
        """A dictionary of localised style map familyname strings, keyed by
        language code."""
        self.localisedStyleMapStyleName = localisedStyleMapStyleName or {}
        """A dictionary of localised style map stylename strings, keyed by
        language code."""
        self.glyphs = glyphs or {}
        """dict for special master definitions for glyphs. If glyphs need
        special masters (to record the results of executed rules for
        example). MutatorMath.

        .. deprecated:: 5.0
        """
        self.kerning = kerning
        """bool. Indicates if this instance needs its kerning calculated.
        MutatorMath.

        .. deprecated:: 5.0
        """
        self.info = info
        """bool. Indicates if this instance needs the interpolating font.info
        calculated. MutatorMath.

        .. deprecated:: 5.0
        """
        self.lib = lib or {}
        """Custom data associated with this instance."""

    @property
    def location(self):
        """dict. Axis values for this instance.

        .. deprecated:: 5.0
           Use the more explicit alias for this property, :attr:`designLocation`.
        """
        return self.designLocation

    @location.setter
    def location(self, location: Optional[AnisotropicLocationDict]):
        self.designLocation = location or {}

    def setStyleName(self, styleName, languageCode="en"):
        self.localisedStyleName[languageCode] = tostr(styleName)

    def getStyleName(self, languageCode="en"):
        return self.localisedStyleName.get(languageCode)

    def setFamilyName(self, familyName, languageCode="en"):
        self.localisedFamilyName[languageCode] = tostr(familyName)

    def getFamilyName(self, languageCode="en"):
        return self.localisedFamilyName.get(languageCode)

    def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"):
        self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName)

    def getStyleMapStyleName(self, languageCode="en"):
        return self.localisedStyleMapStyleName.get(languageCode)

    def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"):
        self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName)

    def getStyleMapFamilyName(self, languageCode="en"):
        return self.localisedStyleMapFamilyName.get(languageCode)

    def clearLocation(self, axisName: Optional[str] = None):
        """Clear all location-related fields. Ensures that
        :attr:`designLocation` and :attr:`userLocation` are dictionaries
        (possibly empty if clearing everything).

        In order to update the location of this instance wholesale, a user
        should first clear all the fields, then change the field(s) for
        which they have data.

        .. code:: python

            instance.clearLocation()
            instance.designLocation = {'Weight': (34, 36.5), 'Width': 100}
            instance.userLocation = {'Opsz': 16}

        In order to update a single axis location, the user should only
        clear that axis, then edit the values:

        .. code:: python

            instance.clearLocation('Weight')
            instance.designLocation['Weight'] = 1.5

        Args:
          axisName: if provided, only clear the location for that axis.

        .. versionadded:: 5.0
        """
        self.locationLabel = None
        if axisName is None:
            self.designLocation = {}
            self.userLocation = {}
        else:
            if self.designLocation is None:
                self.designLocation = {}
            if axisName in self.designLocation:
                del self.designLocation[axisName]
            if self.userLocation is None:
                self.userLocation = {}
            if axisName in self.userLocation:
                del self.userLocation[axisName]

    def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]:
        """Get the :class:`LocationLabelDescriptor` instance that matches
        this instance's :attr:`locationLabel`.

        Raises if the named label can't be found.

        .. versionadded:: 5.0
        """
        if self.locationLabel is None:
            return None
        label = doc.getLocationLabel(self.locationLabel)
        if label is None:
            raise DesignSpaceDocumentError(
                'InstanceDescriptor.getLocationLabelDescriptor(): '
                f'unknown location label `{self.locationLabel}` in instance `{self.name}`.'
            )
        return label

    def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
        """Get the complete design location of this instance, by combining
        data from the various location fields, default axis values and
        mappings, and top-level location labels.

        The source of truth for this instance's location is determined for
        each axis independently by taking the first not-None field in this
        list:

        - ``locationLabel``: the location along this axis is the same as the
          matching STAT format 4 label. No anisotropy.
        - ``designLocation[axisName]``: the explicit design location along
          this axis, possibly anisotropic.
        - ``userLocation[axisName]``: the explicit user location along this
          axis. No anisotropy.
        - ``axis.default``: default axis value. No anisotropy.

        .. versionadded:: 5.0
        """
        if self.locationLabel is not None:
            label = self.getLocationLabelDescriptor(doc)
            return doc.map_forward(label.userLocation)  # type: ignore
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            elif axis.name in self.userLocation:
                result[axis.name] = axis.map_forward(self.userLocation[axis.name])
            else:
                result[axis.name] = axis.map_forward(axis.default)
        return result

    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location for this instance.

        .. seealso:: :meth:`getFullDesignLocation`

        .. versionadded:: 5.0
        """
        return doc.map_backward(self.getFullDesignLocation(doc))


def tagForAxisName(name):
    # try to find or make a tag name for this axis name
    names = {
        'weight':   ('wght', dict(en = 'Weight')),
        'width':    ('wdth', dict(en = 'Width')),
        'optical':  ('opsz', dict(en = 'Optical Size')),
        'slant':    ('slnt', dict(en = 'Slant')),
        'italic':   ('ital', dict(en = 'Italic')),
    }
    if name.lower() in names:
        return names[name.lower()]
    if len(name) < 4:
        tag = name + "*" * (4 - len(name))
    else:
        tag = name[:4]
    return tag, dict(en=name)
class DesignSpaceDocument(LogMixin, AsDictMixin):
    """The DesignSpaceDocument object can read and write ``.designspace`` data.
    It imports the axes, sources, variable fonts and instances to very basic
    **descriptor** objects that store the data in attributes. Data is added
    to the document by creating such descriptor objects, filling them with
    data and adding them to the document. This makes it easy to integrate
    this object in different contexts.

    The **DesignSpaceDocument** object can be subclassed to work with
    different objects, as long as they have the same attributes. Reader and
    Writer objects can be subclassed as well.

    **Note:** Python attribute names are usually camelCased, the
    corresponding `XML <document-xml-structure>`_ attributes are usually
    all lowercase.

    .. code:: python

        from fontTools.designspaceLib import DesignSpaceDocument
        doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
        doc.formatVersion
        doc.elidedFallbackName
        doc.axes
        doc.locationLabels
        doc.rules
        doc.rulesProcessingLast
        doc.sources
        doc.variableFonts
        doc.instances
        doc.lib
    """

    def __init__(self, readerClass=None, writerClass=None):
        self.path = None
        """String, optional. Path to the document, if it was saved to or
        read from disk."""
        self.filename = None
        """String, optional. When the document is on the file system, this
        is its name, i.e. the last part of its path.

        When the document is produced by a Python script and still only
        exists in memory, the producing script can write here an indication
        of a possible "good" filename, in case one wants to save the file
        somewhere.
        """
        self.formatVersion: Optional[str] = None
        """Format version for this document, as a string. E.g. "4.0" """
        self.elidedFallbackName: Optional[str] = None
        """STAT Style Attributes Header field ``elidedFallbackNameID``.

        .. versionadded:: 5.0
        """
        self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []
        """List of this document's axes."""
        self.locationLabels: List[LocationLabelDescriptor] = []
        """List of this document's STAT format 4 location labels.

        .. versionadded:: 5.0
        """
        self.rules: List[RuleDescriptor] = []
        """List of this document's rules."""
        self.rulesProcessingLast: bool = False
        """This flag indicates whether the substitution rules should be
        applied before or after other glyph substitution features.

        - False: before
        - True: after.

        Default is False. For new projects, you probably want True. See the
        following issues for more information:
        `fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__
        `fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__

        If you want to use a different feature altogether, e.g. ``calt``,
        use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``

        .. code:: python

            doc.lib = {
                "com.github.fonttools.varLib.featureVarsFeatureTag": "calt"
            }
        """
        self.sources: List[SourceDescriptor] = []
        """List of this document's sources."""
        self.variableFonts: List[VariableFontDescriptor] = []
        """List of this document's variable fonts.

        .. versionadded:: 5.0
        """
        self.instances: List[InstanceDescriptor] = []
        """List of this document's instances."""
        self.lib: Dict = {}
        """User defined, custom data associated with the whole document.

        Use reverse-DNS notation to identify your own data.
        Respect the data stored by others.
        """
        self.default: Optional[str] = None
        """Name of the default master.

        This attribute is updated by the :meth:`findDefault`
        """
        if readerClass is not None:
            self.readerClass = readerClass
        else:
            self.readerClass = BaseDocReader
        if writerClass is not None:
            self.writerClass = writerClass
        else:
            self.writerClass = BaseDocWriter

    @classmethod
    def fromfile(cls, path, readerClass=None, writerClass=None):
        """Read a designspace file from ``path`` and return a new instance
        of :class:`DesignSpaceDocument`.
        """
        self = cls(readerClass=readerClass, writerClass=writerClass)
        self.read(path)
        return self

    @classmethod
    def fromstring(cls, string, readerClass=None, writerClass=None):
        self = cls(readerClass=readerClass, writerClass=writerClass)
        reader = self.readerClass.fromstring(string, self)
        reader.read()
        if self.sources:
            self.findDefault()
        return self

    def tostring(self, encoding=None):
        """Returns the designspace as a string. Default encoding ``utf-8``."""
        if encoding is str or (
            encoding is not None and encoding.lower() == "unicode"
        ):
            f = StringIO()
            xml_declaration = False
        elif encoding is None or encoding.lower() == "utf-8":
            f = BytesIO()
            encoding = "UTF-8"
            xml_declaration = True
        else:
            raise ValueError("unsupported encoding: '%s'" % encoding)
        writer = self.writerClass(f, self)
        writer.write(encoding=encoding, xml_declaration=xml_declaration)
        return f.getvalue()

    def read(self, path):
        """Read a designspace file from ``path`` and populates the fields of
        ``self`` with the data.
        """
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        self.path = path
        self.filename = os.path.basename(path)
        reader = self.readerClass(path, self)
        reader.read()
        if self.sources:
            self.findDefault()

    def write(self, path):
        """Write this designspace to ``path``."""
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        self.path = path
        self.filename = os.path.basename(path)
        self.updatePaths()
        writer = self.writerClass(path, self)
        writer.write()

    def _posixRelativePath(self, otherPath):
        relative = os.path.relpath(otherPath, os.path.dirname(self.path))
        return posix(relative)

    def updatePaths(self):
        """
        Right before we save we need to identify and respond to the
        following situations. In each descriptor, we have to do the right
        thing for the filename attribute.

        ::

            case 1.
            descriptor.filename == None
            descriptor.path == None

            -- action:
            write as is, descriptors will not have a filename attr.
            useless, but no reason to interfere.

            case 2.
            descriptor.filename == "../something"
            descriptor.path == None

            -- action:
            write as is. The filename attr should not be touched.

            case 3.
            descriptor.filename == None
            descriptor.path == "~/absolute/path/there"

            -- action:
            calculate the relative path for filename.
            We're not overwriting some other value for filename, it should
            be fine.

            case 4.
            descriptor.filename == '../somewhere'
            descriptor.path == "~/absolute/path/there"

            -- action:
            there is a conflict between the given filename and the path;
            the path takes precedence, so the filename is recalculated.
        """
        assert self.path is not None
        for descriptor in self.sources + self.instances:
            if descriptor.path is not None:
                # case 3 and 4: calculate the relative path and update filename.
                descriptor.filename = self._posixRelativePath(descriptor.path)

    def updateFilenameFromPath(self, masters=True, instances=True, force=False):
        """Set a descriptor filename attr from the path and this document path.

        If the filename attribute is not None: skip it.
        """
        if masters:
            for descriptor in self.sources:
                if descriptor.filename is not None and not force:
                    continue
                if self.path is not None:
                    descriptor.filename = self._posixRelativePath(descriptor.path)
        if instances:
            for descriptor in self.instances:
                if descriptor.filename is not None and not force:
                    continue
                if self.path is not None:
                    descriptor.filename = self._posixRelativePath(descriptor.path)

    def addSource(self, sourceDescriptor: SourceDescriptor):
        """Add the given ``sourceDescriptor`` to :attr:`sources`."""
        self.sources.append(sourceDescriptor)

    def addSourceDescriptor(self, **kwargs):
        """Instantiate a new :class:`SourceDescriptor` using the given
        ``kwargs`` and add it to :attr:`sources`.
        """
        source = self.writerClass.sourceDescriptorClass(**kwargs)
        self.addSource(source)
        return source

    def addInstance(self, instanceDescriptor: InstanceDescriptor):
        """Add the given ``instanceDescriptor`` to :attr:`instances`."""
        self.instances.append(instanceDescriptor)

    def addInstanceDescriptor(self, **kwargs):
        """Instantiate a new :class:`InstanceDescriptor` using the given
        ``kwargs`` and add it to :attr:`instances`.
        """
        instance = self.writerClass.instanceDescriptorClass(**kwargs)
        self.addInstance(instance)
        return instance

    def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]):
        """Add the given ``axisDescriptor`` to :attr:`axes`."""
        self.axes.append(axisDescriptor)

    def addAxisDescriptor(self, **kwargs):
        """Instantiate a new :class:`AxisDescriptor` using the given
        ``kwargs`` and add it to :attr:`axes`.

        The axis will be an instance of :class:`DiscreteAxisDescriptor` if
        the ``kwargs`` provide ``values``, or an :class:`AxisDescriptor`
        otherwise.
        """
        if "values" in kwargs:
            axis = self.writerClass.discreteAxisDescriptorClass(**kwargs)
        else:
            axis = self.writerClass.axisDescriptorClass(**kwargs)
        self.addAxis(axis)
        return axis

    def addRule(self, ruleDescriptor: RuleDescriptor):
        """Add the given ``ruleDescriptor`` to :attr:`rules`."""
        self.rules.append(ruleDescriptor)

    def addRuleDescriptor(self, **kwargs):
        """Instantiate a new :class:`RuleDescriptor` using the given
        ``kwargs`` and add it to :attr:`rules`.
        """
        rule = self.writerClass.ruleDescriptorClass(**kwargs)
        self.addRule(rule)
        return rule

    def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor):
        """Add the given ``variableFontDescriptor`` to :attr:`variableFonts`.

        .. versionadded:: 5.0
        """
        self.variableFonts.append(variableFontDescriptor)

    def addVariableFontDescriptor(self, **kwargs):
        """Instantiate a new :class:`VariableFontDescriptor` using the given
        ``kwargs`` and add it to :attr:`variableFonts`.

        .. versionadded:: 5.0
        """
        variableFont = self.writerClass.variableFontDescriptorClass(**kwargs)
        self.addVariableFont(variableFont)
        return variableFont

    def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor):
        """Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`.

        .. versionadded:: 5.0
        """
        self.locationLabels.append(locationLabelDescriptor)

    def addLocationLabelDescriptor(self, **kwargs):
        """Instantiate a new :class:`LocationLabelDescriptor` using the given
        ``kwargs`` and add it to :attr:`locationLabels`.

        .. versionadded:: 5.0
        """
        locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs)
        self.addLocationLabel(locationLabel)
        return locationLabel

    def getAxis(self, name):
        """Return the axis with the given ``name``, or ``None`` if no such
        axis exists."""
        for axis in self.axes:
            if axis.name == name:
                return axis
        return None

    def getAxisOrder(self):
        names = []
        for axisDescriptor in self.axes:
            names.append(axisDescriptor.name)
        return names

    def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]:
        """Return the top-level location label with the given ``name``, or
        ``None`` if no such label exists.

        .. versionadded:: 5.0
        """
        for label in self.locationLabels:
            if label.name == name:
                return label
        return None

    def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]:
        """Return the :class:`LocationLabel` that matches the given
        ``userLocation``, or ``None`` if no such label exists.

        .. versionadded:: 5.0
        """
        return next(
            (label for label in self.locationLabels
             if label.userLocation == userLocation),
            None,
        )

    def map_forward(self, userLocation: SimpleLocationDict) -> AnisotropicLocationDict:
        """Map a user location to a design location.

        Assume that missing coordinates are at the default location for
        that axis.

        Note: the output won't be anisotropic, only the xvalue is set.

        .. versionadded:: 5.0
        """
        return {
            axis.name: axis.map_forward(userLocation.get(axis.name, axis.default))
            for axis in self.axes
        }

    def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict:
        """Map a design location to a user location.

        Assume that missing coordinates are at the default location for
        that axis.

        When the input has anisotropic locations, only the xvalue is used.

        .. versionadded:: 5.0
        """
        return {
            axis.name: (
                axis.map_backward(designLocation[axis.name])
                if axis.name in designLocation
                else axis.default
            )
            for axis in self.axes
        }

    def newDefaultLocation(self):
        """Return a dict with the default location in design space coordinates."""
        # Without OrderedDict, output XML would be non-deterministic.
        # https://github.com/LettError/designSpaceDocument/issues/10
        loc = collections.OrderedDict()
        for axisDescriptor in self.axes:
            loc[axisDescriptor.name] = axisDescriptor.map_forward(
                axisDescriptor.default
            )
        return loc

    def findDefault(self):
        """Set and return SourceDescriptor at the default location or None.

        The default location is the set of all `default` values in user
        space of all axes.

        This function updates the document's :attr:`default` value.

        .. versionchanged:: 5.0
           Allow the default source to not specify some of the axis values.
        """
        self.default = None

        # Convert the default location from user space to design space
        # before comparing it against the SourceDescriptor locations (which
        # are always in design space).
        defaultDesignLocation = self.newDefaultLocation()
        for sourceDescriptor in self.sources:
            if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation:
                self.default = sourceDescriptor
                return sourceDescriptor
        return None

    def normalizeLocation(self, location):
        """Return a dict with normalized axis values."""
        from fontTools.varLib.models import normalizeValue

        new = {}
        for axis in self.axes:
            if axis.name not in location:
                # skipping this dimension it seems
                continue
            value = location[axis.name]
            # 'anisotropic' location, take first coord only
            if isinstance(value, tuple):
                value = value[0]
            triple = [
                axis.map_forward(v)
                for v in (axis.minimum, axis.default, axis.maximum)
            ]
            new[axis.name] = normalizeValue(value, triple)
        return new

    def normalize(self):
        """
        Normalise the geometry of this designspace:

        - scale all the locations of all masters and instances to the
          -1 - 0 - 1 value.
        - we need the axis data to do the scaling, so we do those last.
        """
        # masters
        for item in self.sources:
            item.location = self.normalizeLocation(item.location)
        # instances
        for item in self.instances:
            # glyph masters for this instance
            for _, glyphData in item.glyphs.items():
                glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation'])
                for glyphMaster in glyphData['masters']:
                    glyphMaster['location'] = self.normalizeLocation(glyphMaster['location'])
            item.location = self.normalizeLocation(item.location)
        # the axes
        for axis in self.axes:
            # scale the map first
            newMap = []
            for inputValue, outputValue in axis.map:
                newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name)
                newMap.append((inputValue, newOutputValue))
            if newMap:
                axis.map = newMap
            # finally the axis values
            minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name)
            maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name)
            default = self.normalizeLocation({axis.name: axis.default}).get(axis.name)
            # and set them in the axis
            axis.minimum = minimum
            axis.maximum = maximum
            axis.default = default
        # now the rules
        for rule in self.rules:
            newConditionSets = []
            for conditions in rule.conditionSets:
                newConditions = []
                for cond in conditions:
                    if cond.get('minimum') is not None:
                        minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name'])
                    else:
                        minimum = None
                    if cond.get('maximum') is not None:
                        maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name'])
                    else:
                        maximum = None
                    newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum))
                newConditionSets.append(newConditions)
            rule.conditionSets = newConditionSets

    def loadSourceFonts(self, opener, **kwargs):
        """Ensure SourceDescriptor.font attributes are loaded, and return
        list of fonts.

        Takes a callable which initializes a new font object (e.g. TTFont,
        or defcon.Font, etc.) from the SourceDescriptor.path, and sets the
        SourceDescriptor.font attribute.
        If the font attribute is already not None, it is not loaded again.
        Fonts with the same path are only loaded once and shared among
        SourceDescriptors.

        For example, to load UFO sources using defcon:

            designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
            designspace.loadSourceFonts(defcon.Font)

        Or to load masters as FontTools binary fonts, including extra options:

            designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False)

        Args:
            opener (Callable): takes one required positional argument, the
                source.path, and an optional list of keyword arguments, and
                returns a new font object loaded from the path.
            **kwargs: extra options passed on to the opener function.

        Returns:
            List of font objects in the order they appear in the sources list.
        """
        # we load fonts with the same source path only once
        loaded = {}
        fonts = []
        for source in self.sources:
            if source.font is not None:  # font already loaded
                fonts.append(source.font)
                continue
            if source.path in loaded:
                source.font = loaded[source.path]
            else:
                if source.path is None:
                    raise DesignSpaceDocumentError(
                        "Designspace source '%s' has no 'path' attribute"
                        % (source.name or "<Unknown>")
                    )
                source.font = opener(source.path, **kwargs)
                loaded[source.path] = source.font
            fonts.append(source.font)
        return fonts

    def deepcopyExceptFonts(self):
        """Allow deep-copying a DesignSpace document without deep-copying
        attached UFO fonts or TTFont objects. The :attr:`font` attribute is
        shared by reference between the original and the copy.

        .. versionadded:: 5.0
        """
        fonts = [source.font for source in self.sources]
        try:
            for source in self.sources:
                source.font = None
            res = copy.deepcopy(self)
            for source, font in zip(res.sources, fonts):
                source.font = font
            return res
        finally:
            for source, font in zip(self.sources, fonts):
                source.font = font

    @property
    def formatTuple(self):
        """Return the formatVersion as a tuple of (major, minor).

        .. versionadded:: 5.0
        """
        if self.formatVersion is None:
            return (5, 0)
        numbers = (int(i) for i in self.formatVersion.split("."))
        major = next(numbers)
        minor = next(numbers, 0)
        return (major, minor)

    def getVariableFonts(self) -> List[VariableFontDescriptor]:
        """Return all variable fonts defined in this document, or implicit
        variable fonts that can be built from the document's continuous axes.

        In the case of Designspace documents before version 5, the whole
        document was implicitly describing a variable font that covers the
        whole space. In version 5 and above, there can be one variable font
        per value combination on the discrete axes.

        .. versionadded:: 5.0
        """
        if self.variableFonts:
            return self.variableFonts

        variableFonts = []
        discreteAxes = []
        rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = []
        for axis in self.axes:
            if isinstance(axis, DiscreteAxisDescriptor):
                discreteAxes.append(axis)
            else:
                rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name))
        valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
        for values in valueCombinations:
            basename = None
            if self.filename is not None:
                basename = os.path.splitext(self.filename)[0] + "-VF"
            if self.path is not None:
                basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF"
            if basename is None:
                basename = "VF"
            axisNames = "".join(
                [f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)]
            )
            variableFonts.append(VariableFontDescriptor(
                name=f"{basename}{axisNames}",
                axisSubsets=rangeAxisSubsets + [
                    ValueAxisSubsetDescriptor(name=axis.name, userValue=value)
                    for axis, value in zip(discreteAxes, values)
                ],
            ))
        return variableFonts
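# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the path is made up): typical round trip of
# reading a document, resolving locations through the document-level mapping
# helpers, and loading the master fonts.
#
#     doc = DesignSpaceDocument.fromfile("MyFamily.designspace")
#     doc.map_forward({"Weight": 400})   # user -> design; missing axes get
#                                        # their defaults
#     default = doc.findDefault()        # SourceDescriptor at the default
#                                        # location, or None
#     import defcon
#     fonts = doc.loadSourceFonts(defcon.Font)
# ---------------------------------------------------------------------------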
\"\"\" # masters for", "in axisObject.axisLabels: self._addAxisLabel(labelsElement, label) axisElement.append(labelsElement) if isinstance(axisObject, AxisDescriptor): axisElement.attrib['minimum'] =", "userMinimum \"\"\"New minimum value of the axis in the target", "XML_LANG: styleMapStyleName = styleMapStyleNameElement.text instanceObject.setStyleMapStyleName(styleMapStyleName, lang) for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'):", "name=\"weight\"/> <condition minimum=\"100\" name=\"width\"/> <condition minimum=\"10\" maximum=\"40\" name=\"optical\"/> </conditionset> <sub", "def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: \"\"\"Get the complete design", "def readVariableFonts(self): if self.documentObject.formatTuple < (5, 0): return xml_attrs =", "output (design).\"\"\" from fontTools.varLib.models import piecewiseLinearMap if not self.map: return", "return 3 if self.userMinimum is not None or self.userMaximum is", "path as it is stored in the document name =", "altogether, e.g. ``calt``, use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag`` .. code::", "LocationLabel. .. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\" self.designLocation:", "fonts. .. versionadded:: 5.0\"\"\" self.instances: List[InstanceDescriptor] = [] \"\"\"List of", "self.map = map or [] \"\"\"list of input / output", "Each condition is a dict with ``name``, ``minimum`` and ``maximum``", "code localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple >= (5, 0):", "= self.normalizeLocation({axis.name: axis.default}).get(axis.name) # and set them in the axis.minimum", "name validatedLocation = self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items(): if", "here. Varlib. \"\"\" self.localisedFamilyName = localisedFamilyName or {} \"\"\"dict. A", "cls.sourceDescriptorClass() @classmethod def getInstanceDescriptor(cls): return cls.instanceDescriptorClass() @classmethod def getRuleDescriptor(cls): return", "attr == XML_LANG # Note: elementtree reads the \"xml:lang\" attribute", "] )) return variableFonts def deepcopyExceptFonts(self): \"\"\"Allow deep-copying a DesignSpace", "localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple >=", "self._addLocationElement( instanceElement, designLocation=instanceObject.designLocation, userLocation=instanceObject.userLocation ) else: # Pre-version 5.0 code", "purposes, if this is not a standard axis, self.labelNames =", "space coordinates. MutatorMath + Varlib. .. seealso:: This may be", "2).\"\"\" self.name: str = name \"\"\"Label for this axis location,", "``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.linkedUserValue: Optional[float] =", "elif isinstance(subset, ValueAxisSubsetDescriptor): subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue) subsetsElement.append(subsetElement) vfElement.append(subsetsElement) self._addLib(vfElement, vf.lib,", "writing it out, as preserved below. 
class AxisLabelDescriptor(SimpleDescriptor):
    """Container for axis label data.

    Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
    All values are user values.
    See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_

    The STAT format of the Axis value depends on which fields are filled-in,
    see :meth:`getFormat`

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable',
              'olderSibling', 'linkedUserValue', 'labelNames')

    def __init__(
        self,
        *,
        name,
        userValue,
        userMinimum=None,
        userMaximum=None,
        elidable=False,
        olderSibling=False,
        linkedUserValue=None,
        labelNames=None,
    ):
        self.userMinimum: Optional[float] = userMinimum
        """STAT field ``rangeMinValue`` (format 2)."""
        self.userValue: float = userValue
        """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2)."""
        self.userMaximum: Optional[float] = userMaximum
        """STAT field ``rangeMaxValue`` (format 2)."""
        self.name: str = name
        """Label for this axis location, STAT field ``valueNameID``."""
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.linkedUserValue: Optional[float] = linkedUserValue
        """STAT field ``linkedValue`` (format 3)."""
        self.labelNames: MutableMapping[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        ``xml:lang`` code.
        """

    def getFormat(self) -> int:
        """Determine which STAT Axis value format this label is, according to
        which fields are filled-in.

        =========== ========= =========== =========== ===============
        STAT Format userValue userMinimum userMaximum linkedUserValue
        =========== ========= =========== =========== ===============
        1           ✅        ❌          ❌          ❌
        2           ✅        ✅          ✅          ❌
        3           ✅        ❌          ❌          ✅
        =========== ========= =========== =========== ===============
        """
        if self.linkedUserValue is not None:
            return 3
        if self.userMinimum is not None or self.userMaximum is not None:
            return 2
        return 1

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name
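# Quick sketch of how getFormat() reacts to the fields that are filled in.
# The label names and values are illustrative, the helper name arbitrary.
def _example_label_formats():
    assert AxisLabelDescriptor(name="Regular", userValue=400).getFormat() == 1
    assert AxisLabelDescriptor(
        name="Light", userValue=300, userMinimum=250, userMaximum=350
    ).getFormat() == 2
    assert AxisLabelDescriptor(
        name="Regular", userValue=400, linkedUserValue=700
    ).getFormat() == 3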
class LocationLabelDescriptor(SimpleDescriptor):
    """Container for location label data.

    Analogue of OpenType's STAT data for a free-floating location (format 4).
    All values are user values.
    See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')

    def __init__(self, *, name, userLocation, elidable=False, olderSibling=False, labelNames=None):
        self.name: str = name
        """Label for this named location, STAT field ``valueNameID``."""
        self.userLocation: SimpleLocationDict = userLocation or {}
        """Location in user coordinates along each axis.

        If an axis is not mentioned, it is assumed to be at its default location.

        .. seealso:: This may be only part of the full location. See:
           :meth:`getFullUserLocation`
        """
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        xml:lang code.
        """

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name

    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location of this label, by combining data
        from the explicit user location and default axis values.

        .. versionadded:: 5.0
        """
        return {
            axis.name: self.userLocation.get(axis.name, axis.default)
            for axis in doc.axes
        }
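# Sketch of registering a named location on a document; the style name and
# user coordinates are illustrative, the helper name arbitrary.
def _example_location_label(doc):
    doc.addLocationLabelDescriptor(
        name="Some Style",
        userLocation={"weight": 300, "width": 50},
        elidable=False,
    )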
class RuleDescriptor(SimpleDescriptor):
    """Represents the rule descriptor element: a set of glyph substitutions to
    trigger, with the conditions under which they apply.

    .. code:: python

        r1 = RuleDescriptor()
        r1.name = "unique.rule.name"
        r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
        r1.conditionSets.append([dict(...), dict(...)])
        r1.subs.append(("a", "a.alt"))

    .. code:: xml

        <!-- optional: list of substitution rules -->
        <rules>
            <rule name="vertical.bars">
                <conditionset>
                    <condition minimum="250.000000" maximum="750.000000" name="weight"/>
                    <condition minimum="100" name="width"/>
                    <condition minimum="10" maximum="40" name="optical"/>
                </conditionset>
                <sub name="cent" with="cent.alt"/>
                <sub name="dollar" with="dollar.alt"/>
            </rule>
        </rules>
    """
    _attrs = ['name', 'conditionSets', 'subs']  # what do we need here

    def __init__(self, *, name=None, conditionSets=None, subs=None):
        self.name = name
        """string. Unique name for this rule. Can be used to reference this rule data."""
        # list of lists of dict(name='aaaa', minimum=0, maximum=1000)
        self.conditionSets = conditionSets or []
        """a list of conditionsets.

        -  Each conditionset is a list of conditions.
        -  Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
        """
        # list of substitutions stored as tuples of glyphnames,
        # e.g. ("a", "a.alt")
        self.subs = subs or []
        """list of substitutions.

        -  Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt").
        -  Note: By default, rules are applied first, before other text
           shaping/OpenType layout, as they are part of the
           `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_.
           If you want to use a different feature altogether, e.g. ``calt``,
           use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``.
           See ref:`rules-element` § Attributes.
        """


def evaluateRule(rule, location):
    """Return True if any of the rule's conditionsets matches the given location."""
    return any(evaluateConditions(c, location) for c in rule.conditionSets)


def evaluateConditions(conditions, location):
    """Return True if all the conditions matched the given location.

    -  If a condition has no minimum, check for < maximum.
    -  If a condition has no maximum, check for > minimum.
    """
    for cd in conditions:
        value = location[cd['name']]
        if cd.get('minimum') is None:
            if value > cd['maximum']:
                return False
        elif cd.get('maximum') is None:
            if cd['minimum'] > value:
                return False
        elif not cd['minimum'] <= value <= cd['maximum']:
            return False
    return True


def processRules(rules, location, glyphNames):
    """Apply these rules at this location to these glyphnames.

    Return a new list of glyphNames with substitutions applied.

    -  rule order matters
    """
    newNames = []
    for rule in rules:
        if evaluateRule(rule, location):
            for name in glyphNames:
                swap = False
                for a, b in rule.subs:
                    if name == a:
                        swap = True
                        break
                if swap:
                    newNames.append(b)
                else:
                    newNames.append(name)
            glyphNames = newNames
            newNames = []
    return glyphNames
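# Usage sketch for the rule machinery above; the rule name, axis values and
# glyph names are illustrative, the helper name arbitrary.
def _example_rule_processing():
    r1 = RuleDescriptor()
    r1.name = "bolder.alts"
    r1.conditionSets.append([dict(name="weight", minimum=100, maximum=1000)])
    r1.subs.append(("a", "a.alt"))
    # within the condition range, "a" is swapped for "a.alt":
    assert processRules([r1], dict(weight=400), ["a", "b"]) == ["a.alt", "b"]
    # outside the range, the glyph list passes through unchanged:
    assert processRules([r1], dict(weight=50), ["a", "b"]) == ["a", "b"]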
class VariableFontDescriptor(SimpleDescriptor):
    """Container for variable fonts, sub-spaces of the Designspace.

    Use-cases:

    -  From a single DesignSpace with discrete axes, define 1 variable font
       per value on the discrete axes. Before version 5, you would have
       needed 1 DesignSpace per such variable font, and a lot of data
       duplication.
    -  From a big variable font with many axes, define subsets of that
       variable font that only include some axes and freeze other axes at a
       given location.

    .. versionadded:: 5.0
    """
    flavor = "variable-font"
    _attrs = ('filename', 'axisSubsets', 'lib')

    filename = posixpath_property("_filename")

    def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
        self.name: str = name
        """string, required. Name of this variable to identify it during the
        build process and from other parts of the document, and also as a
        filename in case the filename property is empty.

        VarLib.
        """
        self.filename: str = filename
        """string, optional. Relative path to the variable font file, **as it
        is in the document**. The file may or may not exist.

        If not specified, the :attr:`name` will be used as a basename for the file.
        """
        self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []
        """Axis subsets to include in this variable font.

        If an axis is not mentioned, assume that we only want the default
        location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
        """
        self.lib: MutableMapping[str, Any] = lib or {}
        """Custom data associated with this variable font."""


class RangeAxisSubsetDescriptor(SimpleDescriptor):
    """Subset of a continuous axis to include in a variable font.

    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')

    def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` to subset."""
        self.userMinimum: float = userMinimum
        """New minimum value of the axis in the target variable font.
        If not specified, assume the same minimum value as the full axis.
        (default = ``-math.inf``)
        """
        self.userDefault: Optional[float] = userDefault
        """New default value of the axis in the target variable font.
        If not specified, assume the same default value as the full axis.
        (default = ``None``)
        """
        self.userMaximum: float = userMaximum
        """New maximum value of the axis in the target variable font.
        If not specified, assume the same maximum value as the full axis.
        (default = ``math.inf``)
        """


class ValueAxisSubsetDescriptor(SimpleDescriptor):
    """Single value of a discrete or continuous axis to use in a variable font.

    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userValue')

    def __init__(self, *, name, userValue):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
        to "snapshot" or "freeze".
        """
        self.userValue: float = userValue
        """Value in user coordinates at which to freeze the given axis."""
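# Sketch of declaring a partial variable font with the subset descriptors
# above: keep the whole weight axis, freeze width at 100. The names and
# values are illustrative, the helper name arbitrary.
def _example_variable_font_subsets(doc):
    doc.addVariableFont(VariableFontDescriptor(
        name="MyFont-VF-Weight",
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="weight"),               # full axis
            ValueAxisSubsetDescriptor(name="width", userValue=100),  # frozen
        ],
    ))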
class SourceDescriptor(SimpleDescriptor):
    """Simple container for data related to the source.

    .. code:: python

        doc = DesignSpaceDocument()
        s1 = SourceDescriptor()
        s1.path = masterPath1
        s1.name = "master.ufo1"
        s1.font = defcon.Font("master.ufo1")
        s1.location = dict(weight=0)
        s1.familyName = "MasterFamilyName"
        s1.styleName = "MasterStyleNameOne"
        s1.localisedFamilyName = dict(fr="Caractère")
        s1.mutedGlyphNames.append("A")
        s1.mutedGlyphNames.append("Z")
        doc.addSource(s1)
    """
    flavor = "source"
    _attrs = ['filename', 'path', 'name', 'layerName',
              'location', 'copyLib', 'copyGroups', 'copyFeatures',
              'muteKerning', 'muteInfo', 'mutedGlyphNames',
              'familyName', 'styleName', 'localisedFamilyName']

    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    def __init__(
        self,
        *,
        filename=None,
        path=None,
        font=None,
        name=None,
        location=None,
        designLocation=None,
        layerName=None,
        familyName=None,
        styleName=None,
        localisedFamilyName=None,
        copyLib=False,
        copyInfo=False,
        copyGroups=False,
        copyFeatures=False,
        muteKerning=False,
        muteInfo=False,
        mutedGlyphNames=None,
    ):
        self.filename = filename
        """string. A relative path to the source file, **as it is in the
        document**.

        MutatorMath + VarLib.
        """
        self.path = path
        """The absolute path, calculated from filename."""
        self.font = font
        """Any Python object. Optional. Points to a representation of this
        source font that is loaded in memory, as a Python object (e.g. a
        ``defcon.Font`` or a ``fontTools.ttFont.TTFont``).

        The default document reader will not fill-in this attribute, and the
        default writer will not use this attribute. It is up to the user of
        ``designspaceLib`` to either load the resource identified by
        ``filename`` and store it in this field, or write the contents of
        this field to the disk and make ``filename`` point to that.
        """
        self.name = name
        """string. Optional. Unique identifier name for this source.

        MutatorMath + varLib.
        """
        self.designLocation = designLocation if designLocation is not None else location or {}
        """dict. Axis values for this source, in design space coordinates.

        MutatorMath + varLib.

        This may be only part of the full design location.
        See :meth:`getFullDesignLocation()`

        .. versionadded:: 5.0
        """
        self.layerName = layerName
        """string. The name of the layer in the source to look for outline data.
        Default ``None`` which means ``foreground``.
        """
        self.familyName = familyName
        """string. Family name of this source. Though this data can be
        extracted from the font, it can be efficient to have it right here.

        Varlib.
        """
        self.styleName = styleName
        """string. Style name of this source. Though this data can be
        extracted from the font, it can be efficient to have it right here.

        Varlib.
        """
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name strings, keyed by
        language code.

        If present, will be used to build localized names for all instances.

        .. versionadded:: 5.0
        """
        self.copyLib = copyLib
        """bool. Indicates if the contents of the font.lib need to be copied
        to the instances. MutatorMath. .. deprecated:: 5.0
        """
        self.copyInfo = copyInfo
        """bool. Indicates if the non-interpolating font.info needs to be
        copied to the instances. MutatorMath. .. deprecated:: 5.0
        """
        self.copyGroups = copyGroups
        """bool. Indicates if the groups need to be copied to the instances.
        MutatorMath. .. deprecated:: 5.0
        """
        self.copyFeatures = copyFeatures
        """bool. Indicates if the feature text needs to be copied to the
        instances. MutatorMath. .. deprecated:: 5.0
        """
        self.muteKerning = muteKerning
        """bool. Indicates if the kerning data from this source needs to be
        muted (i.e. not be part of the calculations). MutatorMath only.
        """
        self.muteInfo = muteInfo
        """bool. Indicates if the interpolating font.info data for this source
        needs to be muted. MutatorMath only.
        """
        self.mutedGlyphNames = mutedGlyphNames or []
        """list. Glyphnames that need to be muted in the instances.
        MutatorMath only.
        """

    @property
    def location(self):
        """dict. Axis values for this source, in design space coordinates.

        MutatorMath + varLib.

        .. deprecated:: 5.0
           Use the more explicit alias for this property :attr:`designLocation`.
        """
        return self.designLocation

    @location.setter
    def location(self, location: Optional[AnisotropicLocationDict]):
        self.designLocation = location or {}

    def setFamilyName(self, familyName, languageCode="en"):
        """Setter for :attr:`localisedFamilyName`

        .. versionadded:: 5.0
        """
        self.localisedFamilyName[languageCode] = tostr(familyName)

    def getFamilyName(self, languageCode="en"):
        """Getter for :attr:`localisedFamilyName`

        .. versionadded:: 5.0
        """
        return self.localisedFamilyName.get(languageCode)

    def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
        """Get the complete design location of this source, from its
        :attr:`designLocation` and the document's axis defaults.

        .. versionadded:: 5.0
        """
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            else:
                result[axis.name] = axis.map_forward(axis.default)
        return result
class InstanceDescriptor(SimpleDescriptor):
    """Simple container for data related to the instance.

    .. code:: python

        i2 = InstanceDescriptor()
        i2.path = instancePath2
        i2.familyName = "InstanceFamilyName"
        i2.styleName = "InstanceStyleName"
        i2.name = "instance.ufo2"
        # anisotropic location
        i2.designLocation = dict(weight=500, width=(400,300))
        i2.postScriptFontName = "InstancePostscriptName"
        i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
        i2.styleMapStyleName = "InstanceStyleMapStyleName"
        i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
        doc.addInstance(i2)
    """
    flavor = "instance"
    _defaultLanguageCode = "en"
    _attrs = ['filename', 'path', 'name', 'locationLabel', 'designLocation',
              'userLocation', 'familyName', 'styleName', 'postScriptFontName',
              'styleMapFamilyName', 'styleMapStyleName', 'localisedFamilyName',
              'localisedStyleName', 'localisedStyleMapFamilyName',
              'localisedStyleMapStyleName', 'glyphs', 'kerning', 'info', 'lib']

    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    def __init__(
        self,
        *,
        filename=None,
        path=None,
        font=None,
        name=None,
        location=None,
        locationLabel=None,
        designLocation=None,
        userLocation=None,
        familyName=None,
        styleName=None,
        postScriptFontName=None,
        styleMapFamilyName=None,
        styleMapStyleName=None,
        localisedFamilyName=None,
        localisedStyleName=None,
        localisedStyleMapFamilyName=None,
        localisedStyleMapStyleName=None,
        glyphs=None,
        kerning=True,
        info=True,
        lib=None,
    ):
        self.filename = filename
        """string. Relative path to the instance file, **as it is in the
        document**. The file may or may not exist. MutatorMath + VarLib.
        """
        self.path = path
        """string. Absolute path to the instance file, calculated from the
        document path and the string in the filename attr. The file may or
        may not exist. MutatorMath.
        """
        self.font = font
        """Same as :attr:`SourceDescriptor.font`

        .. seealso:: :attr:`SourceDescriptor.font`
        """
        self.name = name
        """string. Unique identifier name of the instance, used to identify
        it if it needs to be referenced from elsewhere in the document.
        """
        self.locationLabel = locationLabel
        """Name of a :class:`LocationLabelDescriptor`. If provided, the
        instance should be at the same location as the LocationLabel.

        .. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation`

        .. versionadded:: 5.0
        """
        self.designLocation: AnisotropicLocationDict = (
            designLocation if designLocation is not None else (location or {})
        )
        """dict. Axis values for this instance, in design space coordinates.
        MutatorMath + varLib.

        .. seealso:: This may be only part of the full location.
           See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation`

        .. versionadded:: 5.0
        """
        self.userLocation: SimpleLocationDict = userLocation or {}
        """dict. Axis values for this instance, in user space coordinates.
        MutatorMath + varLib.

        .. seealso:: This may be only part of the full location.
           See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation`

        .. versionadded:: 5.0
        """
        self.familyName = familyName
        """string. Family name of this instance. MutatorMath + varLib."""
        self.styleName = styleName
        """string. Style name of this instance. MutatorMath + varLib."""
        self.postScriptFontName = postScriptFontName
        """string. Postscript fontname for this instance. MutatorMath + varLib."""
        self.styleMapFamilyName = styleMapFamilyName
        """string. StyleMap familyname for this instance. MutatorMath + varLib."""
        self.styleMapStyleName = styleMapStyleName
        """string. StyleMap stylename for this instance. MutatorMath + varLib."""
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name strings, keyed by
        language code."""
        self.localisedStyleName = localisedStyleName or {}
        """dict. A dictionary of localised stylename strings, keyed by
        language code."""
        self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {}
        """A dictionary of localised style map familyname strings, keyed by
        language code."""
        self.localisedStyleMapStyleName = localisedStyleMapStyleName or {}
        """A dictionary of localised style map stylename strings, keyed by
        language code."""
        self.glyphs = glyphs or {}
        """dict for special master definitions for glyphs. MutatorMath.

        .. deprecated:: 5.0
        """
        self.kerning = kerning
        """bool. Indicates if this instance needs its kerning calculated.
        MutatorMath. .. deprecated:: 5.0
        """
        self.info = info
        """bool. Indicates if this instance needs the interpolating font.info
        calculated. .. deprecated:: 5.0
        """
        self.lib = lib or {}
        """Custom data associated with this instance."""

    def setStyleName(self, styleName, languageCode="en"):
        """These methods give easier access to the localised names."""
        self.localisedStyleName[languageCode] = tostr(styleName)

    def getStyleName(self, languageCode="en"):
        return self.localisedStyleName.get(languageCode)

    def setFamilyName(self, familyName, languageCode="en"):
        self.localisedFamilyName[languageCode] = tostr(familyName)

    def getFamilyName(self, languageCode="en"):
        return self.localisedFamilyName.get(languageCode)

    def clearLocation(self, axisName: Optional[str] = None):
        """Clear all location-related fields. Ensures that
        :attr:``designLocation`` and :attr:``userLocation`` are dictionaries
        (possibly empty if clearing everything).

        In order to update the location of this instance wholesale, a user
        should first clear all the fields, then change the field(s) for which
        they have data.

        .. code:: python

            instance.clearLocation()
            instance.designLocation = {'Weight': (34, 36.5), 'Width': 100}
            instance.userLocation = {'Opsz': 16}

        In order to update a single axis location, the user should only clear
        that axis, then edit the values:

        .. code:: python

            instance.clearLocation('Weight')
            instance.designLocation['Weight'] = (34, 36.5)

        Args:
          axisName: if provided, only clear the location for that axis.

        .. versionadded:: 5.0
        """
        self.locationLabel = None
        if axisName is None:
            self.designLocation = {}
            self.userLocation = {}
        else:
            if self.designLocation is None:
                self.designLocation = {}
            if axisName in self.designLocation:
                del self.designLocation[axisName]
            if self.userLocation is None:
                self.userLocation = {}
            if axisName in self.userLocation:
                del self.userLocation[axisName]

    def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]:
        """Get the :class:`LocationLabelDescriptor` instance that matches
        this instance's :attr:`locationLabel`.

        Raises if the named label can't be found.

        .. versionadded:: 5.0
        """
        if self.locationLabel is None:
            return None
        label = doc.getLocationLabel(self.locationLabel)
        if label is None:
            raise DesignSpaceDocumentError(
                'InstanceDescriptor.getLocationLabelDescriptor(): '
                f'unknown location label `{self.locationLabel}` in instance `{self.name}`.'
            )
        return label

    def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
        """Get the complete design location of this instance, by combining
        data from the various location fields, default axis values and
        top-level location labels.

        The source of truth for this instance's location is determined for
        each axis independently by taking the first not-None field in this
        list:

        -  ``locationLabel``: the location along this axis is the same as the
           matching STAT format 4 label. No anisotropy.
        -  ``designLocation[axisName]``: the explicit design location along
           this axis, possibly anisotropic.
        -  ``userLocation[axisName]``: the explicit user location along this
           axis. No anisotropy.
        -  ``axis.default``: default axis value. No anisotropy.

        .. versionadded:: 5.0
        """
        label = self.getLocationLabelDescriptor(doc)
        if label is not None:
            return doc.map_forward(label.userLocation)  # type: ignore
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            elif axis.name in self.userLocation:
                result[axis.name] = axis.map_forward(self.userLocation[axis.name])
            else:
                result[axis.name] = axis.map_forward(axis.default)
        return result

    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location for this instance.

        .. seealso:: :meth:`getFullDesignLocation`

        .. versionadded:: 5.0
        """
        return doc.map_backward(self.getFullDesignLocation(doc))
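# Usage sketch for editing instance locations with clearLocation(); axis
# names and values are illustrative, the helper name arbitrary.
def _example_edit_instance_location():
    instance = InstanceDescriptor()
    instance.designLocation = {'Weight': 400, 'Width': 100}
    instance.userLocation = {'Opsz': 16}
    # update a single axis: clear it first so the design and user values
    # cannot disagree, then set the new (here anisotropic) value
    instance.clearLocation('Weight')
    instance.designLocation['Weight'] = (34, 36.5)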
class DesignSpaceDocument(LogMixin, AsDictMixin):
    """The DesignSpaceDocument object can read and write ``.designspace`` data.
    It imports the axes, sources, variable fonts and instances to very basic
    **descriptor** objects that store the data in attributes. Data is added
    to the document by creating such descriptor objects, filling them with
    data and then adding them to the document.

    .. code:: python

        from fontTools.designspaceLib import DesignSpaceDocument
        doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
        doc.formatVersion
        doc.elidedFallbackName
        doc.axes
        doc.locationLabels
        doc.rules
        doc.rulesProcessingLast
        doc.sources
        doc.variableFonts
        doc.instances
        doc.lib
    """

    def __init__(self, readerClass=None, writerClass=None):
        self.path = None
        """String, optional. When the document is read from the disk, this is
        the full path that was given to :meth:`read` or :meth:`fromfile`.
        """
        self.filename = None
        """String, optional. When the document is read from the disk, this is
        its original file name, i.e. the last part of its path.

        When the document was produced by a script and still only exists in
        memory, the producing script is responsible for setting this
        attribute.
        """
        self.formatVersion: Optional[str] = None
        """Format version for this document, as a string. E.g. "4.0" """
        self.elidedFallbackName: Optional[str] = None
        """STAT Style Attributes Header field ``elidedFallbackNameID``.

        See: `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_

        .. versionadded:: 5.0
        """
        self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []
        """List of this document's axes."""
        self.locationLabels: List[LocationLabelDescriptor] = []
        """List of this document's top-level location labels.

        .. versionadded:: 5.0
        """
        self.rules: List[RuleDescriptor] = []
        """List of this document's rules."""
        self.rulesProcessingLast: bool = False
        """This flag indicates whether the substitution rules should be
        applied before or after other glyph substitution features.
        """
        self.sources: List[SourceDescriptor] = []
        """List of this document's sources."""
        self.variableFonts: List[VariableFontDescriptor] = []
        """List of this document's variable fonts.

        .. versionadded:: 5.0
        """
        self.instances: List[InstanceDescriptor] = []
        """List of this document's instances."""
        self.lib: Dict = {}
        """User defined, custom data associated with the whole document.

        Use reverse-DNS notation to identify your own data.
        Respect the data stored by others.
        """
        self.default: Optional[str] = None
        """Name of the default master.

        This attribute is updated by the :meth:`findDefault`
        """
        if readerClass is not None:
            self.readerClass = readerClass
        else:
            self.readerClass = BaseDocReader
        if writerClass is not None:
            self.writerClass = writerClass
        else:
            self.writerClass = BaseDocWriter

    @classmethod
    def fromfile(cls, path, readerClass=None, writerClass=None):
        """Read a designspace file from ``path`` and return a new instance of
        :class:`DesignSpaceDocument`.
        """
        self = cls(readerClass=readerClass, writerClass=writerClass)
        self.read(path)
        return self

    @classmethod
    def fromstring(cls, string, readerClass=None, writerClass=None):
        self = cls(readerClass=readerClass, writerClass=writerClass)
        reader = self.readerClass.fromstring(string, self)
        reader.read()
        if self.sources:
            self.findDefault()
        return self
    def addSource(self, sourceDescriptor: SourceDescriptor):
        """Add the given ``sourceDescriptor`` to ``doc.sources``."""
        self.sources.append(sourceDescriptor)

    def addSourceDescriptor(self, **kwargs):
        """Instantiate a new :class:`SourceDescriptor` using the given
        ``kwargs`` and add it to ``doc.sources``.
        """
        source = self.writerClass.sourceDescriptorClass(**kwargs)
        self.addSource(source)
        return source

    def addInstance(self, instanceDescriptor: InstanceDescriptor):
        """Add the given ``instanceDescriptor`` to :attr:`instances`."""
        self.instances.append(instanceDescriptor)

    def addInstanceDescriptor(self, **kwargs):
        """Instantiate a new :class:`InstanceDescriptor` using the given
        ``kwargs`` and add it to :attr:`instances`.
        """
        instance = self.writerClass.instanceDescriptorClass(**kwargs)
        self.addInstance(instance)
        return instance

    def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]):
        """Add the given ``axisDescriptor`` to :attr:`axes`."""
        self.axes.append(axisDescriptor)

    def addAxisDescriptor(self, **kwargs):
        """Instantiate a new :class:`AxisDescriptor` using the given
        ``kwargs`` and add it to :attr:`axes`.

        The axis will be an instance of :class:`DiscreteAxisDescriptor` if
        the ``kwargs`` provide a ``values`` argument, or a
        :class:`AxisDescriptor` otherwise.
        """
        if "values" in kwargs:
            axis = self.writerClass.discreteAxisDescriptorClass(**kwargs)
        else:
            axis = self.writerClass.axisDescriptorClass(**kwargs)
        self.addAxis(axis)
        return axis

    def addRule(self, ruleDescriptor: RuleDescriptor):
        """Add the given ``ruleDescriptor`` to :attr:`rules`."""
        self.rules.append(ruleDescriptor)

    def addRuleDescriptor(self, **kwargs):
        """Instantiate a new :class:`RuleDescriptor` using the given
        ``kwargs`` and add it to :attr:`rules`.
        """
        rule = self.writerClass.ruleDescriptorClass(**kwargs)
        self.addRule(rule)
        return rule

    def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor):
        """Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`.

        .. versionadded:: 5.0
        """
        self.locationLabels.append(locationLabelDescriptor)

    def addLocationLabelDescriptor(self, **kwargs):
        """Instantiate a new :class:`LocationLabelDescriptor` using the given
        ``kwargs`` and add it to :attr:`locationLabels`.

        .. versionadded:: 5.0
        """
        locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs)
        self.addLocationLabel(locationLabel)
        return locationLabel

    def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor):
        """Add the given ``variableFontDescriptor`` to :attr:`variableFonts`.

        .. versionadded:: 5.0
        """
        self.variableFonts.append(variableFontDescriptor)

    def addVariableFontDescriptor(self, **kwargs):
        """Instantiate a new :class:`VariableFontDescriptor` using the given
        ``kwargs`` and add it to :attr:`variableFonts`.

        .. versionadded:: 5.0
        """
        variableFont = self.writerClass.variableFontDescriptorClass(**kwargs)
        self.addVariableFont(variableFont)
        return variableFont

    def getAxis(self, name):
        """Return the axis with the given ``name``, or ``None`` if no such
        axis exists.
        """
        for axisDescriptor in self.axes:
            if axisDescriptor.name == name:
                return axisDescriptor
        return None

    def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]:
        """Return the top-level location label with the given ``name``, or
        ``None`` if no such label exists.

        .. versionadded:: 5.0
        """
        return next(
            (label for label in self.locationLabels if label.name == name),
            None,
        )

    def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict:
        """Map a user location to a design location.

        Assume that missing coordinates are at the default location for that axis.

        Note: the output won't be anisotropic, only the xvalue is set.

        .. versionadded:: 5.0
        """
        return {
            axis.name: axis.map_forward(userLocation.get(axis.name, axis.default))
            for axis in self.axes
        }

    def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict:
        """Map a design location to a user location.

        Assume that missing coordinates are at the default location for that axis.

        When the input has anisotropic locations, only the xvalue is used.

        .. versionadded:: 5.0
        """
        return {
            axis.name: (
                axis.map_backward(designLocation[axis.name])
                if axis.name in designLocation
                else axis.default
            )
            for axis in self.axes
        }
    def newDefaultLocation(self):
        """Return a dict with the default location in design space coordinates."""
        # Without OrderedDict, output XML would be non-deterministic.
        # https://github.com/LettError/designSpaceDocument/issues/10
        loc = collections.OrderedDict()
        for axisDescriptor in self.axes:
            loc[axisDescriptor.name] = axisDescriptor.map_forward(
                axisDescriptor.default
            )
        return loc

    def normalizeLocation(self, location):
        """Return a dict with normalized axis values."""
        from fontTools.varLib.models import normalizeValue

        new = {}
        for axis in self.axes:
            if axis.name not in location:
                # skipping this dimension it seems
                continue
            value = location[axis.name]
            # 'anisotropic' location, take first coord only
            if isinstance(value, tuple):
                value = value[0]
            triple = [
                axis.map_forward(v)
                for v in (axis.minimum, axis.default, axis.maximum)
            ]
            new[axis.name] = normalizeValue(value, triple)
        return new

    def normalize(self):
        """
        Normalise the geometry of this designspace:

        -   scale all the locations of all masters and instances to the
            -1 - 0 - 1 value.
        -   we need the axis data to do the scaling, so we do those last.
        """
        # masters
        for item in self.sources:
            item.location = self.normalizeLocation(item.location)
        # instances
        for item in self.instances:
            # glyph masters for this instance
            for _, glyphData in item.glyphs.items():
                glyphData['instanceLocation'] = self.normalizeLocation(
                    glyphData['instanceLocation'])
                for glyphMaster in glyphData['masters']:
                    glyphMaster['location'] = self.normalizeLocation(
                        glyphMaster['location'])
            item.location = self.normalizeLocation(item.location)
        # the axes
        for axis in self.axes:
            # scale the map first
            newMap = []
            for inputValue, outputValue in axis.map:
                newOutputValue = self.normalizeLocation(
                    {axis.name: outputValue}).get(axis.name)
                newMap.append((inputValue, newOutputValue))
            if newMap:
                axis.map = newMap
            # finally the axis values
            minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name)
            maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name)
            default = self.normalizeLocation({axis.name: axis.default}).get(axis.name)
            # and set them in the axis.minimum
            axis.minimum = minimum
            axis.maximum = maximum
            axis.default = default
        # now the rules
        for rule in self.rules:
            newConditionSets = []
            for conditions in rule.conditionSets:
                newConditions = []
                for cond in conditions:
                    if cond.get('minimum') is not None:
                        minimum = self.normalizeLocation(
                            {cond['name']: cond['minimum']}).get(cond['name'])
                    else:
                        minimum = None
                    if cond.get('maximum') is not None:
                        maximum = self.normalizeLocation(
                            {cond['name']: cond['maximum']}).get(cond['name'])
                    else:
                        maximum = None
                    newConditions.append(
                        dict(name=cond['name'], minimum=minimum, maximum=maximum)
                    )
                newConditionSets.append(newConditions)
            rule.conditionSets = newConditionSets
    def findDefault(self):
        """Set and return SourceDescriptor at the default location or None.

        The default location is the set of all `default` values in user space
        of all axes.

        This function updates the document's :attr:`default` value.

        .. versionchanged:: 5.0
           Allow the default source to not specify some of the axis values.
        """
        self.default = None

        # Convert the default location from user space to design space before
        # comparing it against the SourceDescriptor locations (always in
        # design space).
        defaultDesignLocation = self.newDefaultLocation()
        for sourceDescriptor in self.sources:
            if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation:
                self.default = sourceDescriptor
                return sourceDescriptor

        return None
    def loadSourceFonts(self, opener, **kwargs):
        """Ensure SourceDescriptor.font attributes are loaded, and return
        list of fonts.

        Takes a callable which initializes a new font object (e.g. TTFont, or
        defcon.Font, etc.) from the SourceDescriptor.path, and sets the
        SourceDescriptor.font attribute.
        If the font attribute is already not None, it is not loaded again.
        Fonts with the same path are only loaded once and shared among
        SourceDescriptors.

        For example, to load UFO sources using defcon:

            designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
            designspace.loadSourceFonts(defcon.Font)

        Or to load masters as FontTools binary fonts, with lazy loading option:

            designspace.loadSourceFonts(ttLib.TTFont, lazy=True)

        Args:
            opener (Callable): takes one required positional argument, the
                source.path, and an optional list of keyword arguments, and
                returns a new font object loaded from the path.
            **kwargs: extra options passed on to the opener function.

        Returns:
            List of font objects in the order they appear in the sources list.
        """
        # we load fonts with the same source path only once
        loaded = {}
        fonts = []
        for source in self.sources:
            if source.font is not None:  # font already loaded
                fonts.append(source.font)
                continue
            if source.path in loaded:
                source.font = loaded[source.path]
            else:
                if source.path is None:
                    raise DesignSpaceDocumentError(
                        "Designspace source '%s' has no 'path' attribute"
                        % (source.name or "<Unknown>")
                    )
                source.font = opener(source.path, **kwargs)
                loaded[source.path] = source.font
            fonts.append(source.font)
        return fonts
\"\"\" self.font", "axisDescriptor.\"\"\" return self.writerClass.getAxisDecriptor() def newSourceDescriptor(self): \"\"\"Ask the writer class to", "source, in design space coordinates. MutatorMath + Varlib. This may", "for variableFontElement in self.root.findall(\".variable-fonts/variable-font\"): unknown_attrs = set(variableFontElement.attrib) - xml_attrs if", "creating such descriptor objects, filling them with data and then", "it out, as preserved below. locationElement, sourceObject.location = self._makeLocationElement(sourceObject.location) sourceElement.append(locationElement)", "# list of lists of dict(name='aaaa', minimum=0, maximum=1000) self.conditionSets =", "value from axis mapping's input (user) to output (design).\"\"\" from", "descriptor.filename == None descriptor.path == \"~/absolute/path/there\" -- action: calculate the", "def fromfile(cls, path, readerClass=None, writerClass=None): \"\"\"Read a designspace file from", "or defcon.Font, etc.) from the SourceDescriptor.path, and sets the SourceDescriptor.font", "No anisotropy. .. versionadded:: 5.0 \"\"\" label = self.getLocationLabelDescriptor(doc) if", "piecewiseLinearMap if not self.map: return v return piecewiseLinearMap(v, {k: v", "If not specified, assume the same minimum value as the", "maximumStr = element.get(\"usermaximum\") maximum = float(maximumStr) if maximumStr is not", "def setFamilyName(self, familyName, languageCode=\"en\"): \"\"\"Setter for :attr:`localisedFamilyName` .. versionadded:: 5.0", "variableFonts.append(VariableFontDescriptor( name=f\"{basename}{axisNames}\", axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value) for axis, value", "maximum, check for > minimum. \"\"\" for cd in conditions:", "= (34, 36.5) Args: axisName: if provided, only clear the", "a new font object loaded from the path. **kwargs: extra", "freeze other axes at a given location. .. versionadded:: 5.0", "finally the axis values minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum =", "2 ✅ ✅ ✅ ❌ 3 ✅ ❌ ❌ ✅", "once and shared among SourceDescriptors. For example, to load UFO", "sorted(instanceObject.glyphs.items()): glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data) glyphsElement.append(glyphElement) if instanceObject.kerning:", "case 1. descriptor.filename == None descriptor.path == None -- action:", "corresponding `XML <document-xml-structure>`_ attributes are usually all lowercase. .. code::", "appear in the sources list. \"\"\" # we load fonts", "self.name = name \"\"\"string. Name of the axis as it", "element \"{fontSourceName}\" must only have design locations (using xvalue=\"\").') masterGlyphName", "None: glyphData['masters'] = glyphSources instanceObject.glyphs[glyphName] = glyphData def readLib(self): \"\"\"Read", "_addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None: labelElement = ET.Element('label')", "= [] self.rules = [] self.sources = [] self.instances =", "XML_NS + \"lang\" def posix(path): \"\"\"Normalize paths using forward slash", "✅ ✅ ❌ 3 ✅ ❌ ❌ ✅ =========== =========", "found. .. versionadded:: 5.0 \"\"\" if self.locationLabel is None: return", "to the variable font file, **as it is in the", "and add it to ``doc.sources``. \"\"\" source = self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source)", "document**. The file may or may not exist. 
:class:`AxisLabelDescriptor` is a container for location label data on a single axis (STAT formats 1, 2 and 3). All values are user values. See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_. A label has a ``name`` (STAT field ``valueNameID``, with optional translations in ``labelNames``, keyed by ``xml:lang`` code), a ``userValue``, an optional ``userMinimum`` and ``userMaximum`` (format 2), an optional ``linkedUserValue`` (STAT field ``linkedValue``, format 3), and the STAT flags ``elidable`` (``ELIDABLE_AXIS_VALUE_NAME``) and ``olderSibling`` (``OLDER_SIBLING_FONT_ATTRIBUTE``). See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_.

The STAT format of the label depends on which fields are filled in, see :meth:`getFormat`: format 3 if ``linkedUserValue`` is set, format 2 if ``userMinimum`` or ``userMaximum`` is set, and format 1 otherwise.

===========  =========  ===========  ===========  ===============
STAT Format  userValue  userMinimum  userMaximum  linkedUserValue
===========  =========  ===========  ===========  ===============
1            ✅          ❌            ❌            ❌
2            ✅          ✅            ✅            ❌
3            ✅          ❌            ❌            ✅
===========  =========  ===========  ===========  ===============

.. versionadded:: 5.0
(\"a\", \"a.alt\").", "axisValue in locationObject.items(): if axisName in validatedLocation: # only accept", "olderSibling=olderSibling, labelNames=labelNames, ) self.documentObject.locationLabels.append(locationLabel) def readVariableFonts(self): if self.documentObject.formatTuple < (5,", "\"temp_master.%d\" % (sourceCount) sourceObject = self.sourceDescriptorClass() sourceObject.path = sourcePath #", "contains the same data as the other for attr in", "source file, **as it is in the document**. MutatorMath +", "= ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames') def", "element contains unknown attributes: {', '.join(unknown_attrs)}\") name = labelElement.get(\"name\") if", "a name attribute.\") userValueStr = element.get(\"uservalue\") if userValueStr is None:", "None: skip it. \"\"\" if masters: for descriptor in self.sources:", "'uservalue', 'usermaximum', 'name', 'elidable', 'oldersibling', 'linkeduservalue'} unknown_attrs = set(element.attrib) -", "import piecewiseLinearMap if not self.map: return v return piecewiseLinearMap(v, {k:", "# support os.PathLike objects path = path.__fspath__() self.path = path", "= Dict[str, float] class InstanceDescriptor(SimpleDescriptor): \"\"\"Simple container for data related", "= self.intOrFloat(subset.userDefault) elif isinstance(subset, ValueAxisSubsetDescriptor): subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue) subsetsElement.append(subsetElement) vfElement.append(subsetsElement)", "dict(en = 'Slant')), 'italic': ('ital', dict(en = 'Italic')), } if", "getattr(self, private_name) def setter(self, value): # The setter rewrites paths", "in self.map}) def map_backward(self, v): \"\"\"Maps value from axis mapping's", "used to build localized names for all instances. .. versionadded::", "self.log.warning(\"ValueError in readLocation userValue %3.3f\", userValue) try: xValue = dimensionElement.attrib.get('xvalue')", "self) writer.write(encoding=encoding, xml_declaration=xml_declaration) return f.getvalue() def read(self, path): \"\"\"Read a", "default axis value. No anisotropy. .. versionadded:: 5.0 \"\"\" label", "VarLib. \"\"\" self.path = path \"\"\"string. Absolute path to the", "data. Default ``None`` which means ``foreground``. \"\"\" self.familyName = familyName", "# now the rules for rule in self.rules: newConditionSets =", "maximum=\"40\" name=\"optical\"/> </conditionset> <sub name=\"cent\" with=\"cent.alt\"/> <sub name=\"dollar\" with=\"dollar.alt\"/> </rule>", "we have to do the right thing for the filename", "\"\"\"string. Family name of this instance. MutatorMath + Varlib. \"\"\"", "sourceElement.attrib['filename'] = sourceObject.filename if sourceObject.name is not None: if sourceObject.name.find(\"temp_master\")", "the given ``axisDescriptor`` to :attr:`axes`.\"\"\" self.axes.append(axisDescriptor) def addAxisDescriptor(self, **kwargs): \"\"\"Instantiate", "def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True): filename = instanceElement.attrib.get('filename') if", "newNames.append(name) glyphNames = newNames newNames = [] return glyphNames AnisotropicLocationDict", "is not None: value = posix(value) setattr(self, private_name, value) return", "do not interpolate. 
Substitution rules are described by :class:`RuleDescriptor` objects: a set of glyph substitutions to trigger conditionally in some parts of the designspace. A rule has a ``name``, a list of ``conditionSets`` (each conditionset is a list of conditions, i.e. dicts with ``name``, ``minimum`` and ``maximum`` keys), and a list of ``subs``: substitutions stored as tuples of glyphnames, e.g. ``("a", "a.alt")``. Rule order matters.

**Note:** By default, rules are applied first, before other text shaping, as they are part of the `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_. See :ref:`rules-element` § Attributes. The document's ``rulesProcessingLast`` flag applies them last instead, and the feature tag used by varLib can be overridden with the document lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``:

.. code:: xml

    <lib>
        <dict>
            <key>com.github.fonttools.varLib.featureVarsFeatureTag</key>
            <string>calt</string>
        </dict>
    </lib>

.. code:: python

    r1 = RuleDescriptor()
    r1.name = "unique.rule.name"
    r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
    r1.conditionSets.append([dict(...), dict(...)])
    r1.subs.append(("a", "a.alt"))

.. code:: xml

    <!-- optional: list of substitution rules -->
    <rules>
        <rule name="vertical.bars">
            <conditionset>
                <condition minimum="10" maximum="40" name="optical"/>
            </conditionset>
            <sub name="cent" with="cent.alt"/>
            <sub name="dollar" with="dollar.alt"/>
        </rule>
    </rules>
Sources are described by :class:`SourceDescriptor` objects. A source has a ``filename`` (a relative path to the source file, as it is in the document) and a ``path`` (the absolute path, calculated from the document path); a ``name``, a unique identifier used to refer to the source from elsewhere in the document, for which the reader fills in ``temp_master.N`` placeholders when missing (and which the writer omits again); an optional ``layerName`` naming the source layer in the UFO (default ``None``, which means ``foreground``); a design-space ``location``; ``familyName`` and ``styleName``, with optional ``localisedFamilyName`` translations which, if present, will be used to build localized names for all instances; and a list of ``mutedGlyphNames``: glyph names that need to be muted in the instances (MutatorMath only).

.. code:: python

    doc = DesignSpaceDocument()
    s1 = SourceDescriptor()
    s1.path = masterPath1
    s1.name = "master.ufo1"
    s1.familyName = "MasterFamilyName"
    s1.styleName = "MasterStyleNameOne"
    s1.localisedFamilyName = dict(fr="Caractère")
    s1.mutedGlyphNames.append("A")
    s1.mutedGlyphNames.append("Z")
    doc.addSource(s1)
The ``font`` attribute of a source can hold a loaded font object, e.g. a ``defcon.Font`` or a ``fontTools.ttFont.TTFont``. The default document reader will not load these fonts: it is up to the user of ``designspaceLib`` to either load the resource identified by ``filename`` and store it here, or write the contents of this field to the disk and make ``filename`` point to that. When several sources share a path, it can be efficient to load each font only once and share the object among their descriptors.

:meth:`DesignSpaceDocument.loadSourceFonts` does exactly that: it ensures that the ``SourceDescriptor.font`` attributes are loaded and returns the list of fonts, in the same order as they appear in the sources list. The ``opener`` argument is a callable that takes one positional argument, the ``source.path``, plus optional keyword arguments, and returns a new font object loaded from that path. Sources whose ``font`` is already set are not loaded again, and fonts with the same path are only loaded once.

For example, to load UFO sources using defcon:

.. code:: python

    designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
    designspace.loadSourceFonts(defcon.Font)

Or to load masters as FontTools binary fonts, with lazy loading:

.. code:: python

    designspace.loadSourceFonts(ttLib.TTFont, lazy=True)
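Because fonts with the same path are shared, a custom opener is called once per unique path. A sketch, assuming every source has a ``path`` set and no ``font`` preloaded; the designspace path is hypothetical:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument
    from fontTools.ttLib import TTFont

    calls = []

    def counting_opener(path, **kwargs):
        # Called once per unique source.path; the resulting font object is
        # shared by every SourceDescriptor that points at the same file.
        calls.append(path)
        return TTFont(path, **kwargs)

    doc = DesignSpaceDocument.fromfile("MyFamily.designspace")
    fonts = doc.loadSourceFonts(counting_opener, lazy=True)
    assert len(calls) == len({s.path for s in doc.sources})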
A ``filename`` can exist without a ``path``, e.g. when the document was produced by a Python script and still only exists in memory. When a document is written to disk, we have to do the right thing for the filename attribute of each source and instance::

    case 1.
    descriptor.filename == None
    descriptor.path == None
    -- action: write as is, descriptors will not have a filename attr.

    case 2.
    descriptor.filename == "../something"
    descriptor.path == None
    -- action: write as is, the filename attr should not be touched.

    case 3.
    descriptor.filename == None
    descriptor.path == "~/absolute/path/there"
    -- action: calculate the relative path for filename.

    case 4.
    descriptor.filename == '../somewhere'
    descriptor.path == "~/absolute/path/there"
    -- action: the filename conflicts with the path. We know where the
    file is relative to the document; we can't guess why they're
    different, so we choose the path to be correct and update filename.

:meth:`DesignSpaceDocument.updateFilenameFromPath` sets the descriptor ``filename`` attributes from the ``path`` and this document path; if a ``filename`` is already set and ``force`` is false, it is skipped.
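A sketch of refreshing the filename attributes after moving a document; the paths are made up:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument.fromfile("old/place/MyFamily.designspace")
    doc.path = "new/place/MyFamily.designspace"
    # Recompute every filename relative to the new document path,
    # overwriting stale values (force=True covers case 4 above).
    doc.updateFilenameFromPath(masters=True, instances=True, force=True)
    doc.write(doc.path)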
A number of fields are deprecated since version 5.0 because they are specific to the MutatorMath workflow. On sources, ``copyLib``, ``copyGroups``, ``copyInfo`` and ``copyFeatures`` indicate whether the contents of the font.lib, the groups, the non-interpolating font.info and the feature text need to be copied to the instances, and ``muteKerning`` and ``muteInfo`` indicate whether the kerning or font.info of this source needs to be muted, i.e. excluded from the calculations. On instances, ``glyphs`` is a dict of special master definitions for glyphs, used when glyphs need special masters, for example to record the results of executed rules; each entry can carry its own ``instanceLocation``, a list of unicodes, a list of master definitions and a free-form note (e.g. "This is an instance from an anisotropic interpolation."). ``kerning`` is a bool indicating whether this instance needs its kerning calculated, and ``info`` indicates whether the instance needs the interpolating font.info calculated.
The location of a source or instance can be specified in several ways. The ``designLocation`` is a dict of axis values in design space coordinates; instance design locations may be anisotropic, i.e. an axis value can be an ``(x, y)`` tuple. The plain ``location`` attribute is an alias:

.. deprecated:: 5.0
    Use the more explicit alias for this property, :attr:`designLocation`.

Instances additionally have a ``userLocation``, axis values in user space coordinates, and may point to a top-level location label by name through ``locationLabel``, in which case the instance should have the same location as the label. Each of these fields may carry only part of the full location.

:meth:`InstanceDescriptor.getFullDesignLocation` computes the complete design location of this instance, by combining data from the various location fields, default axis values and axis mappings. For each axis, the first not-None field in this list is used: the ``locationLabel``'s location, the explicit ``designLocation`` value, the mapped-forward ``userLocation`` value, and finally the mapped-forward axis default; missing coordinates are assumed to be at the default location for that axis. :meth:`getFullUserLocation` is the user-space counterpart, and :meth:`getLocationLabelDescriptor` returns the :class:`LocationLabelDescriptor` that matches this instance's ``locationLabel``, raising if the named label is not found.

In order to update the location of this instance wholesale, a user should first clear all the fields, then change the field(s) for which they have data:

.. code:: python

    instance.clearLocation()
    instance.designLocation = {'Weight': (34, 36.5), 'Width': 100}
    instance.userLocation = {...}  # user-space values for the remaining axes

To update a single axis, one should only clear that axis, then edit the values:

.. code:: python

    instance.clearLocation('Weight')
    instance.designLocation['Weight'] = (34, 36.5)

``clearLocation(axisName=None)`` clears all location-related fields, setting ``locationLabel`` to None and emptying ``designLocation`` and ``userLocation``; if ``axisName`` is provided, only that axis is removed from the two dicts.

.. versionadded:: 5.0
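A sketch of how the fallback chain plays out, reusing the mapped ``weight`` axis from the earlier example:

.. code:: python

    from fontTools.designspaceLib import (
        AxisDescriptor,
        DesignSpaceDocument,
        InstanceDescriptor,
    )

    doc = DesignSpaceDocument()
    wght = AxisDescriptor()
    wght.name, wght.tag = "weight", "wght"
    wght.minimum, wght.default, wght.maximum = 100, 400, 900
    wght.map = [(100, 20), (400, 80), (900, 160)]
    doc.addAxis(wght)

    inst = InstanceDescriptor()
    inst.userLocation = {"weight": 900}
    # The explicit user value is mapped forward; axes with no data fall
    # back to their (mapped) default.
    print(inst.getFullDesignLocation(doc))  # {'weight': 160}
    print(inst.getFullUserLocation(doc))    # {'weight': 900}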
Instances are described by :class:`InstanceDescriptor` objects. Besides its location, an instance carries naming data: ``familyName`` and ``styleName``, an optional ``postScriptFontName``, and ``styleMapFamilyName`` / ``styleMapStyleName`` for the StyleMap familyname and stylename of this instance (MutatorMath + Varlib). Each of these has a localised counterpart: ``localisedFamilyName``, ``localisedStyleName``, ``localisedStyleMapFamilyName`` and ``localisedStyleMapStyleName`` are dicts of localised strings keyed by language code; values are required to be ``unicode`` strings, even if they only contain ASCII characters. Convenience setter and getter methods give easier access to the localised names, e.g. ``setStyleName(styleName, languageCode="en")`` and ``getStyleName(languageCode="en")``. A ``name`` uniquely identifies the instance, and ``lib`` holds custom data:

.. code:: python

    i2 = InstanceDescriptor()
    i2.familyName = "InstanceFamilyName"
    i2.styleName = "InstanceStyleName"
    i2.styleMapStyleName = "InstanceStyleMapStyleName"
    i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
    doc.addInstance(i2)
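The localised getters are plain dictionary lookups and do not fall back to the unlocalised name. A sketch; the French string is made up:

.. code:: python

    from fontTools.designspaceLib import InstanceDescriptor

    inst = InstanceDescriptor()
    inst.setFamilyName("MyFamily")          # stored under "en" by default
    inst.setFamilyName("Ma Famille", "fr")  # hypothetical translation

    assert inst.getFamilyName("fr") == "Ma Famille"
    assert inst.getFamilyName("de") is None  # no fallback to "en"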
\"\"\" self.styleMapStyleName", "self._addLocationElement(sourceElement, designLocation=sourceObject.location) else: # Pre-version 5.0 code was validating and", "name = labelElement.get(\"name\") if name is None: raise DesignSpaceDocumentError(\"label element", "# if we don't read a glyphname, use the one", "minimumStr is not None else None maximumStr = element.get(\"usermaximum\") maximum", "= True if element.get(\"oldersibling\") == \"true\" else False labelNames =", "or {} for glyphElement in instanceElement.findall('.glyphs/glyph'): self.readGlyphElement(glyphElement, instanceObject) for infoElement", "*, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf): self.name: str = name \"\"\"Name", "Reader and Writer objects can be subclassed as well. **Note:**", "`OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_ .. versionadded:: 5.0 \"\"\"", "instanceObject.getStyleMapFamilyName(code) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple >= (5, 0): if instanceObject.locationLabel is", "\"a.alt\") self.subs = subs or [] \"\"\"list of substitutions. -", "instanceObject.location = self._makeLocationElement(instanceObject.location) instanceElement.append(locationElement) if instanceObject.filename is not None: instanceElement.attrib['filename']", "the fields, then change the field(s) for which they have", "the input has anisotropic locations, only the xvalue is used.", "s1.styleName = \"MasterStyleNameOne\" s1.localisedFamilyName = dict(fr=\"Caractère\") s1.mutedGlyphNames.append(\"A\") s1.mutedGlyphNames.append(\"Z\") doc.addSource(s1) \"\"\"", "Family name of this instance. MutatorMath + Varlib. \"\"\" self.styleName", "self._addLocationLabel(labelsElement, labelObject) self.root.append(labelsElement) if self.documentObject.rules: if getattr(self.documentObject, \"rulesProcessingLast\", False): attributes", "= True self.documentObject.sources.append(sourceObject) def locationFromElement(self, element): \"\"\"Read a nested ``<location>``", "are locations on discrete axes. .. seealso:: :func:`splitInterpolable` .. versionadded::", "5.0 \"\"\" self.copyLib = copyLib \"\"\"bool. Indicates if the contents", "in user space of all axes. This function updates the", "versionadded:: 5.0 \"\"\" self.designLocation: AnisotropicLocationDict = designLocation if designLocation is", "None: raise DesignSpaceDocumentError(f'Exactly one of uservalue=\"\" or xvalue=\"\" must be", "not None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else: minimum = None", "maximum in rule\" + (\" '%s'\" % ruleName if ruleName", "str = name \"\"\"Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor` to", "source = self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source) return source def addInstance(self, instanceDescriptor: InstanceDescriptor):", "STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_ .. versionadded:: 5.0 \"\"\" self.axisLabels: List[AxisLabelDescriptor]", "\"\"\"dict. Axis values for this source, in design space coordinates.", "in user coordinates along each axis. If an axis is", "document. Can't guess why they're different, we just choose for", "instance. MutatorMath + Varlib. \"\"\" self.styleMapFamilyName = styleMapFamilyName \"\"\"string. 
StyleMap", "of uservalue=\"\" or xvalue=\"\" must be provided for location dimension", "ET.Element(\"location\") for axis in self.documentObject.axes: if designLocation is not None", "Any, indent_level: int) -> None: if not data: return libElement", ":attr:`rules`. \"\"\" rule = self.writerClass.ruleDescriptorClass(**kwargs) self.addRule(rule) return rule def addVariableFont(self,", "linkedValueStr is not None else None elidable = True if", "special masters (to record the results of executed rules for", ":meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\" self.userLocation: SimpleLocationDict = userLocation", "design locations (using xvalue=\"\").') if designLocation is not None: glyphData['instanceLocation']", "try: userValue = dimensionElement.attrib.get('uservalue') if userValue is not None: userValue", "masters=True, instances=True, force=False): \"\"\"Set a descriptor filename attr from the", "ET.Element('note') noteElement.text = data.get('note') glyphElement.append(noteElement) if data.get('masters') is not None:", "if sourceObject.copyInfo or sourceObject.muteInfo: infoElement = ET.Element('info') if sourceObject.copyInfo: infoElement.attrib['copy']", "defcon: designspace = DesignSpaceDocument.fromfile(\"path/to/my.designspace\") designspace.loadSourceFonts(defcon.Font) Or to load masters as", "specified, the :attr:`name` will be used as a basename for", "the feature text needs to be copied to the instances.", "name=f\"{basename}{axisNames}\", axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value) for axis, value in", "\"\"\" result: AnisotropicLocationDict = {} for axis in doc.axes: if", "This may be only part of the full design location.", "== \"true\" else False labelNames = { lang: label_name.text or", "function updates the document's :attr:`default` value. .. versionchanged:: 5.0 Allow", "axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value) for axis, value in zip(discreteAxes,", "masterElement.attrib['source'] = m.get('font') if m.get('location') is not None: locationElement, m['location']", "= value[0] triple = [ axis.map_forward(v) for v in (axis.minimum,", "s in axisElement.attrib[\"values\"].split(\" \")] else: axisObject = self.axisDescriptorClass() axisObject.minimum =", "in self.__dict__.items(): if attr.startswith(\"_\"): continue if hasattr(value, \"asdict\"): value =", "this rule data.\"\"\" # list of lists of dict(name='aaaa', minimum=0,", "of the calculations). MutatorMath only. \"\"\" self.muteInfo = muteInfo \"\"\"bool.", "should have the same location as the LocationLabel. .. seealso::", "Respect the data stored by others. \"\"\" self.default: Optional[str] =", "or may not exist. MutatorMath. \"\"\" self.font = font \"\"\"Same", "MutatorMath only. \"\"\" @property def location(self): \"\"\"dict. Axis values for", "for glyphName, data in sorted(instanceObject.glyphs.items()): glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName,", "\"\"\"Name of the :class:`AxisDescriptor` to subset.\"\"\" self.userMinimum: float = userMinimum", "axis should be hidden in user interfaces. \"\"\" self.map =", "code. \"\"\" self.localisedStyleName = localisedStyleName or {} \"\"\"dict. A dictionary", "element \"{name}\" must only have user locations (using uservalue=\"\").') elidable", "values we know validatedLocation[axisName] = axisValue for dimensionName, dimensionValue in", "document contains. 
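A sketch of declaring explicit subsets, assuming a document ``doc`` with the continuous ``weight`` axis and discrete ``Italic`` axis from the earlier examples:

.. code:: python

    from fontTools.designspaceLib import (
        RangeAxisSubsetDescriptor,
        ValueAxisSubsetDescriptor,
    )

    # An upright-only variable font keeping the full weight range...
    doc.addVariableFontDescriptor(
        name="MyFamilyVF",
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="weight"),
            ValueAxisSubsetDescriptor(name="Italic", userValue=0),
        ],
    )
    # ...and a narrower text cut, restricted to 400-700 on weight.
    doc.addVariableFontDescriptor(
        name="MyFamilyVF-Text",
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="weight", userMinimum=400, userMaximum=700),
            ValueAxisSubsetDescriptor(name="Italic", userValue=0),
        ],
    )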
\"\"\" minVersion = self.documentObject.formatTuple if ( any( isinstance(axis,", "the filename attr. The file may or may not exist.", "kerningElement.attrib.get('mute') == '1': sourceObject.muteKerning = True self.documentObject.sources.append(sourceObject) def locationFromElement(self, element):", "of UNC path mounts new_path = '//' + new_path return", "loaded[source.path] = source.font fonts.append(source.font) return fonts @property def formatTuple(self): \"\"\"Return", "class RuleDescriptor(SimpleDescriptor): \"\"\"Represents the rule descriptor element: a set of", "field, or write the contents of this field to the", "self.filename = None \"\"\"String, optional. When the document is read", "None # Convert the default location from user space to", "in design space coordinates. MutatorMath + Varlib. This may be", "axisObject.default = float(axisElement.attrib.get(\"default\")) axisObject.name = axisElement.attrib.get(\"name\") if axisElement.attrib.get('hidden', False): axisObject.hidden", "= float(valueStr) minimumStr = element.get(\"userminimum\") minimum = float(minimumStr) if minimumStr", "5.0 \"\"\" flavor = \"axis\" _attrs = ('tag', 'name', 'values',", "1 value. - we need the axis data to do", "should only clear that axis, then edit the values: ..", "designLocation or {} for glyphElement in instanceElement.findall('.glyphs/glyph'): self.readGlyphElement(glyphElement, instanceObject) for", "= designLocation[axis.name] if isinstance(value, tuple): dimElement.attrib['xvalue'] = self.intOrFloat(value[0]) dimElement.attrib['yvalue'] =", "identify it if it needs to be referenced from elsewhere", "_addLabelNames(self, parentElement, labelNames): for languageCode, labelName in sorted(labelNames.items()): languageElement =", "sourceObject.copyGroups: groupsElement = ET.Element('groups') groupsElement.attrib['copy'] = \"1\" sourceElement.append(groupsElement) if sourceObject.copyFeatures:", "well. **Note:** Python attribute names are usually camelCased, the corresponding", "self.documentObject.path is not None: instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename) else: instancePath", "DesignSpaceDocumentError(\"axis-subset element must have a name attribute.\") userValueStr = element.get(\"uservalue\")", "named location, STAT field ``valueNameID``.\"\"\" self.userLocation: SimpleLocationDict = userLocation or", "= instanceObject.name if instanceObject.locationLabel is not None: instanceElement.attrib['location'] = instanceObject.locationLabel", "if sourceObject.filename is not None: sourceElement.attrib['filename'] = sourceObject.filename if sourceObject.name", "instance.clearLocation() instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} instance.userLocation =", "= cls(readerClass=readerClass, writerClass=writerClass) self.read(path) return self @classmethod def fromstring(cls, string,", "this source needs to be muted. MutatorMath only. \"\"\" self.mutedGlyphNames", "_attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels']", "-> int: \"\"\"Determine which format of STAT Axis value to", "Pre-version 5.0 code was validating and filling in the location", "self.instances: List[InstanceDescriptor] = [] \"\"\"List of this document's instances.\"\"\" self.lib:", "font.info calculated. .. 
A few document-level helpers round things out. ``lib`` holds custom data associated with the whole document; use reverse-DNS notation to identify your own data, and respect the data stored by others. :meth:`getAxis` returns the axis with the given ``name``, or ``None`` if no such axis exists, and :meth:`getLocationLabel` does the same for location labels. :meth:`newDefaultLocation` returns a dict with the default location in design space coordinates (each axis default mapped forward), ordered like the axes; an ``OrderedDict`` is used because otherwise the output XML would be non-deterministic (see `LettError/designSpaceDocument#10 <https://github.com/LettError/designSpaceDocument/issues/10>`_). :meth:`findDefault` sets and returns the default master: the source whose full design location matches the default location, i.e. the set of all ``default`` values in user space of all axes, or ``None``. This function updates the document's :attr:`default` value.

.. versionchanged:: 5.0
    Allow the default source to not specify some of the axis values, and they are assumed to be at the axis default.
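A sketch of locating the default master; the names and locations are made up:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        name="weight", tag="wght", minimum=100, default=400, maximum=900
    )
    regular = doc.addSourceDescriptor(name="regular", location=dict(weight=400))
    doc.addSourceDescriptor(name="black", location=dict(weight=900))

    assert doc.findDefault() is regular
    assert doc.default is regular
    print(doc.newDefaultLocation())  # OrderedDict([('weight', 400)])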
:meth:`DesignSpaceDocument.map_forward` and :meth:`map_backward` convert a whole location between user space and design space, filling in the (mapped) default for axes that are missing from the input; for ``map_backward``, when the input has anisotropic locations, only the xvalue is used. :meth:`normalizeLocation` returns a dict with normalized axis values: each design value is scaled with :func:`fontTools.varLib.models.normalizeValue` against the axis's mapped-forward ``(minimum, default, maximum)`` triple. :meth:`normalize` applies this to the whole document, scaling the locations of all sources, instances and rule conditions to the -1 - 0 - 1 grid; we need the axis data to do the scaling, so the axis extremes themselves are updated last.
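A sketch of :meth:`normalizeLocation` on an unmapped ``weight`` axis:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        name="weight", tag="wght", minimum=100, default=400, maximum=900
    )

    # Values are scaled onto the -1..0..1 grid around the default.
    print(doc.normalizeLocation({"weight": 100}))  # {'weight': -1.0}
    print(doc.normalizeLocation({"weight": 650}))  # {'weight': 0.5}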
Reading and writing is handled by ``BaseDocReader`` and ``BaseDocWriter``, which can be replaced wholesale via the ``readerClass`` and ``writerClass`` arguments of :class:`DesignSpaceDocument` (and of :meth:`fromfile`), or tweaked by overriding their class attributes such as ``axisDescriptorClass``, ``sourceDescriptorClass``, ``instanceDescriptorClass`` and ``ruleDescriptorClass``. Locations are read from nested ``<location>`` elements with ``<dimension>`` children; each dimension carries either an ``xvalue`` (and an optional ``yvalue`` for anisotropic locations) in design coordinates, or, since format 5.0, a ``uservalue`` in user coordinates. Exactly one of ``uservalue`` or ``xvalue`` must be provided per dimension, ``<source>`` elements must only have design locations (using ``xvalue``), ``<label>`` elements must only have user locations (using ``uservalue``), and dimensions that name an undefined axis are skipped with a warning.
A ``RuleDescriptor`` describes a set of glyph substitutions that apply in some parts of the designspace. ``name`` is a unique name for this rule; it can be used to reference this rule data from elsewhere in the document. ``conditionSets`` is a list of conditionsets; each conditionset is a list of condition dicts with ``name``, ``minimum`` and ``maximum`` keys, in design space coordinates. ``subs`` is a list of substitutions, each stored as a ``(from, to)`` tuple of glyph names:

.. code:: python

    r1 = RuleDescriptor()
    r1.name = "unique.rule.name"
    r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
    r1.conditionSets.append([dict(...), dict(...)])
    r1.subs.append(("a", "a.alt"))

Evaluation: a rule applies if any of its conditionsets matches the given location, and a conditionset matches only if all of its conditions match. If a condition has no minimum, check for < maximum; if a condition has no maximum, check for > minimum; a condition missing both minimum and maximum is an error. A runnable sketch follows below. In XML, rules look like this:

.. code:: xml

    <rules>
        <rule name="vertical.bars">
            <conditionset>
                <condition minimum="250.000000" maximum="750.000000" name="weight"/>
                <condition minimum="100" name="width"/>
                <condition minimum="10" maximum="40" name="optical"/>
            </conditionset>
            <sub name="cent" with="cent.alt"/>
            <sub name="dollar" with="dollar.alt"/>
        </rule>
    </rules>

The document-level ``rulesProcessingLast`` flag indicates whether the substitution rules should be applied before or after other glyph substitution features; it is written as the ``processing`` attribute (``"first"`` or ``"last"``) of the ``<rules>`` element. The default is False. For new projects, you probably want True. By default the substitutions are compiled into the `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_; to use another feature altogether, e.g. ``calt``, use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``:

.. code:: xml

    <lib>
        <dict>
            <key>com.github.fonttools.varLib.featureVarsFeatureTag</key>
            <string>calt</string>
        </dict>
    </lib>
Locations are written as nested ``<location>`` elements containing ``<dimension>`` elements. Each dimension carries a ``name`` plus either design coordinates (``xvalue``, with an additional ``yvalue`` for anisotropic locations) or a user coordinate (``uservalue``). When the input has anisotropic locations, the corresponding value in the location dict is an ``(x, y)`` tuple. If a dimension names an axis the document does not define, the reader logs a warning ('Location with undefined axis: "..."') and skips that dimension.

.. versionchanged:: 5.0
    Reading a location element returns a tuple of (designLocation, userLocation). A dimension must use either design or user coordinates, not both at once; a ``<source>`` element must only have design locations (using ``xvalue``), and a ``<label>`` element must only have user locations (using ``uservalue``).

A ``SourceDescriptor`` describes a single master. ``filename`` is a string: a relative path to the source file, **as it is in the document**; the file may or may not exist. ``path`` is the absolute path, calculated from the filename when the document is read. ``name`` is a unique identifier name of the source; if a source comes without one, the reader assigns a temporary name. ``location`` holds the axis values for this source, in design space coordinates. ``layerName`` is the name of the layer in the source file to use; if None, the regular outlines are used.

.. versionadded:: 5.0
    ``layerName``

``familyName`` and ``styleName`` give the family and style name of this master, with ``localisedFamilyName`` as a dict of localised family names keyed by language code. The ``copyLib``, ``copyInfo``, ``copyGroups`` and ``copyFeatures`` flags indicate whether the lib, the non-interpolating font.info, the groups and the feature text of this source need to be copied to the instances (MutatorMath; deprecated since 5.0). ``muteKerning`` and ``muteInfo`` indicate that the kerning or font.info data from this source needs to be muted, and ``mutedGlyphNames`` lists glyph names that need to be muted in the instances, i.e. not be part of the interpolation (MutatorMath only).
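Reassembling the example fragments scattered through this section, filling in a source might look like this; ``defcon`` and the ``master.ufo1`` path are assumptions of the example:

.. code:: python

    import defcon
    from fontTools.designspaceLib import DesignSpaceDocument, SourceDescriptor

    doc = DesignSpaceDocument()

    s1 = SourceDescriptor()
    s1.path = "master.ufo1"
    s1.name = "master.ufo1"
    s1.font = defcon.Font("master.ufo1")
    s1.location = dict(weight=0)        # design space coordinates
    s1.familyName = "MasterFamilyName"
    s1.styleName = "MasterStyleNameOne"
    s1.mutedGlyphNames.append("A")      # keep these out of the interpolation
    s1.mutedGlyphNames.append("Z")
    doc.addSource(s1)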
An ``AxisLabelDescriptor`` is a container for axis label data: the analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3). All values are user values. See: `OTSpec STAT Axis value table, format 1 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_. ``name`` is the label itself, STAT field ``valueNameID``, with translations in ``labelNames`` keyed by language code. ``userValue`` is the value in user coordinates; ``userMinimum`` and ``userMaximum`` are the STAT fields ``rangeMinValue`` and ``rangeMaxValue`` (format 2); ``linkedUserValue`` is the STAT field ``linkedValue`` (format 3). The STAT format of the Axis value depends on which fields are filled in:

=========== ========= =========== =========== ===============
STAT Format userValue userMinimum userMaximum linkedUserValue
=========== ========= =========== =========== ===============
1           ✅        ❌          ❌          ❌
2           ✅        ✅          ✅          ❌
3           ✅        ❌          ❌          ✅
=========== ========= =========== =========== ===============

A ``LocationLabelDescriptor`` describes a STAT format 4 label: a named location given by ``name`` (STAT field ``valueNameID``, with ``labelNames`` translations) and ``userLocation``, a location in user coordinates along each axis; if an axis is not mentioned, it is assumed to be at its default location. See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_. Both label kinds also carry ``elidable`` (STAT flag ``ELIDABLE_AXIS_VALUE_NAME``) and ``olderSibling`` (STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``); see `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_.

.. versionadded:: 5.0
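A sketch of attaching label data; the names, values and ranges below are illustrative:

.. code:: python

    from fontTools.designspaceLib import (
        AxisDescriptor,
        AxisLabelDescriptor,
        LocationLabelDescriptor,
    )

    weight = AxisDescriptor()
    weight.name, weight.tag = "weight", "wght"
    weight.minimum, weight.default, weight.maximum = 100, 400, 900
    weight.axisLabels = [
        # Format 3: "Regular" is elidable and linked to "Bold".
        AxisLabelDescriptor(name="Regular", userValue=400,
                            linkedUserValue=700, elidable=True),
        # Format 2: "Bold" covers a range of user values.
        AxisLabelDescriptor(name="Bold", userValue=700,
                            userMinimum=600, userMaximum=750),
    ]

    # Format 4: a named location across several axes.
    label = LocationLabelDescriptor(
        name="Display Black",
        userLocation={"weight": 900, "width": 100},
    )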
``SourceDescriptor.font`` may hold any Python object. Optional. It points to a representation of this source font: either a package of data on disk or an object in memory (e.g. a ``defcon.Font`` or a ``fontTools.ttLib.TTFont``). The default document reader will not fill in this attribute, and the default writer will not use it: it is up to the user of ``designspaceLib`` to either load the resource identified by ``filename`` and store it in this field, or write the contents of this field to the disk and make ``filename`` point to that. When the axis data is extracted from the font, it can be efficient to have it right here (Varlib).

``loadSourceFonts(opener, **kwargs)`` ensures the ``font`` attributes are loaded. It takes a callable that, given a path, returns a new font object (e.g. ``TTFont``, or ``defcon.Font``, etc.), plus extra options passed through to it. Fonts with the same path are only loaded once and shared among SourceDescriptors, and a source whose ``font`` is already not None is not loaded again; the method returns the font objects in the order they appear in the ``sources`` list. Relatedly, ``deepcopyExceptFonts()`` allows deep-copying a DesignSpace document without deep-copying the attached fonts: the font objects are shared by reference between the original and the copy.

.. versionadded:: 5.0
    ``deepcopyExceptFonts``

At the document level, ``formatVersion`` is the format version for this document, as a string, e.g. "4.0"; it records the version found in the file when the document is read from disk. When writing, an effective format is computed: if the document uses any 5.0 features (variable fonts, location labels, localised source names, and so on), at least version 5.0 is written. ``elidedFallbackName`` is the STAT Style Attributes Header field ``elidedFallbackNameID`` (see `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_; new in 5.0), read from the ``elidedfallbackname`` attribute of the ``<axes>`` element.
When a document is saved, the filename attributes are brought in sync with the paths. So we know where the file is relative to the document, we need to identify and respond to the following situations:

- case 1: ``descriptor.filename == None``, ``descriptor.path == "~/absolute/path/there"`` -- action: calculate the relative path for filename. We're not overwriting some other value for filename, so we need to make a new one.
- case 2: ``descriptor.filename == "../something"``, ``descriptor.path == None`` -- action: write as is; descriptors will not have a path, and the filename attr should not be touched.
- cases 3 and 4: both filename and path are set -- action: filename gets updated and relativized from the path.

``updateFilenameFromPath(masters=True, instances=True, force=False)`` sets a descriptor filename attr from the path and the document path; an already-set filename is only overwritten when ``force`` is used.

An ``InstanceDescriptor`` carries, besides its location, the naming data of an instance. ``filename`` is a string: a relative path to the instance file, **as it is in the document**; the file may or may not exist. ``name`` is a unique identifier name of the instance, used to identify it if it needs to be referenced from elsewhere in the document. ``familyName``, ``styleName``, ``postScriptFontName``, ``styleMapFamilyName`` and ``styleMapStyleName`` are the usual naming fields (MutatorMath + Varlib), each paired with a dictionary of localised names keyed by language code (``localisedFamilyName``, ``localisedStyleName``, ``localisedStyleMapFamilyName``, ``localisedStyleMapStyleName``); setter/getter methods such as ``setStyleName(styleName, languageCode="en")`` give easier access to the localised names.
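Reassembled from the example fragments, a sketch of filling in an instance; the path and the German translation are illustrative:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument, InstanceDescriptor

    doc = DesignSpaceDocument()

    i2 = InstanceDescriptor()
    i2.path = "instances/InstanceFamilyName-InstanceStyleName.ufo"
    i2.familyName = "InstanceFamilyName"
    i2.styleName = "InstanceStyleName"
    i2.name = "instance.ufo2"
    i2.postScriptFontName = "InstancePostscriptName"
    i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
    i2.styleMapStyleName = "InstanceStyleMapStyleName"
    i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
    doc.addInstance(i2)

    # Localised names are keyed by language code.
    i2.setStyleName("Fett", languageCode="de")
    assert i2.getStyleName("de") == "Fett"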
A ``VariableFontDescriptor`` describes a variable font that can be built from an interpolating subset of the designspace. Before version 5, the whole document was implicitly describing a variable font that covered the whole space; with discrete axes, one previously needed 1 designspace per such variable font, and a lot of duplication. In version 5 and above, there can be as many variable fonts as there are locations on discrete axes. Use-cases:

- From a single interpolating document, define several variable fonts, sub-spaces of the designspace.
- From a big variable font with many axes, define subsets with fewer axes, or with some axes frozen at a given value.

``name`` is the required, unique name of the variable font, and ``filename`` is an optional relative path for the built file (if not specified, the ``name`` will be used; the default variable font derived for a pre-5.0 document takes the document basename plus a ``-VF`` suffix). ``axisSubsets`` lists the axis subsets to include in this variable font; if an axis is not mentioned, it is assumed to be at its default location (same as a :class:`ValueAxisSubsetDescriptor`). A ``RangeAxisSubsetDescriptor`` keeps an interpolating range of the named axis, with ``userMinimum`` (default = ``-math.inf``), ``userMaximum`` (default = ``math.inf``) and ``userDefault``; if not specified, the same minimum, maximum and default values as the full axis are assumed. A ``ValueAxisSubsetDescriptor`` is a single value for a discrete or continuous axis to use in the variable font: the axis is frozen at the given ``userValue``. When a document defines no explicit variable fonts, ``getVariableFonts()`` derives one variable font per combination of values on the discrete axes.

.. seealso:: :func:`splitInterpolable`

.. versionadded:: 5.0
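A sketch of declaring two variable fonts over a weight + italic space; the family and axis names are illustrative:

.. code:: python

    from fontTools.designspaceLib import (
        DesignSpaceDocument,
        RangeAxisSubsetDescriptor,
        ValueAxisSubsetDescriptor,
        VariableFontDescriptor,
    )

    doc = DesignSpaceDocument()
    # ... axes and sources elided ...

    # Roman-only variable font: full weight range, italic frozen at 0.
    doc.addVariableFont(VariableFontDescriptor(
        name="MyFamily-Roman-VF",
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="weight"),  # full range by default
            ValueAxisSubsetDescriptor(name="italic", userValue=0),
        ],
    ))

    # A reduced subset: only the lighter part of the weight range.
    doc.addVariableFont(VariableFontDescriptor(
        name="MyFamily-Light-VF",
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="weight", userMinimum=100,
                                      userDefault=300, userMaximum=400),
            ValueAxisSubsetDescriptor(name="italic", userValue=0),
        ],
    ))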
An instance's position can be given in three ways, and its effective location is computed from them (see the sketch after this list). ``designLocation`` holds the axis values for this instance in design space coordinates (MutatorMath + Varlib); the ``location`` property is a deprecated alias for it (deprecated since 5.0; use the more explicit ``designLocation``). ``userLocation`` holds axis values in user space coordinates (new in 5.0). ``locationLabel`` is the name of a :class:`LocationLabelDescriptor`; if provided, the instance should have the same location as the named label, with no anisotropy. An instance element must have at most one of the ``location="..."`` attribute or the nested location element.

``getFullDesignLocation(doc)`` computes the complete design location of this instance for each axis independently, by taking the first not-None field in this list:

- ``locationLabel``: the location along this axis of the matching STAT format 4 label. No anisotropy.
- ``designLocation[axisName]``: the explicit design location along this axis, possibly anisotropic.
- ``userLocation[axisName]``: the explicit user location along this axis, mapped forward through the axis map.
- the axis default, mapped forward.

``getFullUserLocation(doc)`` gives the complete user location, mapping the full design location backward. ``clearLocation(axisName=None)`` clears all location-related fields and ensures that ``designLocation`` and ``userLocation`` are dictionaries (possibly empty if clearing everything); to update the location of an instance wholesale, a user should first clear all the fields, then set the location-related fields to the desired values. If ``axisName`` is provided, only the location for that axis is cleared.

.. versionadded:: 5.0
    ``getFullDesignLocation``, ``getFullUserLocation``, ``clearLocation``
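A sketch of how the fields resolve, with an illustrative weight axis and map:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument, InstanceDescriptor

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(name="weight", tag="wght",
                          minimum=100, default=400, maximum=900,
                          map=[(100, 20), (400, 80), (900, 190)])

    inst = InstanceDescriptor()
    inst.userLocation = {"weight": 700}
    doc.addInstance(inst)

    # The user value 700 is mapped forward through the axis map.
    print(inst.getFullDesignLocation(doc))  # {'weight': 146.0}
    print(inst.getFullUserLocation(doc))    # {'weight': 700.0}

    # Clear only the weight axis, then pin it explicitly in design coordinates.
    inst.clearLocation(axisName="weight")
    inst.designLocation["weight"] = 80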
\"\"\" if \"values\" in kwargs: axis = self.writerClass.discreteAxisDescriptorClass(**kwargs)", "filename is not None and self.path is not None: sourcePath", "+ \"lang\" def posix(path): \"\"\"Normalize paths using forward slash to", "languageCode languageElement.text = labelName parentElement.append(languageElement) def _addLocationLabel(self, parentElement: ET.Element, label:", "Containers for a bunch of attributes\"\"\" # XXX this is", "versionadded:: 5.0 \"\"\" self.copyLib = copyLib \"\"\"bool. Indicates if the", "deprecated:: 5.0 \"\"\" self.muteKerning = muteKerning \"\"\"bool. Indicates if the", "\"\"\"bool. Indicates if the feature text needs to be copied", "the instances. MutatorMath only. \"\"\" @property def location(self): \"\"\"dict. Axis", "filename \"\"\"string. Relative path to the instance file, **as it", "definitions for glyphs. If glyphs need special masters (to record", "\"\"\"STAT field ``rangeMaxValue`` (format 2).\"\"\" self.name: str = name \"\"\"Label", ":class:`LocationLabelDescriptor` instance that matches this instances's :attr:`locationLabel`. Raises if the", "`OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_ .. versionadded::", "s1.font = defcon.Font(\"master.ufo1\") s1.location = dict(weight=0) s1.familyName = \"MasterFamilyName\" s1.styleName", "rulesElement is not None: processingValue = rulesElement.attrib.get(\"processing\", \"first\") if processingValue", "a list of ``values``. Example: an Italic axis with 2", "self.userMinimum: Optional[float] = userMinimum \"\"\"STAT field ``rangeMinValue`` (format 2).\"\"\" self.userValue:", "name attribute.\") designLocation, userLocation = self.locationFromElement(labelElement) if designLocation: raise DesignSpaceDocumentError(f'<label>", "languageCode=\"en\"): \"\"\"Setter for :attr:`localisedFamilyName` .. versionadded:: 5.0 \"\"\" self.localisedFamilyName[languageCode] =", "location element') instanceObject.locationLabel = locationLabel instanceObject.userLocation = userLocation or {}", "tag=tag, name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.minimum =", "is not None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else: minimum =", "hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.minimum = minimum \"\"\"number. The", "Contrary to continuous axes, only the values in this list", "[] \"\"\"list of substitutions. - Each substitution is stored as", "ruleElement.attrib['name'] = ruleObject.name for conditions in ruleObject.conditionSets: conditionsetElement = ET.Element('conditionset')", "ET.Element, vf: VariableFontDescriptor) -> None: vfElement = ET.Element('variable-font') vfElement.attrib['name'] =", "testing return dict( tag=self.tag, name=self.name, labelNames=self.labelNames, maximum=self.maximum, minimum=self.minimum, default=self.default, hidden=self.hidden,", "(xValue, yValue) elif xValue is not None: designLoc[dimName] = xValue", "glyphName, data) glyphsElement.append(glyphElement) if instanceObject.kerning: kerningElement = ET.Element('kerning') instanceElement.append(kerningElement) if", "key == XML_LANG: familyName = familyNameElement.text instanceObject.setFamilyName(familyName, lang) for styleMapStyleNameElement", "values and mappings, and top-level location labels. The source of", "flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. 
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.labelNames: Dict[str,", "of input / output values that can describe a warp", "it doesn't define the \"neutral\" version of outlines from which", "Can be used to reference this rule data.\"\"\" # list", "ET.Element('kerning') instanceElement.append(kerningElement) if instanceObject.info: infoElement = ET.Element('info') instanceElement.append(infoElement) self._addLib(instanceElement, instanceObject.lib,", "fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument.fromfile(\"some/path/to/my.designspace\") doc.formatVersion doc.elidedFallbackName doc.axes doc.locationLabels", "= copyInfo \"\"\"bool. Indicates if the non-interpolating font.info needs to", "instanceElement.findall('stylemapstylename'): for key, lang in styleMapStyleNameElement.items(): if key == XML_LANG:", "axisElement.attrib['hidden'] = \"1\" self.root.findall('.axes')[0].append(axisElement) def _addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor)", "True else: raise ValueError(\"unsupported encoding: '%s'\" % encoding) writer =", "tuple): value = value[0] triple = [ axis.map_forward(v) for v", "= None \"\"\"String, optional. When the document is read from", "self.log.info( \"Found stray rule conditions outside a conditionset. \" \"Wrapped", "libElement, instanceObject): \"\"\"Read the lib element for the given instance.\"\"\"", "self.addAxis(axis) return axis def addRule(self, ruleDescriptor: RuleDescriptor): \"\"\"Add the given", "return res finally: for source, font in zip(self.sources, fonts): source.font", "def evaluateConditions(conditions, location): \"\"\"Return True if all the conditions matches", "self.rules: newConditionSets = [] for conditions in rule.conditionSets: newConditions =", "ET.Element('groups') groupsElement.attrib['copy'] = \"1\" sourceElement.append(groupsElement) if sourceObject.copyFeatures: featuresElement = ET.Element('features')", "= True axisObject.tag = axisElement.attrib.get(\"tag\") for mapElement in axisElement.findall('map'): a", "needed 1 DesignSpace per such variable font, and a lot", "sourceObject.copyInfo = True if infoElement.attrib.get('mute') == '1': sourceObject.muteInfo = True", "sourceObject.styleName if sourceObject.layerName is not None: sourceElement.attrib['layer'] = sourceObject.layerName if", "\"xml:lang\" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' for key, lang in", ") def map_forward(self, v): \"\"\"Maps value from axis mapping's input", ">= (5, 0) and \"values\" in axisElement.attrib: axisObject = self.discreteAxisDescriptorClass()", "location or {} \"\"\"dict. 
class SourceDescriptor(SimpleDescriptor):
    """Simple container for data related to a source font."""
    flavor = "source"
    _attrs = ['filename', 'path', 'name', 'layerName', 'location', 'copyLib',
              'copyGroups', 'copyInfo', 'copyFeatures', 'muteKerning',
              'muteInfo', 'mutedGlyphNames', 'familyName', 'styleName',
              'localisedFamilyName']

    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    def __init__(self, *, filename=None, path=None, font=None, name=None,
                 location=None, designLocation=None, layerName=None,
                 familyName=None, styleName=None, localisedFamilyName=None,
                 copyLib=False, copyInfo=False, copyGroups=False,
                 copyFeatures=False, muteKerning=False, muteInfo=False,
                 mutedGlyphNames=None):
        self.filename = filename
        """string. Relative path to the source file, **as it is in the
        document**. MutatorMath + VarLib.
        """
        self.path = path
        """The absolute path, calculated from filename."""
        self.font = font
        """Any Python object. Usually a font object that is loaded in
        memory, as a Python object (e.g. a ``defcon.Font`` or a
        ``fontTools.ttFont.TTFont``).

        The default document reader will not fill-in this attribute, and
        the default writer will not use this attribute. It is up to the
        user of ``designspaceLib`` to either load the resource identified
        by ``filename`` and store it in this field, or write the contents
        of this field to the disk and make ``filename`` point to that.
        """
        self.name = name
        """string. Optional. Part of the document as a way to find or refer
        to this source. MutatorMath + VarLib.
        """
        self.designLocation = designLocation if designLocation is not None else location or {}
        """dict. Axis values for this source, in design space coordinates.
        MutatorMath + VarLib.

        This may be only part of the full design location.
        See :meth:`getFullDesignLocation()`.

        .. versionadded:: 5.0
        """
        self.layerName = layerName
        """string. The name of the layer in the source to look for
        outline data."""
        self.familyName = familyName
        """string. Family name of this source, as it is in the font.
        MutatorMath + VarLib."""
        self.styleName = styleName
        """string. Style name of this source, as it is in the font.
        MutatorMath + VarLib."""
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name strings, keyed by
        language code."""
        self.copyLib = copyLib
        """bool. Indicates if the contents of the font.lib need to be
        copied to the instances. MutatorMath. .. deprecated:: 5.0"""
        self.copyInfo = copyInfo
        """bool. Indicates if the non-interpolating font.info needs to be
        copied to the instances. MutatorMath. .. deprecated:: 5.0"""
        self.copyGroups = copyGroups
        """bool. Indicates if the groups need to be copied to the
        instances. MutatorMath. .. deprecated:: 5.0"""
        self.copyFeatures = copyFeatures
        """bool. Indicates if the feature text needs to be copied to the
        instances. MutatorMath. .. deprecated:: 5.0"""
        self.muteKerning = muteKerning
        """bool. Indicates if the kerning data from this source needs to be
        muted (i.e. not be part of the calculations). MutatorMath only."""
        self.muteInfo = muteInfo
        """bool. Indicates if the interpolating font.info data from this
        source needs to be muted. MutatorMath only."""
        self.mutedGlyphNames = mutedGlyphNames or []
        """list. Glyph names that need to be muted in the instances.
        MutatorMath only."""

    @property
    def location(self):
        """dict. Axis values for this source, in design space coordinates.

        .. deprecated:: 5.0
           Use the more explicit alias :attr:`designLocation`.
        """
        return self.designLocation

    @location.setter
    def location(self, location: Optional[AnisotropicLocationDict]):
        self.designLocation = location or {}

    def setFamilyName(self, familyName, languageCode="en"):
        """Setter for :attr:`localisedFamilyName`.

        .. versionadded:: 5.0
        """
        self.localisedFamilyName[languageCode] = tostr(familyName)

    def getFamilyName(self, languageCode="en"):
        """Getter for :attr:`localisedFamilyName`.

        .. versionadded:: 5.0
        """
        return self.localisedFamilyName.get(languageCode)

    def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
        """Get the complete design location of this source, from its
        :attr:`designLocation` and the document's axis defaults.

        .. versionadded:: 5.0
        """
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            else:
                result[axis.name] = axis.map_forward(axis.default)
        return result
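# Usage sketch (hypothetical helper, not part of the module): building a
# document and one source by hand, following the SourceDescriptor example
# recovered from the class docstrings above. The UFO path is a placeholder
# and is never opened here.
def _example_add_source():
    from fontTools.designspaceLib import DesignSpaceDocument, SourceDescriptor

    doc = DesignSpaceDocument()
    s1 = SourceDescriptor()
    s1.path = "masters/MasterOne.ufo"   # placeholder path
    s1.name = "master.ufo1"
    s1.location = dict(weight=0)        # design-space coordinates
    s1.familyName = "MasterFamilyName"
    s1.styleName = "MasterStyleNameOne"
    s1.mutedGlyphNames.append("A.alt")  # keep this glyph out of the instances
    doc.addSource(s1)
    return doc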
class RuleDescriptor(SimpleDescriptor):
    """Represents the rule descriptor element: a set of glyph substitutions
    to trigger in some parts of the designspace.

    .. code:: python

        r1 = RuleDescriptor()
        r1.name = "unique.rule.name"
        r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
        r1.conditionSets.append([dict(...), dict(...)])
        r1.subs.append(("a", "a.alt"))

    .. code:: xml

        <!-- optional: list of substitution rules -->
        <rules>
            <rule name="vertical.bars">
                <conditionset>
                    <condition minimum="250" maximum="750" name="weight"/>
                    <condition minimum="100" name="width"/>
                    <condition minimum="10" maximum="40" name="optical"/>
                </conditionset>
                <sub name="cent" with="cent.alt"/>
                <sub name="dollar" with="dollar.alt"/>
            </rule>
        </rules>
    """
    _attrs = ['name', 'conditionSets', 'subs']

    def __init__(self, *, name=None, conditionSets=None, subs=None):
        self.name = name
        """string. Unique name for this rule. Can be used to reference this
        rule data."""
        # list of lists of dict(name='aaaa', minimum=0, maximum=1000)
        self.conditionSets = conditionSets or []
        """a list of conditionsets.

        -  Each conditionset is a list of conditions.
        -  Each condition is a dict with ``name``, ``minimum`` and
           ``maximum`` keys.
        """
        # list of substitutions stored as tuples of glyphnames ("a", "a.alt")
        self.subs = subs or []
        """list of substitutions.

        -  Each substitution is stored as tuples of glyphnames,
           e.g. ("a", "a.alt").
        -  Note: By default, rules are applied first, before other text
           shaping/OpenType layout, as they are part of the Required
           Variation Alternates OpenType feature.
        """


def evaluateRule(rule, location):
    """Return True if any of the rule's conditionsets matches the given
    location."""
    return any(evaluateConditions(c, location) for c in rule.conditionSets)


def evaluateConditions(conditions, location):
    """Return True if all the conditions match the given location."""
    for cd in conditions:
        value = location[cd['name']]
        if cd.get('minimum') is None:
            if value > cd['maximum']:
                return False
        elif cd.get('maximum') is None:
            if cd['minimum'] > value:
                return False
        elif not cd['minimum'] <= value <= cd['maximum']:
            return False
    return True


def processRules(rules, location, glyphNames):
    """Apply these rules at this location to these glyphnames.

    Return a new list of glyphNames with substitutions applied.

    - rule order matters
    """
    newNames = []
    for rule in rules:
        if evaluateRule(rule, location):
            for name in glyphNames:
                swap = False
                for a, b in rule.subs:
                    if name == a:
                        swap = True
                        break
                if swap:
                    newNames.append(b)
                else:
                    newNames.append(name)
            glyphNames = newNames
            newNames = []
    return glyphNames
versionadded:: 5.0 \"\"\" self.axes:", "elif isinstance(value, list): value = [ v.asdict() if hasattr(v, \"asdict\")", "s1.name = \"master.ufo1\" s1.font = defcon.Font(\"master.ufo1\") s1.location = dict(weight=0) s1.familyName", "= location[cd['name']] if cd.get('minimum') is None: if value > cd['maximum']:", "<key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib> \"\"\" self.sources: List[SourceDescriptor] = [] \"\"\"List", ":func:`splitInterpolable` .. versionadded:: 5.0 \"\"\" if self.variableFonts: return self.variableFonts variableFonts", "%3.3f\", xValue) try: yValue = dimensionElement.attrib.get('yvalue') if yValue is not", "objects path = path.__fspath__() self.path = path self.filename = os.path.basename(path)", "to do the scaling, so we do those last. \"\"\"", "glyphElement = ET.Element(\"glyph\") glyphElement.attrib[\"name\"] = name glyphElement.attrib[\"mute\"] = '1' sourceElement.append(glyphElement)", "on discrete axes. .. seealso:: :func:`splitInterpolable` .. versionadded:: 5.0 \"\"\"", ":attr:``designLocation`` and :attr:``userLocation`` are dictionaries (possibly empty if clearing everything).", "MutableMapping[str, str] = labelNames or {} \"\"\"User-facing translations of this", "not specified, the :attr:`name` will be used as a basename", "for libElement in instanceElement.findall('lib'): self.readLibElement(libElement, instanceObject) self.documentObject.instances.append(instanceObject) def readLibElement(self, libElement,", "In the case of Designspace documents before version 5, the", "and instance of :class:`DiscreteAxisDescriptor` if the ``kwargs`` provide a ``value``,", "styleName=None, postScriptFontName=None, styleMapFamilyName=None, styleMapStyleName=None, localisedFamilyName=None, localisedStyleName=None, localisedStyleMapFamilyName=None, localisedStyleMapStyleName=None, glyphs=None, kerning=True,", "{} \"\"\"Custom data associated with this instance.\"\"\" @property def location(self):", "on the discrete axes. Before version 5, you would have", "styleMapFamilyName styleMapStyleName = instanceElement.attrib.get('stylemapstylename') if styleMapStyleName is not None: instanceObject.styleMapStyleName", "path to the source file, **as it is in the", "\"\"\" self.userDefault: Optional[float] = userDefault \"\"\"New default value of the", "and this document path. If the filename attribute is not", "default # now the rules for rule in self.rules: newConditionSets", "or {} def setFamilyName(self, familyName, languageCode=\"en\"): \"\"\"Setter for :attr:`localisedFamilyName` ..", "cls(readerClass=readerClass, writerClass=writerClass) reader = self.readerClass.fromstring(string, self) reader.read() if self.sources: self.findDefault()", "source needs to be muted. MutatorMath only. \"\"\" self.mutedGlyphNames =", ":attr:`default` value. .. versionchanged:: 5.0 Allow the default source to", "self.effectiveFormatTuple >= (5, 0): if instanceObject.locationLabel is None: self._addLocationElement( instanceElement,", "once loaded = {} fonts = [] for source in", "[] self.instances = [] self.axisDefaults = {} self._strictAxisNames = True", "names for styleNameElement in instanceElement.findall('stylename'): for key, lang in styleNameElement.items():", "axis.name in self.userLocation: result[axis.name] = axis.map_forward(self.userLocation[axis.name]) else: result[axis.name] = axis.map_forward(axis.default)", "labelNames=labelNames, ) def readLabels(self): if self.documentObject.formatTuple < (5, 0): return", "if no such label exists. .. 
versionadded:: 5.0 \"\"\" for", "self._addLabelNames(labelElement, label.labelNames) axisElement.append(labelElement) def _addLabelNames(self, parentElement, labelNames): for languageCode, labelName", "they only contain ASCII characters. \"\"\" self.hidden = hidden \"\"\"bool.", "source, from its :attr:`designLocation` and the document's axis defaults. ..", "in item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) for glyphMaster in glyphData['masters']: glyphMaster['location']", "= None res = copy.deepcopy(self) for source, font in zip(res.sources,", "have a name attribute.\") userValueStr = element.get(\"uservalue\") if userValueStr is", "key, lang in styleMapStyleNameElement.items(): if key == XML_LANG: styleMapStyleName =", "name is None: raise DesignSpaceDocumentError(\"axis-subset element must have a name", "versionadded:: 5.0 \"\"\" fonts = [source.font for source in self.sources]", "only loaded once and shared among SourceDescriptors. For example, to", "d[attr] = value return d class SimpleDescriptor(AsDictMixin): \"\"\" Containers for", "in languageCodes: if code == \"en\": continue localisedStyleMapStyleNameElement = ET.Element('stylemapstylename')", "case of Designspace documents before version 5, the whole document", "if userValueStr is None: raise DesignSpaceDocumentError( \"The axis-subset element for", "self.readerClass.fromstring(string, self) reader.read() if self.sources: self.findDefault() return self def tostring(self,", "font. If not specified, assume the same default value as", "subsets to include in this variable font. If an axis", "infoElement.attrib['copy'] = \"1\" if sourceObject.muteInfo: infoElement.attrib['mute'] = \"1\" sourceElement.append(infoElement) if", "full path that was given to :meth:`read` or :meth:`fromfile`. \"\"\"", "in value ] d[attr] = value return d class SimpleDescriptor(AsDictMixin):", ":class:`LocationLabel` that matches the given ``userLocation``, or ``None`` if no", "to a locationElement.\"\"\" locElement = ET.Element(\"location\") if name is not", "the document**. The file may or may not exist. MutatorMath", "the document's axis defaults. .. versionadded:: 5.0 \"\"\" result: AnisotropicLocationDict", "num return (\"%f\" % num).rstrip('0').rstrip('.') def _addRule(self, ruleObject): # if", "not None: sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename)) else: sourcePath = None", "# so we have to do it ourselves for 'xml:lang'", "import os import posixpath from io import BytesIO, StringIO from", "subElement.attrib['name'] = sub[0] subElement.attrib['with'] = sub[1] ruleElement.append(subElement) if len(ruleElement): self.root.findall('.rules')[0].append(ruleElement)", "documentObject tree = ET.parse(self.path) self.root = tree.getroot() self.documentObject.formatVersion = self.root.attrib.get(\"format\",", "will get in user space. MutatorMath + Varlib. \"\"\" def", "default=self.default, hidden=self.hidden, map=self.map, axisOrdering=self.axisOrdering, axisLabels=self.axisLabels, ) def map_forward(self, v): \"\"\"Maps", ":class:`AxisDescriptor` using the given ``kwargs`` and add it to :attr:`axes`.", "in user interfaces. 
\"\"\" self.map = map or [] \"\"\"list", "in vf.axisSubsets: subsetElement = ET.Element('axis-subset') subsetElement.attrib['name'] = subset.name if isinstance(subset,", "source.font = None res = copy.deepcopy(self) for source, font in", "return next((k for k, v in self.map if v ==", "freeze the given axis.\"\"\" class BaseDocWriter(object): _whiteSpace = \" \"", "values or [] \"\"\"List of possible values for this axis.", "user location. Assume that missing coordinates are at the default", "extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args: opener (Callable): takes one required", "self.root.append(labelsElement) if self.documentObject.rules: if getattr(self.documentObject, \"rulesProcessingLast\", False): attributes = {\"processing\":", "localisedStyleNameElement = ET.Element('stylename') localisedStyleNameElement.attrib[XML_LANG] = code localisedStyleNameElement.text = instanceObject.getStyleName(code) instanceElement.append(localisedStyleNameElement)", "elements, including the warp map. axesElement = self.root.find(\".axes\") if axesElement", "return for axisElement in axisElements: if self.documentObject.formatTuple >= (5, 0)", "doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: \"\"\"Get the complete design location of", "of this instance wholesale, a user should first clear all", "i2 = InstanceDescriptor() i2.path = instancePath2 i2.familyName = \"InstanceFamilyName\" i2.styleName", "encoding is None or encoding == \"utf-8\": f = BytesIO()", "tree.getroot() self.documentObject.formatVersion = self.root.attrib.get(\"format\", \"3.0\") self._axes = [] self.rules =", "of the axis used in locations self.name = name \"\"\"string.", "encode this label. =========== ========= =========== =========== =============== STAT Format", "else False olderSibling = True if element.get(\"oldersibling\") == \"true\" else", "None: if not data: return libElement = ET.Element('lib') libElement.append(plistlib.totree(data, indent_level=indent_level))", "tuple of (designLocation, userLocation) \"\"\" elementLocation = (None, None) for", "Return a new list of glyphNames with substitutions applied. -", "not None: maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name']) else: maximum = None", "else: minimum = None if cond.get('maximum') is not None: maximum", "= \"\".join([f\"-{axis.tag}{value}\" for axis, value in zip(discreteAxes, values)]) variableFonts.append(VariableFontDescriptor( name=f\"{basename}{axisNames}\",", "for filename. We're not overwriting some other value for filename,", "anisotropy. - ``axis.default``: default axis value. No anisotropy. .. versionadded::", "code == \"en\": continue # already stored in the element", "userValue=400, elidable=True) ] doc.addAxis(a1) \"\"\" _attrs = ['tag', 'name', 'maximum',", "unicode=\"0x62\"/> <glyph name=\"b\"/> <glyph name=\"b\"> <master location=\"location-token-bbb\" source=\"master-token-aaa2\"/> <master glyphname=\"b.alt1\"", "axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum) axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum) elif isinstance(axisObject, DiscreteAxisDescriptor): axisElement.attrib['values']", "Takes a callable which initializes a new font object (e.g.", "in this variable font. If an axis is not mentioned,", "instances to the -1 - 0 - 1 value. 
-", "getStyleMapStyleName(self, languageCode=\"en\"): return self.localisedStyleMapStyleName.get(languageCode) def setStyleMapFamilyName(self, styleMapFamilyName, languageCode=\"en\"): self.localisedStyleMapFamilyName[languageCode] =", "to identify it during the build process and from other", "document.\"\"\" for libElement in self.root.findall(\".lib\"): self.documentObject.lib = plistlib.fromtree(libElement[0]) class DesignSpaceDocument(LogMixin,", "of all `default` values in user space of all axes.", "a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)] a1.axisOrdering =", ":class:`RuleDescriptor` using the given ``kwargs`` and add it to :attr:`rules`.", "value depends on which field are filled-in, see :meth:`getFormat` ..", "None: mastersElement = ET.Element(\"masters\") for m in data.get('masters'): masterElement =", "assert(getattr(self, attr) == getattr(other, attr)) except AssertionError: print(\"failed attribute\", attr,", "'Optical Size')), 'slant': ('slnt', dict(en = 'Slant')), 'italic': ('ital', dict(en", "filename attr. useless, but no reason to interfere. case 2.", "= \"source\" _attrs = ['filename', 'path', 'name', 'layerName', 'location', 'copyLib',", "locations, only the xvalue is used. .. versionadded:: 5.0 \"\"\"", "OpenType's STAT data for a single axis (formats 1, 2", "= ET.Element('familyname') localisedFamilyNameElement.attrib[XML_LANG] = code localisedFamilyNameElement.text = sourceObject.getFamilyName(code) sourceElement.append(localisedFamilyNameElement) if", "\"\"\"Apply these rules at this location to these glyphnames. Return", "not interpolate. The main difference from a continuous axis is", "maximum value for this axis in user space. MutatorMath +", "is not None: instanceObject.styleMapStyleName = styleMapStyleName # read localised names", "per such variable font, and a lot of data duplication.", "BytesIO, StringIO from textwrap import indent from typing import Any,", "@location.setter def location(self, location: Optional[AnisotropicLocationDict]): self.designLocation = location or {}", "[(minimum, minimum), (maximum, maximum)]. Varlib. \"\"\" self.axisOrdering = axisOrdering \"\"\"STAT", "styleMapStyleName # read localised names for styleNameElement in instanceElement.findall('stylename'): for", "following issues for more information: `fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__ `fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__ If", "in loaded: source.font = loaded[source.path] else: if source.path is None:", "class AxisLabelDescriptor(SimpleDescriptor): \"\"\"Container for axis label data. Analogue of OpenType's", "only once loaded = {} fonts = [] for source", "for things if cd.get('minimum') is None and cd.get('maximum') is None:", "not None: masterElement.attrib['glyphname'] = m.get('glyphName') if m.get('font') is not None:", "styleNameElement in instanceElement.findall('stylename'): for key, lang in styleNameElement.items(): if key", "is not None: try: unicodes = [int(u, 16) for u", "glyph masters for this instance for _, glyphData in item.glyphs.items():", "self.filename: str = filename \"\"\"string, optional. 
class DiscreteAxisDescriptor(AbstractAxisDescriptor):
    """Container for discrete axis data.

    Use this for axes that do not interpolate. The main difference from a
    continuous axis is that a continuous axis has a ``minimum`` and
    ``maximum``, while a discrete axis has a list of ``values``.

    Example: an Italic axis with 2 stops, Roman and Italic, that are not
    compatible. The axis still allows to bind together the full font
    family, which is useful for the STAT table, however it can't become a
    variation axis in a VF.

    .. code:: python

        a2 = DiscreteAxisDescriptor()
        a2.values = [0, 1]
        a2.default = 0
        a2.name = "italic"
        a2.tag = "ital"
        a2.map = [(0, 0), (1, -11)]
        a2.axisOrdering = 2
        a2.axisLabels = [
            AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
        ]
        doc.addAxis(a2)

    .. versionadded:: 5.0
    """
    flavor = "axis"
    _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering',
              'axisLabels')

    def __init__(self, *, tag=None, name=None, labelNames=None, values=None,
                 default=None, hidden=False, map=None, axisOrdering=None,
                 axisLabels=None):
        super().__init__(
            tag=tag, name=name, labelNames=labelNames, hidden=hidden,
            map=map, axisOrdering=axisOrdering, axisLabels=axisLabels,
        )
        self.default: float = default
        """The default value for this axis, i.e. when a new location is
        created, this is the value this axis will get in user space.

        However, this default value is less important than in continuous
        axes:

        - it doesn't define the "neutral" version of outlines from which
          deltas would apply, as a discrete axis does not interpolate.
        - it doesn't provide the reference glyph set for the designspace,
          as each value can have different glyph sets.
        """
        self.values: List[float] = values or []
        """List of possible values for this axis. Contrary to continuous
        axes, only the values in this list can be taken by the axis,
        nothing in-between."""

    def map_forward(self, value):
        """Maps value from axis mapping's input to output.

        Returns value unchanged if no mapping entry is found.

        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        return next((v for k, v in self.map if k == value), value)

    def map_backward(self, value):
        """Maps value from axis mapping's output to input.

        Returns value unchanged if no mapping entry is found.

        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        if isinstance(value, tuple):
            value = value[0]
        return next((k for k, v in self.map if v == value), value)
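# Sketch: on a discrete axis the map is a plain lookup, and values without
# a mapping entry pass through unchanged. The axis mirrors the docstring
# example above; the helper name is hypothetical.
def _example_discrete_mapping():
    from fontTools.designspaceLib import DiscreteAxisDescriptor

    a2 = DiscreteAxisDescriptor()
    a2.values = [0, 1]
    a2.default = 0
    a2.name, a2.tag = "italic", "ital"
    a2.map = [(0, 0), (1, -11)]
    assert a2.map_forward(1) == -11   # user 1 -> design -11
    assert a2.map_backward(-11) == 1  # design -11 -> user 1
    return a2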
class AxisLabelDescriptor(SimpleDescriptor):
    """Container for axis label data.

    Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
    All values are user values.

    See: `OTSpec STAT Axis value table, format 1, 2, 3
    <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_

    The STAT format of the Axis value depends on which fields are
    filled-in, see :meth:`getFormat`

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable',
              'olderSibling', 'linkedUserValue', 'labelNames')

    def __init__(self, *, name, userValue, userMinimum=None, userMaximum=None,
                 elidable=False, olderSibling=False, linkedUserValue=None,
                 labelNames=None):
        self.userMinimum: Optional[float] = userMinimum
        """STAT field ``rangeMinValue`` (format 2)."""
        self.userValue: float = userValue
        """STAT field ``value`` (format 1, 3) or ``nominalValue``
        (format 2)."""
        self.userMaximum: Optional[float] = userMaximum
        """STAT field ``rangeMaxValue`` (format 2)."""
        self.name: str = name
        """Label for this axis location, STAT field ``valueNameID``."""
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.

        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.linkedUserValue: Optional[float] = linkedUserValue
        """STAT field ``linkedValue`` (format 3)."""
        self.labelNames: MutableMapping[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        ``xml:lang`` code."""

    def getFormat(self) -> int:
        """Determine which format of STAT Axis value to use to encode this
        label.

        ===========  =========  ===========  ===========  ===============
        STAT Format  userValue  userMinimum  userMaximum  linkedUserValue
        ===========  =========  ===========  ===========  ===============
        1            ✅          ❌            ❌            ❌
        2            ✅          ✅            ✅            ❌
        3            ✅          ❌            ❌            ✅
        ===========  =========  ===========  ===========  ===============
        """
        if self.linkedUserValue is not None:
            return 3
        if self.userMinimum is not None or self.userMaximum is not None:
            return 2
        return 1

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the
        :attr:`name`."""
        return self.labelNames.get("en") or self.name


class LocationLabelDescriptor(SimpleDescriptor):
    """Container for location label data.

    Analogue of OpenType's STAT data for a free-standing location
    (format 4). All values are user values.

    See: `OTSpec STAT Axis value table, format 4
    <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('name', 'elidable', 'olderSibling', 'userLocation',
              'labelNames')

    def __init__(self, *, name, userLocation, elidable=False,
                 olderSibling=False, labelNames=None):
        self.name: str = name
        """Label for this named location, STAT field ``valueNameID``."""
        self.userLocation: SimpleLocationDict = userLocation or {}
        """Location in user coordinates along each axis.

        If an axis is not mentioned, it is assumed to be at its default
        location.

        .. seealso:: This may be only part of the full location. See:
           :meth:`getFullUserLocation`
        """
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``."""
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``."""
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        ``xml:lang`` code."""

    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the
        :attr:`name`."""
        return self.labelNames.get("en") or self.name

    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location of this label, by combining data
        from the explicit user location and default axis values.

        .. versionadded:: 5.0
        """
        return {
            axis.name: self.userLocation.get(axis.name, axis.default)
            for axis in doc.axes
        }
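# Sketch: getFormat() chooses the STAT Axis Value format from the fields
# that are filled in, per the table in its docstring; the label values
# below are illustrative.
def _example_stat_formats():
    from fontTools.designspaceLib import AxisLabelDescriptor

    assert AxisLabelDescriptor(name="Regular", userValue=400).getFormat() == 1
    assert AxisLabelDescriptor(name="Text", userValue=400,
                               userMinimum=350, userMaximum=450).getFormat() == 2
    assert AxisLabelDescriptor(name="Regular", userValue=400,
                               linkedUserValue=700).getFormat() == 3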
class RangeAxisSubsetDescriptor(SimpleDescriptor):
    """Subset of a continuous axis to include in a variable font.

    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')

    def __init__(self, *, name, userMinimum=-math.inf, userDefault=None,
                 userMaximum=math.inf):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` to subset."""
        self.userMinimum: float = userMinimum
        """New minimum value of the axis in the target variable font.
        If not specified, assume the same minimum value as the full axis.
        (default = ``-math.inf``)"""
        self.userDefault: Optional[float] = userDefault
        """New default value of the axis in the target variable font.
        If not specified, assume the same default value as the full axis.
        (default = ``None``)"""
        self.userMaximum: float = userMaximum
        """New maximum value of the axis in the target variable font.
        If not specified, assume the same maximum value as the full axis.
        (default = ``math.inf``)"""


class ValueAxisSubsetDescriptor(SimpleDescriptor):
    """Single value of a discrete or continuous axis to use in a variable
    font.

    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userValue')

    def __init__(self, *, name, userValue):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` or
        :class:`DiscreteAxisDescriptor` to freeze."""
        self.userValue: float = userValue
        """Value in user coordinates at which to freeze the given axis."""


class VariableFontDescriptor(SimpleDescriptor):
    """Container for variable fonts, sub-spaces of the Designspace.

    Use-cases:

    - From a single DesignSpace with discrete axes, define 1 variable font
      per value on the discrete axes. Before version 5, you would have
      needed 1 DesignSpace per such variable font, and a lot of data
      duplication.
    - From a big variable font with many axes, define subsets of that
      variable font that only include some axes and freeze other axes at a
      given location.

    .. versionadded:: 5.0
    """
    flavor = "variable-font"
    _attrs = ('filename', 'axisSubsets', 'lib')

    filename = posixpath_property("_filename")

    def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
        self.name: str = name
        """string, required. Name of this variable font, to identify it
        during the build process and from other parts of the document, and
        also as a filename in case the filename property is empty.

        VarLib.
        """
        self.filename: str = filename
        """string, optional. Relative path to the variable font file, **as
        it is in the document**. The file may or may not exist.

        If not specified, the :attr:`name` will be used as a basename for
        the file.
        """
        self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = (
            axisSubsets or []
        )
        """Axis subsets to include in this variable font.

        If an axis is not mentioned, assume that we only want the default
        location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
        """
        self.lib: MutableMapping[str, Any] = lib or {}
        """Custom data associated with this variable font."""
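# Usage sketch (hypothetical helper): declaring a variable font that keeps
# the full weight range but freezes a discrete italic axis at 1; the names
# are illustrative.
def _example_add_variable_font(doc):
    from fontTools.designspaceLib import (
        RangeAxisSubsetDescriptor,
        ValueAxisSubsetDescriptor,
        VariableFontDescriptor,
    )

    vf = VariableFontDescriptor(
        name="MyFamily-Italic-VF",  # placeholder name
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="weight"),               # full range
            ValueAxisSubsetDescriptor(name="italic", userValue=1),  # frozen
        ],
    )
    doc.addVariableFont(vf)
    return vf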
class BaseDocWriter(object):
    _whiteSpace = "    "
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontDescriptorClass = VariableFontDescriptor
    valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
    rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
    instanceDescriptorClass = InstanceDescriptor

    @classmethod
    def getAxisDecriptor(cls):
        return cls.axisDescriptorClass()

    @classmethod
    def getSourceDescriptor(cls):
        return cls.sourceDescriptorClass()

    @classmethod
    def getInstanceDescriptor(cls):
        return cls.instanceDescriptorClass()

    @classmethod
    def getRuleDescriptor(cls):
        return cls.ruleDescriptorClass()

    def __init__(self, documentPath, documentObject: DesignSpaceDocument):
        self.path = documentPath
        self.documentObject = documentObject
        self.root = ET.Element("designspace")

    def intOrFloat(self, num):
        if int(num) == num:
            return "%d" % num
        return ("%f" % num).rstrip('0').rstrip('.')

    def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
        if not data:
            return
        libElement = ET.Element('lib')
        libElement.append(plistlib.totree(data, indent_level=indent_level))
        parentElement.append(libElement)

    # ... one _add* method per element type (axes, labels, rules, sources,
    # variable fonts, instances) builds the XML tree; write() then saves it
    # with ET.ElementTree(self.root).write(...).


class BaseDocReader(LogMixin):
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontsDescriptorClass = VariableFontDescriptor
    valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
    rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
    instanceDescriptorClass = InstanceDescriptor

    def __init__(self, documentPath, documentObject):
        self.path = documentPath
        self.documentObject = documentObject
        tree = ET.parse(self.path)
        self.root = tree.getroot()
        self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
        self._axes = []
        self.rules = []
        self.sources = []
        self.instances = []
        self.axisDefaults = {}
        self._strictAxisNames = True

    @classmethod
    def fromstring(cls, string, documentObject):
        f = BytesIO(tobytes(string, encoding="utf-8"))
        self = cls(f, documentObject)
        self.path = None
        return self

    def read(self):
        self.readAxes()
        self.readLabels()
        self.readRules()
        self.readVariableFonts()
        self.readSources()
        self.readInstances()
        self.readLib()

    def readLib(self):
        """Read the lib element for the whole document."""
        for libElement in self.root.findall(".lib"):
            self.documentObject.lib = plistlib.fromtree(libElement[0])

    # ... one read* method per element type populates the document object;
    # locationFromElement() returns a tuple of (designLocation, userLocation)
    # for a ``<location>`` element (.. versionchanged:: 5.0).
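# Usage sketch (hypothetical helper): round-tripping a document through the
# default reader and writer; both paths are placeholders.
def _example_roundtrip():
    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
    for axis in doc.axes:
        print(axis.tag, axis.name)
    xml = doc.tostring(encoding="unicode")  # serialize to an XML string
    doc.write("some/path/to/roundtrip.designspace")
    return xml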
class DesignSpaceDocument(LogMixin, AsDictMixin):
    """The DesignSpaceDocument object can read and write ``.designspace``
    data. It imports the axes, sources, variable fonts and instances to
    very basic **descriptor** objects that store the data in attributes.
    Data is added to the document by creating such descriptor objects,
    filling them with data and then adding them to the document.

    .. code:: python

        from fontTools.designspaceLib import DesignSpaceDocument
        doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
        doc.formatVersion
        doc.elidedFallbackName
        doc.axes
        doc.locationLabels
        doc.rules
        doc.rulesProcessingLast
        doc.sources
        doc.variableFonts
        doc.instances
        doc.lib

    **Note:** Python attribute names are usually camelCased, the
    corresponding `XML attributes are usually all lowercase
    <https://fonttools.readthedocs.io/en/latest/designspaceLib/xml.html>`_.

    This object can be subclassed to work with different objects, as long
    as they have the same attributes. Reader and Writer objects can be
    subclassed as well.
    """

    def __init__(self, readerClass=None, writerClass=None):
        self.path = None
        """String, optional. When the document is read from the disk, this
        is the full path that was given to :meth:`read` or :meth:`fromfile`.
        """
        self.filename = None
        """String, optional. When the document is read from the disk, this
        is its original file name, i.e. the last part of its path.

        When the document is produced by a Python script and still only
        exists in memory, the producing script can write here an indication
        of a possible "good" filename, in case one wants to save the file
        somewhere.
        """
        self.formatVersion: Optional[str] = None
        """Format version for this document, as a string. E.g. "4.0"."""
        self.elidedFallbackName: Optional[str] = None
        """STAT Style Attributes Header field ``elidedFallbackNameID``.

        See: `OTSpec STAT Style Attributes Header
        <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_

        .. versionadded:: 5.0
        """
        self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []
        """List of this document's axes."""
        self.locationLabels: List[LocationLabelDescriptor] = []
        """List of this document's STAT format 4 location labels.

        .. versionadded:: 5.0
        """
        self.rules: List[RuleDescriptor] = []
        """List of this document's rules."""
        self.rulesProcessingLast: bool = False
        """This flag indicates whether the substitution rules should be
        applied before or after other glyph substitution features.

        - False: before
        - True: after.

        Default is False. For new projects, you probably want True. See the
        following issues for more information:
        `fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__
        `fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__

        If you want to use a different feature altogether, e.g. ``calt``,
        use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``:

        .. code:: xml

            <lib>
                <dict>
                    <key>com.github.fonttools.varLib.featureVarsFeatureTag</key>
                    <string>calt</string>
                </dict>
            </lib>
        """
        self.sources: List[SourceDescriptor] = []
        """List of this document's sources."""
        self.variableFonts: List[VariableFontDescriptor] = []
        """List of this document's variable fonts.

        .. versionadded:: 5.0
        """
        self.instances: List[InstanceDescriptor] = []
        """List of this document's instances."""
        self.lib: Dict = {}
        """User defined, custom data associated with the whole document.

        Use reverse-DNS notation to identify your own data.
        Respect the data stored by others.
        """
        self.default: Optional[str] = None
        """Name of the default master.

        This attribute is updated by :meth:`findDefault`.
        """
        if readerClass is not None:
            self.readerClass = readerClass
        else:
            self.readerClass = BaseDocReader
        if writerClass is not None:
            self.writerClass = writerClass
        else:
            self.writerClass = BaseDocWriter

    @classmethod
    def fromfile(cls, path, readerClass=None, writerClass=None):
        """Read a designspace file from ``path`` and return a new instance
        of :class:`DesignSpaceDocument`."""
        self = cls(readerClass=readerClass, writerClass=writerClass)
        self.read(path)
        return self

    @classmethod
    def fromstring(cls, string, readerClass=None, writerClass=None):
        """Read a designspace from a string and return a new instance of
        :class:`DesignSpaceDocument`."""
        self = cls(readerClass=readerClass, writerClass=writerClass)
        reader = self.readerClass.fromstring(string, self)
        reader.read()
        if self.sources:
            self.findDefault()
        return self

    def tostring(self, encoding=None):
        """Returns the designspace as a string. Default encoding ``utf-8``."""
        if encoding is str or (
            encoding is not None and encoding.lower() == "unicode"
        ):
            f = StringIO()
            xml_declaration = False
        elif encoding is None or encoding == "utf-8":
            f = BytesIO()
            encoding = "UTF-8"
            xml_declaration = True
        else:
            raise ValueError("unsupported encoding: '%s'" % encoding)
        writer = self.writerClass(f, self)
        writer.write(encoding=encoding, xml_declaration=xml_declaration)
        return f.getvalue()

    def read(self, path):
        """Read a designspace file from ``path`` and populate the fields of
        ``self`` with the data.
        """
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        self.path = path
        self.filename = os.path.basename(path)
        reader = self.readerClass(path, self)
        reader.read()
        if self.sources:
            self.findDefault()

    def write(self, path):
        """Write this designspace to ``path``."""
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        self.path = path
        self.filename = os.path.basename(path)
        self.updatePaths()
        writer = self.writerClass(path, self)
        writer.write()

    def _posixRelativePath(self, otherPath):
        relative = os.path.relpath(otherPath, os.path.dirname(self.path))
        return posix(relative)

    def newAxisDescriptor(self):
        """Ask the writer class to make us a new axisDescriptor."""
        return self.writerClass.getAxisDecriptor()

    def newSourceDescriptor(self):
        """Ask the writer class to make us a new sourceDescriptor."""
        return self.writerClass.getSourceDescriptor()

    def newInstanceDescriptor(self):
        """Ask the writer class to make us a new instanceDescriptor."""
        return self.writerClass.getInstanceDescriptor()

    def addSource(self, sourceDescriptor: SourceDescriptor):
        """Add the given ``sourceDescriptor`` to ``doc.sources``."""
        self.sources.append(sourceDescriptor)

    def addSourceDescriptor(self, **kwargs):
        """Instantiate a new :class:`SourceDescriptor` using the given
        ``kwargs`` and add it to ``doc.sources``."""
        source = self.writerClass.sourceDescriptorClass(**kwargs)
        self.addSource(source)
        return source

    def addInstance(self, instanceDescriptor: InstanceDescriptor):
        """Add the given ``instanceDescriptor`` to :attr:`instances`."""
        self.instances.append(instanceDescriptor)

    def addInstanceDescriptor(self, **kwargs):
        """Instantiate a new :class:`InstanceDescriptor` using the given
        ``kwargs`` and add it to :attr:`instances`."""
        instance = self.writerClass.instanceDescriptorClass(**kwargs)
        self.addInstance(instance)
        return instance

    def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]):
        """Add the given ``axisDescriptor`` to :attr:`axes`."""
        self.axes.append(axisDescriptor)

    def addAxisDescriptor(self, **kwargs):
        """Instantiate a new :class:`AxisDescriptor` using the given
        ``kwargs`` and add it to :attr:`axes`.

        The axis will be an instance of :class:`DiscreteAxisDescriptor` if
        the ``kwargs`` provide a ``values`` list, or a
        :class:`AxisDescriptor` otherwise.

        .. versionchanged:: 5.0
           Added the possibility to create a :class:`DiscreteAxisDescriptor`.
        """
        if "values" in kwargs:
            axis = self.writerClass.discreteAxisDescriptorClass(**kwargs)
        else:
            axis = self.writerClass.axisDescriptorClass(**kwargs)
        self.addAxis(axis)
        return axis

    def addRule(self, ruleDescriptor: RuleDescriptor):
        """Add the given ``ruleDescriptor`` to :attr:`rules`."""
        self.rules.append(ruleDescriptor)

    def addRuleDescriptor(self, **kwargs):
        """Instantiate a new :class:`RuleDescriptor` using the given
        ``kwargs`` and add it to :attr:`rules`."""
        rule = self.writerClass.ruleDescriptorClass(**kwargs)
        self.addRule(rule)
        return rule

    def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor):
        """Add the given ``variableFontDescriptor`` to :attr:`variableFonts`.

        .. versionadded:: 5.0
        """
        self.variableFonts.append(variableFontDescriptor)

    def addVariableFontDescriptor(self, **kwargs):
        """Instantiate a new :class:`VariableFontDescriptor` using the given
        ``kwargs`` and add it to :attr:`variableFonts`.

        .. versionadded:: 5.0
        """
        variableFont = self.writerClass.variableFontDescriptorClass(**kwargs)
        self.addVariableFont(variableFont)
        return variableFont

    def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor):
        """Add the given ``locationLabelDescriptor`` to
        :attr:`locationLabels`.

        .. versionadded:: 5.0
        """
        self.locationLabels.append(locationLabelDescriptor)

    def addLocationLabelDescriptor(self, **kwargs):
        """Instantiate a new :class:`LocationLabelDescriptor` using the
        given ``kwargs`` and add it to :attr:`locationLabels`.

        .. versionadded:: 5.0
        """
        locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs)
        self.addLocationLabel(locationLabel)
        return locationLabel

    @property
    def formatTuple(self) -> Tuple[int, int]:
        """Return the formatVersion as a tuple of (major, minor).

        .. versionadded:: 5.0
        """
        if self.formatVersion is None:
            return (5, 0)
        numbers = (int(i) for i in self.formatVersion.split("."))
        major = next(numbers)
        minor = next(numbers, 0)
        return (major, minor)

    def getAxisOrder(self):
        """Return a list of axis names, in the same order as defined in the
        document."""
        names = []
        for axisDescriptor in self.axes:
            names.append(axisDescriptor.name)
        return names

    def getAxis(self, name):
        """Return the axis with the given ``name``, or ``None`` if no such
        axis exists."""
        for axisDescriptor in self.axes:
            if axisDescriptor.name == name:
                return axisDescriptor
        return None

    def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]:
        """Return the top-level location label with the given ``name``, or
        ``None`` if no such label exists.

        .. versionadded:: 5.0
        """
        for label in self.locationLabels:
            if label.name == name:
                return label
        return None

    def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict:
        """Map a user location to a design location.

        Assume that missing coordinates are at the default location for
        that axis.

        Note: the output won't be anisotropic, only the xvalue is set.

        .. versionadded:: 5.0
        """
        return {
            axis.name: axis.map_forward(userLocation.get(axis.name, axis.default))
            for axis in self.axes
        }

    def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict:
        """Map a design location to a user location.

        Assume that missing coordinates are at the default location for
        that axis. When the input location is anisotropic, only the xvalue
        is used.

        .. versionadded:: 5.0
        """
        return {
            axis.name: (
                axis.map_backward(designLocation[axis.name])
                if axis.name in designLocation
                else axis.default
            )
            for axis in self.axes
        }

    def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]:
        """Return the :class:`LocationLabel` that matches the given
        ``userLocation``, or ``None`` if no such label exists.

        .. versionadded:: 5.0
        """
        return next(
            (label for label in self.locationLabels
             if label.getFullUserLocation(self) == userLocation),
            None,
        )
descriptor.filename == None descriptor.path == None", "location, STAT field ``valueNameID``.\"\"\" self.elidable: bool = elidable \"\"\"STAT flag", "new_path return new_path def posixpath_property(private_name): \"\"\"Generate a propery that holds", "instanceElement.attrib.get('location') if (designLocation or userLocation) and locationLabel is not None:", "it will be necessary to define user-facing readable names for", "None: glyphElement.attrib['unicode'] = \" \".join([hex(u) for u in data.get('unicodes')]) if", "self.name def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict: \"\"\"Get the complete", "= userLocation or {} \"\"\"dict. Axis values for this instance,", "to encode this label. =========== ========= =========== =========== =============== STAT", "versionadded:: 5.0 \"\"\" return next( (label for label in self.locationLabels", "from __future__ import annotations import collections import copy import itertools", "documentObject): f = BytesIO(tobytes(string, encoding=\"utf-8\")) self = cls(f, documentObject) self.path", "infoElement.attrib.get('mute') == '1': sourceObject.muteInfo = True for featuresElement in sourceElement.findall(\".features\"):", "userMaximum)): return self.rangeAxisSubsetDescriptorClass(name=name) raise DesignSpaceDocumentError( \"axis-subset element must have min/max/default", "However, this default value is less important than in continuous", "= str(axisObject.axisOrdering) for label in axisObject.axisLabels: self._addAxisLabel(labelsElement, label) axisElement.append(labelsElement) if", "in locationElement.findall(\".dimension\"): dimName = dimensionElement.attrib.get(\"name\") if self._strictAxisNames and dimName not", "return sourceDescriptor return None def normalizeLocation(self, location): \"\"\"Return a dict", "\"\"\" self.familyName = familyName \"\"\"string. Family name of this instance.", "design space coordinates. MutatorMath + Varlib. .. seealso:: This may", "= sourceDescriptor return sourceDescriptor return None def normalizeLocation(self, location): \"\"\"Return", "in designLocation: dimElement = ET.Element('dimension') dimElement.attrib['name'] = axis.name value =", "optional. When the document is read from the disk, this", "of this variable to identify it during the build process", "the source file, **as it is in the document**. MutatorMath", "**Note:** Python attribute names are usually camelCased, the corresponding `XML", "need special masters (to record the results of executed rules", "in self.axes: names.append(axisDescriptor.name) return names def getAxis(self, name): \"\"\"Return the", "loaded: source.font = loaded[source.path] else: if source.path is None: raise", "in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] elif", "is None: basename = \"VF\" axisNames = \"\".join([f\"-{axis.tag}{value}\" for axis,", "``filename`` and store it in this field, or write the", "= \"InstanceStyleMapStyleName\" i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever' doc.addInstance(i2) \"\"\" flavor = \"instance\"", "to interfere. case 2. descriptor.filename == \"../something\" descriptor.path == None", ".. 
versionadded:: 5.0 \"\"\" self.localisedFamilyName[languageCode] = tostr(familyName) def getFamilyName(self, languageCode=\"en\"):", "and self.documentObject.path is not None: instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename) else:", "True def processRules(rules, location, glyphNames): \"\"\"Apply these rules at this", "is not None: instanceElement.attrib['name'] = instanceObject.name if instanceObject.locationLabel is not", "Right before we save we need to identify and respond", "try: unicodes = [int(u, 16) for u in unicodes.split(\" \")]", "\"\"\" fonts = [source.font for source in self.sources] try: for", "of keyword arguments, and returns a new font object loaded", "__init__( self, *, tag=None, name=None, labelNames=None, minimum=None, default=None, maximum=None, hidden=False,", "dimension \"{dimName}\"\" with yvalue=\"{yValue}\"') designLoc[dimName] = (xValue, yValue) elif xValue", "glyphElement.findall('.masters/master'): fontSourceName = masterElement.attrib.get('source') designLocation, userLocation = self.locationFromElement(masterElement) if userLocation:", "self.name = name \"\"\"string. Unique name for this rule. Can", "\"\"\"number. The maximum value for this axis in user space.", "sub[1] ruleElement.append(subElement) if len(ruleElement): self.root.findall('.rules')[0].append(ruleElement) def _addAxis(self, axisObject): axisElement =", "None def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]: \"\"\"Return the top-level", "anisotropic, only the xvalue is set. .. versionadded:: 5.0 \"\"\"", "indicates whether the substitution rules should be applied before or", "maximum)]. Varlib. \"\"\" self.axisOrdering = axisOrdering \"\"\"STAT table field ``axisOrdering``.", "read(self, path): \"\"\"Read a designspace file from ``path`` and populates", "def getVariableFonts(self) -> List[VariableFontDescriptor]: \"\"\"Return all variable fonts defined in", "versionadded:: 5.0 \"\"\" flavor = \"variable-font\" _attrs = ('filename', 'axisSubsets',", "that was given to :meth:`read` or :meth:`fromfile`. \"\"\" self.filename =", "'familyName', 'styleName', 'localisedFamilyName'] filename = posixpath_property(\"_filename\") path = posixpath_property(\"_path\") def", "a variable font that covers the whole space. In version", "= name \"\"\"Label for this named location, STAT field ``valueNameID``.\"\"\"", "name is None: raise DesignSpaceDocumentError(\"label element must have a name", "glyphElement.attrib[\"mute\"] = '1' sourceElement.append(glyphElement) if self.effectiveFormatTuple >= (5, 0): self._addLocationElement(sourceElement,", "= glyphName d = dict(font=fontSourceName, location=designLocation, glyphName=masterGlyphName) if glyphSources is", "\"\"\" def __init__(self, readerClass=None, writerClass=None): self.path = None \"\"\"String, optional.", "userLocation) and locationLabel is not None: raise DesignSpaceDocumentError('instance element must", "muted in the instances. MutatorMath only. 
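A minimal sketch of the reading entry point described above; the path is a placeholder and the loop is only a quick inspection of the parsed data.

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    # fromfile() parses the XML and, when sources are present,
    # locates the default master via findDefault().
    doc = DesignSpaceDocument.fromfile("path/to/my.designspace")

    for axis in doc.axes:
        print(axis.tag, axis.name)
    for source in doc.sources:
        print(source.filename, source.location)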
\"\"\" @property def location(self):", "attribute\" % (source.name or \"<Unknown>\") ) source.font = opener(source.path, **kwargs)", "in the element attribute localisedStyleNameElement = ET.Element('stylename') localisedStyleNameElement.attrib[XML_LANG] = code", "a new :class:`VariableFontDescriptor` using the given ``kwargs`` and add it", "AsDictMixin(object): def asdict(self): d = {} for attr, value in", "localisedStyleNameElement.text = instanceObject.getStyleName(code) instanceElement.append(localisedStyleNameElement) if instanceObject.localisedFamilyName: languageCodes = list(instanceObject.localisedFamilyName.keys()) languageCodes.sort()", "self.root.findall(\".rules/rule\"): ruleObject = self.ruleDescriptorClass() ruleName = ruleObject.name = ruleElement.attrib.get(\"name\") #", "if axisName in validatedLocation: # only accept values we know", "= {} cdMin = conditionElement.attrib.get(\"minimum\") if cdMin is not None:", "not None else None linkedValueStr = element.get(\"linkeduservalue\") linkedValue = float(linkedValueStr)", "conditionsets. - Each conditionset is a list of conditions. -", "cls(f, documentObject) self.path = None return self def read(self): self.readAxes()", "= [source.font for source in self.sources] try: for source in", "\"variable-font\" _attrs = ('filename', 'axisSubsets', 'lib') filename = posixpath_property(\"_filename\") def", "ET.Element('axis') axisElement.attrib['tag'] = axisObject.tag axisElement.attrib['name'] = axisObject.name self._addLabelNames(axisElement, axisObject.labelNames) if", "= sourceElement.attrib.get(\"familyname\") if familyName is not None: sourceObject.familyName = familyName", "only part of the full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` ..", ":class:`DiscreteAxisDescriptor` to \"snapshot\" or \"freeze\". \"\"\" self.userValue: float = userValue", "``minimum`` and ``maximum``, while a discrete axis has a list", "function. Returns: List of font objects in the order they", "instance in self.documentObject.instances ) ): if minVersion < (5, 0):", "self.intOrFloat(value) locElement.append(dimElement) elif userLocation is not None and axis.name in", "=========== ========= =========== =========== =============== \"\"\" if self.linkedUserValue is not", "instanceElement.attrib.get('stylemapfamilyname') if styleMapFamilyName is not None: instanceObject.styleMapFamilyName = styleMapFamilyName styleMapStyleName", "for subElement in ruleElement.findall('.sub'): a = subElement.attrib['name'] b = subElement.attrib['with']", "and not force: continue if self.path is not None: descriptor.filename", "data from this source needs to be muted (i.e. 
not", "know validatedLocation[axisName] = axisValue for dimensionName, dimensionValue in validatedLocation.items(): dimElement", "version 5, you would have needed 1 DesignSpace per such", "doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources doc.variableFonts doc.instances doc.lib \"\"\" def __init__(self,", "= defcon.Font(\"master.ufo1\") s1.location = dict(weight=0) s1.familyName = \"MasterFamilyName\" s1.styleName =", "userLocation=label.userLocation) parentElement.append(labelElement) def _addLocationElement( self, parentElement, *, designLocation: AnisotropicLocationDict =", "= [0, 1] a2.name = \"Italic\" a2.tag = \"ITAL\" a2.labelNames['fr']", "it ourselves for 'xml:lang' XML_NS = \"{http://www.w3.org/XML/1998/namespace}\" XML_LANG = XML_NS", "* (4 - len(name)) else: tag = name[:4] return tag,", "label.labelNames) self._addLocationElement(labelElement, userLocation=label.userLocation) parentElement.append(labelElement) def _addLocationElement( self, parentElement, *, designLocation:", "cond.get('minimum') is not None: conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum')) if cond.get('maximum') is", "values in valueCombinations: basename = None if self.filename is not", "or self.documentObject.variableFonts or any( instance.locationLabel or instance.userLocation for instance in", "setter rewrites paths using forward slashes if value is not", "axis as it is used in the location dicts. MutatorMath", "Indicates if the kerning data from this source needs to", "if name == a: swap = True break if swap:", "userDefault, userMaximum)): return self.rangeAxisSubsetDescriptorClass(name=name) raise DesignSpaceDocumentError( \"axis-subset element must have", "= sourceObject.layerName if sourceObject.localisedFamilyName: languageCodes = list(sourceObject.localisedFamilyName.keys()) languageCodes.sort() for code", "== \"true\" else False olderSibling = True if labelElement.get(\"oldersibling\") ==", "apply, as this axis does not interpolate. - it doesn't", "try to find or make a tag name for this", "we also need to read any conditions that are outside", "raise DesignSpaceDocumentError(f'<master> element \"{fontSourceName}\" must only have design locations (using", "of this document's STAT format 4 labels. .. versionadded:: 5.0\"\"\"", "to work also on Windows.\"\"\" new_path = posixpath.join(*path.split(os.path.sep)) if path.startswith('/'):", "return True def processRules(rules, location, glyphNames): \"\"\"Apply these rules at", "def readLib(self): \"\"\"Read the lib element for the whole document.\"\"\"", "(axis.minimum, axis.default, axis.maximum) ] new[axis.name] = normalizeValue(value, triple) return new", "not None: processingValue = rulesElement.attrib.get(\"processing\", \"first\") if processingValue not in", "calculations). MutatorMath only. \"\"\" self.muteInfo = muteInfo \"\"\"bool. Indicated if", "at this location to these glyphnames. Return a new list", "DiscreteAxisDescriptor): axisElement.attrib['values'] = \" \".join(self.intOrFloat(v) for v in axisObject.values) axisElement.attrib['default']", "= None cd['name'] = conditionElement.attrib.get(\"name\") # # test for things", "# finally the axis values minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) maximum", "getFamilyName(self, languageCode=\"en\"): \"\"\"Getter for :attr:`localisedFamilyName` .. versionadded:: 5.0 \"\"\" return", "ET.Element(\"location\") if name is not None: locElement.attrib['name'] = name validatedLocation", "filename property is empty. VarLib. 
\"\"\" self.filename: str = filename", "take first coord only if isinstance(value, tuple): value = value[0]", "the xvalue is used. .. versionadded:: 5.0 \"\"\" return {", "raise DesignSpaceDocumentError(f\"label element contains unknown attributes: {', '.join(unknown_attrs)}\") name =", "designLocation, userLocation = self.locationFromElement(masterElement) if userLocation: raise DesignSpaceDocumentError(f'<master> element \"{fontSourceName}\"", "basename for the file. \"\"\" self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets", "self.hidden = hidden \"\"\"bool. Whether this axis should be hidden", "document's continuous axes. In the case of Designspace documents before", "for s in axisElement.attrib[\"values\"].split(\" \")] else: axisObject = self.axisDescriptorClass() axisObject.minimum", "2, 3. See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_ ..", "process and from other parts of the document, and also", "elidable \"\"\"STAT flag ``ELIDABLE_AXIS_VALUE_NAME``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\"", "field(s) for which they have data. .. code:: python instance.clearLocation()", "vf.lib, 4) parentElement.append(vfElement) def _addLib(self, parentElement: ET.Element, data: Any, indent_level:", "None else (location or {}) \"\"\"dict. Axis values for this", "piecewiseLinearMap(v, {v: k for k, v in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor):", "value of the axis in the target variable font. If", "axis, it will be necessary to define user-facing readable names", "def __init__(self, *, name, userValue): self.name: str = name \"\"\"Name", "\"wght\" a1.labelNames['fa-IR'] = \"قطر\" a1.labelNames['en'] = \"Wéíght\" a1.map = [(1.0,", "self.intOrFloat(dimensionValue) locElement.append(dimElement) return locElement, validatedLocation def intOrFloat(self, num): if int(num)", "the given ``variableFontDescriptor`` to :attr:`variableFonts`. .. versionadded:: 5.0 \"\"\" self.variableFonts.append(variableFontDescriptor)", "is not None and self.path is not None: sourcePath =", "r1.conditionSets.append([dict(name=\"weight\", minimum=-10, maximum=10), dict(...)]) r1.conditionSets.append([dict(...), dict(...)]) r1.subs.append((\"a\", \"a.alt\")) .. code::", "= \"VF\" axisNames = \"\".join([f\"-{axis.tag}{value}\" for axis, value in zip(discreteAxes,", "= instanceObject.styleName # add localisations if instanceObject.localisedStyleName: languageCodes = list(instanceObject.localisedStyleName.keys())", "that variable font that only include some axes and freeze", "or the :attr:`name`.\"\"\" return self.labelNames.get(\"en\") or self.name class LocationLabelDescriptor(SimpleDescriptor): \"\"\"Container", "`{self.name}`.' ) return label def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:", "a filename attr. useless, but no reason to interfere. case", "wants to save the file somewhere. \"\"\" self.formatVersion: Optional[str] =", "> cd['maximum']: return False elif cd.get('maximum') is None: if cd['minimum']", "if not self.map: return v return piecewiseLinearMap(v, {k: v for", "Optional[str] = None \"\"\"STAT Style Attributes Header field ``elidedFallbackNameID``. 
See:", "= itertools.product(*[axis.values for axis in discreteAxes]) for values in valueCombinations:", "the default location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).", "= [] \"\"\"List of this document's STAT format 4 labels.", "explicit user location along this axis. No anisotropy. - ``axis.default``:", "# dict while writing it out, as preserved below. if", "self.localisedFamilyName.get(languageCode) def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: \"\"\"Get the complete", "is not None else None maximumStr = element.get(\"usermaximum\") maximum =", "to the source file, **as it is in the document**.", "\"1\" sourceElement.append(libElement) if sourceObject.copyGroups: groupsElement = ET.Element('groups') groupsElement.attrib['copy'] = \"1\"", ":attr:`rules`.\"\"\" self.rules.append(ruleDescriptor) def addRuleDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`RuleDescriptor` using", "labelElement is not None: if \"ordering\" in labelElement.attrib: axisObject.axisOrdering =", "self.sources] try: for source in self.sources: source.font = None res", "value from axis mapping's output to input. Returns value unchanged", "dimElement.attrib['xvalue'] = self.intOrFloat(value) locElement.append(dimElement) elif userLocation is not None and", "version 5 and above documents, there can be as many", "instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName if instanceObject.styleMapStyleName is not None: instanceElement.attrib['stylemapstylename'] =", "None or encoding == \"utf-8\": f = BytesIO() encoding =", "labelObject) self.root.append(labelsElement) if self.documentObject.rules: if getattr(self.documentObject, \"rulesProcessingLast\", False): attributes =", "yValue is not None: if xValue is None: raise DesignSpaceDocumentError(f'Missing", "raise DesignSpaceDocumentError(f'Exactly one of uservalue=\"\" or xvalue=\"\" must be provided", "normalizeValue new = {} for axis in self.axes: if axis.name", "infoElement = ET.Element('info') instanceElement.append(infoElement) self._addLib(instanceElement, instanceObject.lib, 4) self.root.findall('.instances')[0].append(instanceElement) def _addSource(self,", "location. - If a condition has no minimum, check for", "<dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib> \"\"\" self.sources: List[SourceDescriptor] = []", "= conditionElement.attrib.get(\"maximum\") if cdMax is not None: cd['maximum'] = float(cdMax)", "except ValueError: self.log.warning(\"ValueError in readLocation yValue %3.3f\", yValue) if userValue", "a))},\" for a in self._attrs] attrs = indent('\\n'.join(attrs), ' ')", "other value for filename, it should be fine case 4.", "keys. \"\"\" # list of substitutions stored as tuples of", "axis name names = { 'weight': ('wght', dict(en = 'Weight')),", "the right thing for the filename attribute. :: case 1.", "import tobytes, tostr \"\"\" designSpaceDocument - read and write designspace", "calculated. .. deprecated:: 5.0 \"\"\" self.lib = lib or {}", "self.documentObject = documentObject self.effectiveFormatTuple = self._getEffectiveFormatTuple() self.root = ET.Element(\"designspace\") def", "# '{http://www.w3.org/XML/1998/namespace}lang' for key, lang in labelNameElement.items(): if key ==", "os.path.abspath(os.path.join(os.path.dirname(self.path), filename)) else: sourcePath = None sourceName = sourceElement.attrib.get('name') if", "The default value for this axis, i.e. 
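The weight-axis example recoverable from this section, completed into a self-contained sketch (the names, map and label are as given above).

.. code:: python

    from fontTools.designspaceLib import (
        AxisDescriptor,
        AxisLabelDescriptor,
        DesignSpaceDocument,
    )

    doc = DesignSpaceDocument()

    a1 = AxisDescriptor()
    a1.minimum = 1
    a1.maximum = 1000
    a1.default = 400
    a1.name = "weight"
    a1.tag = "wght"
    a1.labelNames["fa-IR"] = "قطر"
    a1.labelNames["en"] = "Wéíght"
    # user space -> design space warp
    a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
    a1.axisOrdering = 1
    a1.axisLabels = [
        AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
    ]
    doc.addAxis(a1)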
Rules make glyph substitutions trigger conditionally in some parts of the design space. A ``RuleDescriptor`` has a unique ``name`` (which can be used to reference this rule elsewhere), a list of ``conditionSets`` and a list of ``subs``:

- Each conditionset is a list of conditions.
- Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
- Substitutions are stored as tuples of glyph names, e.g. ``("a", "a.alt")``.

A rule applies if any of its conditionsets matches, and a conditionset matches only if all of its conditions match the given location:

- If a condition has no minimum, check for ``<= maximum``.
- If a condition has no maximum, check for ``> minimum``.
- A condition that defines neither bound is skipped when writing; reading one raises ``DesignSpaceDocumentError("condition missing required minimum or maximum in rule")``. Stray conditions found outside a conditionset are wrapped in a new conditionset, with a warning.

``processRules(rules, location, glyphNames)`` applies these rules at this location to these glyph names and returns a new list of glyph names with substitutions applied; rule order matters. The document flag ``doc.rulesProcessingLast`` (the ``processing="first"|"last"`` attribute of the ``<rules>`` element; any other value raises an error) indicates whether the substitution rules should be applied before or after other glyph substitution features. For new projects, you probably want ``True``; see fontTools#1371 for more information. A rule sketch follows below.
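The rule example from this section, continuing with the ``doc`` from the previous sketch; the original elides the extra condition and the second conditionset (``dict(...)``), so the concrete bounds below are purely illustrative.

.. code:: python

    from fontTools.designspaceLib import RuleDescriptor

    r1 = RuleDescriptor()
    r1.name = "unique.rule.name"
    # A rule applies if any conditionset matches; a conditionset
    # matches only if all of its conditions do.
    r1.conditionSets.append([
        dict(name="weight", minimum=-10, maximum=10),
        dict(name="width", minimum=0, maximum=500),  # illustrative
    ])
    r1.conditionSets.append([
        dict(name="weight", minimum=500, maximum=1000),  # illustrative
    ])
    r1.subs.append(("a", "a.alt"))
    doc.addRule(r1)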
\"\"\" def getFormat(self) -> int:", "self.path is not None for descriptor in self.sources + self.instances:", "= ET.Element('groups') groupsElement.attrib['copy'] = \"1\" sourceElement.append(groupsElement) if sourceObject.copyFeatures: featuresElement =", "5.0 \"\"\" self.variableFonts.append(variableFontDescriptor) def addVariableFontDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`VariableFontDescriptor`", "like either one of these: .. code-block:: xml <glyph name=\"b\"", "if cond.get('minimum') is None and cond.get('maximum') is None: # neither", "yValue) elif xValue is not None: designLoc[dimName] = xValue else:", "path. If the filename attribute is not None: skip it.", "instanceElement.append(localisedFamilyNameElement) if instanceObject.localisedStyleMapStyleName: languageCodes = list(instanceObject.localisedStyleMapStyleName.keys()) languageCodes.sort() for code in", "[] for cond in conditions: if cond.get('minimum') is not None:", "is defined, don't add this condition continue conditionElement = ET.Element('condition')", "ValueAxisSubsetDescriptor): subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue) subsetsElement.append(subsetElement) vfElement.append(subsetsElement) self._addLib(vfElement, vf.lib, 4) parentElement.append(vfElement)", "its default location. .. seealso:: This may be only part", "the whole space. In version 5 and above documents, there", "None: raise DesignSpaceDocumentError(\"label element must have a name attribute.\") designLocation,", "subsetElement = ET.Element('axis-subset') subsetElement.attrib['name'] = subset.name if isinstance(subset, RangeAxisSubsetDescriptor): if", "value. No anisotropy. .. versionadded:: 5.0 \"\"\" label = self.getLocationLabelDescriptor(doc)", "= labelNames or {} \"\"\"dict. When defining a non-registered axis,", "1000 a1.default = 400 a1.name = \"weight\" a1.tag = \"wght\"", "readerClass is not None: self.readerClass = readerClass else: self.readerClass =", "major = next(numbers) minor = next(numbers, 0) return (major, minor)", "infoElement, instanceObject): \"\"\" Read the info element.\"\"\" instanceObject.info = True", "Any, Dict, List, MutableMapping, Optional, Tuple, Union from fontTools.misc import", "a warp of user space to design space coordinates. If", "(using xvalue=\"\").') if designLocation is not None: glyphData['instanceLocation'] = designLocation", "UFO fonts or TTFont objects. The :attr:`font` attribute is shared", "self.map if k == value), value) def map_backward(self, value): \"\"\"Maps", "provide the reference glyph set for the designspace, as fonts", "means ``foreground``. \"\"\" self.familyName = familyName \"\"\"string. Family name of", "DesignSpaceDocumentError(\"label element must have a name attribute.\") valueStr = element.get(\"uservalue\")", "in self.map if v == value), value) class AxisLabelDescriptor(SimpleDescriptor): \"\"\"Container", ") if conditionSet is not None: ruleObject.conditionSets.append(conditionSet) for subElement in", "some axes and freeze other axes at a given location.", "be only part of the full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation`", "['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels'] def __init__(", "or sourceObject.muteInfo: infoElement = ET.Element('info') if sourceObject.copyInfo: infoElement.attrib['copy'] = \"1\"", "False elif cd.get('maximum') is None: if cd['minimum'] > value: return", "document, as a string. E.g. 
\"4.0\" \"\"\" self.elidedFallbackName: Optional[str] =", "): locElement = ET.Element(\"location\") for axis in self.documentObject.axes: if designLocation", "they appear in the sources list. \"\"\" # we load", "else: result[axis.name] = axis.map_forward(axis.default) return result def getFullUserLocation(self, doc: 'DesignSpaceDocument')", "not save temporary source names sourceElement.attrib['name'] = sourceObject.name if sourceObject.familyName", "self.userValue: float = userValue \"\"\"STAT field ``value`` (format 1, 3)", "loadSourceFonts(self, opener, **kwargs): \"\"\"Ensure SourceDescriptor.font attributes are loaded, and return", "\"\"\"The absolute path, calculated from filename.\"\"\" self.font = font \"\"\"Any", "memory, the producing script can write here an indication of", "value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format", "this document's instances.\"\"\" self.lib: Dict = {} \"\"\"User defined, custom", "DiscreteAxisDescriptor]): \"\"\"Add the given ``axisDescriptor`` to :attr:`axes`.\"\"\" self.axes.append(axisDescriptor) def addAxisDescriptor(self,", "userLocation[axis.name] dimElement.attrib['uservalue'] = self.intOrFloat(value) locElement.append(dimElement) if len(locElement) > 0: parentElement.append(locElement)", "if self.documentObject.variableFonts: variableFontsElement = ET.Element(\"variable-fonts\") for variableFont in self.documentObject.variableFonts: self._addVariableFont(variableFontsElement,", "path = path.__fspath__() self.path = path self.filename = os.path.basename(path) reader", "in user space coordinates. MutatorMath + Varlib. .. seealso:: This", "same location as the LocationLabel. .. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` ..", "self.muteInfo = muteInfo \"\"\"bool. Indicated if the interpolating font.info data", "has a list of ``values``. Example: an Italic axis with", "the \"xml:lang\" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' for key, lang", "if name is None: raise DesignSpaceDocumentError(\"label element must have a", "may or may not exist. If not specified, the :attr:`name`", "in parentElement.findall('.condition'): cd = {} cdMin = conditionElement.attrib.get(\"minimum\") if cdMin", "not axisElements: return for axisElement in axisElements: if self.documentObject.formatTuple >=", "as # '{http://www.w3.org/XML/1998/namespace}lang' for key, lang in labelNameElement.items(): if key", "labelObject in self.documentObject.locationLabels: self._addLocationLabel(labelsElement, labelObject) self.root.append(labelsElement) if self.documentObject.rules: if getattr(self.documentObject,", "filename attr from the path and this document path. If", "\"{http://www.w3.org/XML/1998/namespace}\" XML_LANG = XML_NS + \"lang\" def posix(path): \"\"\"Normalize paths", "== userLocation), None ) def updateFilenameFromPath(self, masters=True, instances=True, force=False): \"\"\"Set", "= [] for axisSubset in axisSubsetsElement.iterfind(\".axis-subset\"): axisSubsets.append(self.readAxisSubset(axisSubset)) lib = None", "a :class:`AxisDescriptor` otherwise. 
\"\"\" if \"values\" in kwargs: axis =", "designLocation glyphSources = None for masterElement in glyphElement.findall('.masters/master'): fontSourceName =", "axis is that a continuous axis has a ``minimum`` and", "# '{http://www.w3.org/XML/1998/namespace}lang' } locationLabel = self.locationLabelDescriptorClass( name=name, userLocation=userLocation, elidable=elidable, olderSibling=olderSibling,", "rules.\"\"\" self.rulesProcessingLast: bool = False \"\"\"This flag indicates whether the", "element must contain an axis-subsets element.\") axisSubsets = [] for", "elementtree reads the \"xml:lang\" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang' }", "= [] return glyphNames AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]", "instanceElement.attrib.get('familyname') if familyname is not None: instanceObject.familyName = familyname stylename", "data.get('unicodes') is not None: glyphElement.attrib['unicode'] = \" \".join([hex(u) for u", "designLocation, userLocation = self.locationFromElement(labelElement) if designLocation: raise DesignSpaceDocumentError(f'<label> element \"{name}\"", "DesignSpaceDocumentError( \"The axis-subset element for a discrete subset must have", "for filename, it should be fine case 4. descriptor.filename ==", "to load UFO sources using defcon: designspace = DesignSpaceDocument.fromfile(\"path/to/my.designspace\") designspace.loadSourceFonts(defcon.Font)", "0), (1, -11)] a2.axisOrdering = 2 a2.axisLabels = [ AxisLabelDescriptor(name=\"Roman\",", "data duplication. - From a big variable font with many", "cd.get('maximum') is None: raise DesignSpaceDocumentError( \"condition missing required minimum or", "for axisSubset in axisSubsetsElement.iterfind(\".axis-subset\"): axisSubsets.append(self.readAxisSubset(axisSubset)) lib = None libElement =", "== XML_LANG: styleMapFamilyName = styleMapFamilyNameElement.text instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang) designLocation, userLocation =", "= \"UTF-8\" xml_declaration = True else: raise ValueError(\"unsupported encoding: '%s'\"", "if data.get('note') is not None: noteElement = ET.Element('note') noteElement.text =", "= map or [] \"\"\"list of input / output values", "name[:4] return tag, dict(en=name) class AbstractAxisDescriptor(SimpleDescriptor): flavor = \"axis\" def", "be subclassed as well. 
An ``InstanceDescriptor`` is a simple container for data related to a single instance (its ``flavor`` is ``"instance"``). It has a ``filename``/``path`` pair like sources, ``familyName`` and ``styleName`` with localised variants (methods such as ``setStyleName(styleName, languageCode="en")`` give easier access to the localisations), ``postScriptFontName``, and ``styleMapFamilyName``/``styleMapStyleName`` with their own localised dicts (the style map names bind together the full font family, which is useful for the STAT table). It also carries a ``lib`` dict for custom data, a ``glyphs`` dict for special master definitions for glyphs (MutatorMath only; a glyph entry can hold its own ``instanceLocation``, ``unicodes``, a ``note`` and a list of masters, for example to record the results of an anisotropic interpolation), and ``kerning``/``info`` flags.

An instance's place in the space can be given in three ways, and an instance element must have at most one of the ``location="..."`` attribute (a label name) or the nested location element:

- ``designLocation``: axis values in design space coordinates, possibly anisotropic (an ``(x, y)`` tuple per axis);
- ``userLocation``: axis values in user space coordinates;
- ``locationLabel``: the name of a top-level ``LocationLabelDescriptor``, meaning the instance sits at the same location as that label.

Each of these may be only part of the full location. ``getFullDesignLocation(doc)`` and ``getFullUserLocation(doc)`` compute the complete location: it is determined for each axis independently by taking the first available of the explicit design location along this axis (possibly anisotropic), the explicit user location along this axis (no anisotropy), the location label, and the default axis value. To change the location of an instance wholesale, first call ``clearLocation()`` (optionally for a single axis name), then set the field(s) for which you have data, as in the sketch below.
Version 5.0 documents can describe several variable fonts, sub-spaces of the Designspace. A ``VariableFontDescriptor`` has a ``name`` (used to identify it during the build process and from other parts of the document, and also as a filename in case the filename property is empty), an optional ``filename`` (relative path to the variable font file, as it is in the document), a ``lib`` dict, and a list of ``axisSubsets`` describing which axes the font covers:

- a ``RangeAxisSubsetDescriptor`` keeps a continuous axis, optionally clamped to new ``userMinimum``/``userDefault``/``userMaximum`` bounds (by default it spans the same range as the full axis);
- a ``ValueAxisSubsetDescriptor`` freezes an axis at a single ``userValue``; a discrete subset must have one.

If an axis is not mentioned, it is assumed to be at its default location. When no variable fonts are defined, ``doc.getVariableFonts()`` returns implicit variable fonts built from the document's continuous axes, one per combination of discrete-axis values (``itertools.product`` over their values). Before version 5, you would have needed one Designspace per such font, with a lot of data duplication; subsets also let a big variable font with many axes be split into fonts that only include some axes and freeze other axes at a given location.

STAT names are carried by label descriptors. An ``AxisLabelDescriptor`` holds OpenType STAT data for a single axis (formats 1, 2 and 3): a ``name`` (STAT field ``valueNameID``) with localised ``labelNames`` keyed by ``xml:lang`` code, a ``userValue`` (STAT field ``value`` for formats 1 and 3, ``nominalValue`` for format 2), optional ``userMinimum``/``userMaximum`` (STAT fields ``rangeMinValue`` and ``rangeMaxValue``, format 2), an optional ``linkedUserValue`` (format 3), and the ``elidable`` (``ELIDABLE_AXIS_VALUE_NAME``) and ``olderSibling`` STAT flags. ``getFormat()`` derives the STAT format of the Axis value from which fields are filled in:

=========== ========= =========== =========== ===============
STAT format userValue userMinimum userMaximum linkedUserValue
=========== ========= =========== =========== ===============
1           ✅         ❌           ❌           ❌
2           ✅         ✅           ✅           ❌
3           ✅         ❌           ❌           ✅
=========== ========= =========== =========== ===============

A ``LocationLabelDescriptor`` is a STAT format 4 label: a named location in user space, with the same ``elidable``/``olderSibling`` flags and localised ``labelNames``. They are stored in the top-level ``doc.locationLabels`` list, and ``doc.getLocationLabel(name)`` returns one by name.
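A sketch pairing the discrete-axis example from this section with a variable-font subset, continuing with the earlier ``doc``; the axis values, map and ordering are as given above, while the variable font's name and subset choices are hypothetical.

.. code:: python

    from fontTools.designspaceLib import (
        AxisLabelDescriptor,
        DiscreteAxisDescriptor,
        RangeAxisSubsetDescriptor,
        ValueAxisSubsetDescriptor,
        VariableFontDescriptor,
    )

    # Discrete Italic axis: only the listed values exist, nothing
    # interpolates between them.
    a2 = DiscreteAxisDescriptor()
    a2.values = [0, 1]
    a2.default = 0
    a2.name = "Italic"
    a2.tag = "ITAL"
    a2.map = [(0, 0), (1, -11)]
    a2.axisOrdering = 2
    a2.axisLabels = [
        AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
    ]
    doc.addAxis(a2)

    # One variable font that keeps the continuous weight range and
    # freezes Italic at 1 (hypothetical subset).
    vf = VariableFontDescriptor(
        name="MyFontVF-Italic",  # hypothetical
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="weight"),
            ValueAxisSubsetDescriptor(name="Italic", userValue=1),
        ],
    )
    doc.addVariableFont(vf)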
rules", "if ( any( isinstance(axis, DiscreteAxisDescriptor) or axis.axisOrdering is not None", "% processingValue) self.documentObject.rulesProcessingLast = processingValue == \"last\" for ruleElement in", "axisDescriptor.default ) return loc def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]:", "or :meth:`fromfile`. \"\"\" self.filename = None \"\"\"String, optional. When the", "in self.documentObject.instances: self._addInstance(instanceObject) if self.documentObject.lib: self._addLib(self.root, self.documentObject.lib, 2) tree =", "featuresElement.attrib['copy'] = \"1\" sourceElement.append(featuresElement) if sourceObject.copyInfo or sourceObject.muteInfo: infoElement =", "for attr in self._attrs: try: assert(getattr(self, attr) == getattr(other, attr))", "v in self.map}) class DiscreteAxisDescriptor(AbstractAxisDescriptor): \"\"\"Container for discrete axis data.", "attr, lang in label_name.items() if attr == XML_LANG # Note:", "self.readGlyphElement(glyphElement, instanceObject) for infoElement in instanceElement.findall(\"info\"): self.readInfoElement(infoElement, instanceObject) for libElement", "not None: axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName self.root.append(axesElement) for axisObject in self.documentObject.axes:", "if designLocation is not None and axis.name in designLocation: dimElement", "self.designLocation[axisName] if self.userLocation is None: self.userLocation = {} if axisName", "None: sourceObject.layerName = layerName for libElement in sourceElement.findall('.lib'): if libElement.attrib.get('copy')", "using defcon: designspace = DesignSpaceDocument.fromfile(\"path/to/my.designspace\") designspace.loadSourceFonts(defcon.Font) Or to load masters", "result class RuleDescriptor(SimpleDescriptor): \"\"\"Represents the rule descriptor element: a set", "def readLabels(self): if self.documentObject.formatTuple < (5, 0): return xml_attrs =", "loc = collections.OrderedDict() for axisDescriptor in self.axes: loc[axisDescriptor.name] = axisDescriptor.map_forward(", "do those last. \"\"\" # masters for item in self.sources:", "or write the contents of this field to the disk", "rule. ruleElement = ET.Element('rule') if ruleObject.name is not None: ruleElement.attrib['name']", "axis mapping's output to input. Returns value unchanged if no", "this designspace: - scale all the locations of all masters", "= glyphs or {} \"\"\"dict for special master definitions for", "dict(en = 'Italic')), } if name.lower() in names: return names[name.lower()]", "become a variation axis in a VF. .. code:: python", "minor = next(numbers, 0) return (major, minor) def getVariableFonts(self) ->", "========= =========== =========== =============== 1 ✅ ❌ ❌ ❌ 2", "The STAT format of the Axis value depends on which", ":class:`SourceDescriptor` using the given ``kwargs`` and add it to ``doc.sources``.", "the explicit design location along this axis, possibly anisotropic. 
-", "for inputValue, outputValue in axis.map: newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name) newMap.append((inputValue,", "**kwargs): \"\"\"Instantiate a new :class:`SourceDescriptor` using the given ``kwargs`` and", "for axisElement in axisElements: if self.documentObject.formatTuple >= (5, 0) and", "xvalue=\"\" for the location dimension \"{dimName}\"\" with yvalue=\"{yValue}\"') designLoc[dimName] =", "\"\"\" if self.formatVersion is None: return (5, 0) numbers =", "userValue = float(userValueStr) return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue) else: xml_attrs = {'name',", "and sets the SourceDescriptor.font attribute. If the font attribute is", "familyName = familyNameElement.text sourceObject.setFamilyName(familyName, lang) designLocation, userLocation = self.locationFromElement(sourceElement) if", "data. Respect the data stored by others. \"\"\" self.default: Optional[str]", "\" \"Wrapped them in a new conditionset.\" ) # read", "10.0), (400.0, 66.0), (1000.0, 990.0)] a1.axisOrdering = 1 a1.axisLabels =", "want True. See the following issues for more information: `fontTools#1371", "the same minimum value as the full axis. (default =", "sourceObject.layerName is not None: sourceElement.attrib['layer'] = sourceObject.layerName if sourceObject.localisedFamilyName: languageCodes", "= True if labelElement.get(\"oldersibling\") == \"true\" else False labelNames =", ":attr:`localisedFamilyName` .. versionadded:: 5.0 \"\"\" self.localisedFamilyName[languageCode] = tostr(familyName) def getFamilyName(self,", "@classmethod def getAxisDecriptor(cls): return cls.axisDescriptorClass() @classmethod def getSourceDescriptor(cls): return cls.sourceDescriptorClass()", "None: instanceObject.styleMapStyleName = styleMapStyleName # read localised names for styleNameElement", "<https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_ .. versionadded:: 5.0 \"\"\" self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []", "writerClass=writerClass) reader = self.readerClass.fromstring(string, self) reader.read() if self.sources: self.findDefault() return", "= Dict[str, Union[float, Tuple[float, float]]] SimpleLocationDict = Dict[str, float] class", "glyphElement.attrib.get('unicode') if unicodes is not None: try: unicodes = [int(u,", "continue localisedStyleMapStyleNameElement = ET.Element('stylemapstylename') localisedStyleMapStyleNameElement.attrib[XML_LANG] = code localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code)", "list of substitution rules --> <rules> <rule name=\"vertical.bars\"> <conditionset> <condition", "\"\"\" # list of substitutions stored as tuples of glyphnames", "contexts. The **DesignSpaceDocument** object can be subclassed to work with", "Varlib. This may be only part of the full design", "False olderSibling = True if element.get(\"oldersibling\") == \"true\" else False", "the ufo source sourceObject.filename = filename # path as it", "in rule.conditionSets) def evaluateConditions(conditions, location): \"\"\"Return True if all the", "new projects, you probably want True. See the following issues", "which means ``foreground``. \"\"\" self.familyName = familyName \"\"\"string. Family name", "not None else \"\")) cds.append(cd) return cds def readAxes(self): #", "instances. MutatorMath. .. 
deprecated:: 5.0 \"\"\" self.copyFeatures = copyFeatures \"\"\"bool.", "getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]: \"\"\"Get the :class:`LocationLabelDescriptor` instance that", "options passed on to the opener function. Returns: List of", "DiscreteAxisDescriptor) or axis.axisOrdering is not None or axis.axisLabels for axis", "= ET.Element('conditionset') for cond in conditions: if cond.get('minimum') is None", "designspace = DesignSpaceDocument.fromfile(\"path/to/my.designspace\") designspace.loadSourceFonts(defcon.Font) Or to load masters as FontTools", "without name attribute\") mute = glyphElement.attrib.get(\"mute\") if mute == \"1\":", "be extracted from the font, it can be efficient to", "comparing # it against the SourceDescriptor locations (always in design", "Tuple, Union from fontTools.misc import etree as ET from fontTools.misc", "styleName=None, localisedFamilyName=None, copyLib=False, copyInfo=False, copyGroups=False, copyFeatures=False, muteKerning=False, muteInfo=False, mutedGlyphNames=None, ):", "None: yValue = float(yValue) except ValueError: self.log.warning(\"ValueError in readLocation yValue", "same as the matching STAT format 4 label. No anisotropy.", "RangeAxisSubsetDescriptor): if subset.userMinimum != -math.inf: subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum) if subset.userMaximum", "= collections.OrderedDict() for axisDescriptor in self.axes: loc[axisDescriptor.name] = axisDescriptor.map_forward( axisDescriptor.default", "\"\"\"Same as :attr:`SourceDescriptor.font` .. seealso:: :attr:`SourceDescriptor.font` \"\"\" self.name = name", "else: self.readerClass = BaseDocReader if writerClass is not None: self.writerClass", "not None, it is not loaded again. Fonts with the", "None. The default location is the set of all `default`", "self.axisDefaults = {} self._strictAxisNames = True @classmethod def fromstring(cls, string,", "\"\"\" def getFormat(self) -> int: \"\"\"Determine which format of STAT", "(format 3).\"\"\" self.labelNames: MutableMapping[str, str] = labelNames or {} \"\"\"User-facing", "the document contains. \"\"\" minVersion = self.documentObject.formatTuple if ( any(", "axes elements, including the warp map. axesElement = self.root.find(\".axes\") if", "source name sourceName = \"temp_master.%d\" % (sourceCount) sourceObject = self.sourceDescriptorClass()", "styleName = styleNameElement.text instanceObject.setStyleName(styleName, lang) for familyNameElement in instanceElement.findall('familyname'): for", "else: attributes = {} self.root.append(ET.Element(\"rules\", attributes)) for ruleObject in self.documentObject.rules:", "font, and a lot of data duplication. 
- From a", "0) return minVersion def _makeLocationElement(self, locationObject, name=None): \"\"\" Convert Location", "instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName if self.effectiveFormatTuple < (5, 0): # Deprecated", "def fromstring(cls, string, documentObject): f = BytesIO(tobytes(string, encoding=\"utf-8\")) self =", "new sourceDescriptor.\"\"\" return self.writerClass.getSourceDescriptor() def newInstanceDescriptor(self): \"\"\"Ask the writer class", "xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"label element contains unknown attributes: {',", "or [] \"\"\"list of input / output values that can", "source in self.sources: source.font = None res = copy.deepcopy(self) for", "From a single DesignSpace with discrete axes, define 1 variable", "axis, then edit the values: .. code:: python instance.clearLocation('Weight') instance.designLocation['Weight']", "None: # neither is defined, don't add this condition continue", "loaded fonts.append(source.font) continue if source.path in loaded: source.font = loaded[source.path]", "which could look like either one of these: .. code-block::", "must contain an axis-subsets element.\") axisSubsets = [] for axisSubset", "= [ v.asdict() if hasattr(v, \"asdict\") else v for v", "the document. Can't guess why they're different, we just choose", "# path as it is stored in the document sourceObject.name", "{\"first\", \"last\"}: raise DesignSpaceDocumentError( \"<rules> processing attribute value is not", "as in [(minimum, minimum), (maximum, maximum)]. Varlib. \"\"\" self.axisOrdering =", "= \"true\" if label.olderSibling: labelElement.attrib['oldersibling'] = \"true\" if label.linkedUserValue is", "path to the instance file, **as it is in the", "instanceElement.append(localisedStyleNameElement) if instanceObject.localisedFamilyName: languageCodes = list(instanceObject.localisedFamilyName.keys()) languageCodes.sort() for code in", "test if this object contains the same data as the", "The source of truth for this instance's location is determined", "return False return True def processRules(rules, location, glyphNames): \"\"\"Apply these", "in doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] else:", "is a list of conditions. - Each condition is a", "\"\"\" self.styleMapFamilyName = styleMapFamilyName \"\"\"string. StyleMap familyname for this instance.", "is not None: descriptor.filename = self._posixRelativePath(descriptor.path) if instances: for descriptor", "container for the axis data. Add more localisations? .. code::", "calculated from filename.\"\"\" self.font = font \"\"\"Any Python object. 
Optional.", "cond.get('name') if cond.get('minimum') is not None: conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum')) if", "from fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument.fromfile(\"some/path/to/my.designspace\") doc.formatVersion doc.elidedFallbackName doc.axes", "do we need here def __init__(self, *, name=None, conditionSets=None, subs=None):", "the instance file, calculated from the document path and the", "Absolute path to the instance file, calculated from the document", "Indicates if the non-interpolating font.info needs to be copied to", "ET.Element('stylemapfamilyname') localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple", "above transformation loses leading slashes of UNC path mounts new_path", "None def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict: \"\"\"Map a user", "method='xml', xml_declaration=xml_declaration, pretty_print=pretty, ) def _getEffectiveFormatTuple(self): \"\"\"Try to use the", "'map', 'axisOrdering', 'axisLabels'] def __init__( self, *, tag=None, name=None, labelNames=None,", "= os.path.basename(path) reader = self.readerClass(path, self) reader.read() if self.sources: self.findDefault()", "if stylename is not None: instanceObject.styleName = stylename postScriptFontName =", "rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = [] for axis in self.axes: if", "self.filename is not None: basename = os.path.splitext(self.filename)[0] + \"-VF\" if", "else \"\") class AsDictMixin(object): def asdict(self): d = {} for", ".. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\" self.designLocation: AnisotropicLocationDict", "ValueError: self.log.warning(\"ValueError in readLocation yValue %3.3f\", yValue) if userValue is", "self.documentObject.axes: self._addAxis(axisObject) if self.documentObject.locationLabels: labelsElement = ET.Element(\"labels\") for labelObject in", "\"\"\"Read a designspace file from ``path`` and return a new", "font that only include some axes and freeze other axes", "is used in the location dicts. MutatorMath + Varlib. \"\"\"", "def __repr__(self): attrs = [f\"{a}={repr(getattr(self, a))},\" for a in self._attrs]", "if self.effectiveFormatTuple >= (5, 0): if instanceObject.locationLabel is None: self._addLocationElement(", "self.default = default \"\"\"number. The default value for this axis,", "to that. \"\"\" self.name = name \"\"\"string. Optional. Unique identifier", "be None, assume axis.maximum cd['maximum'] = None cd['name'] = conditionElement.attrib.get(\"name\")", "tag=None, name=None, labelNames=None, minimum=None, default=None, maximum=None, hidden=False, map=None, axisOrdering=None, axisLabels=None,", "# it against the SourceDescriptor locations (always in design space).", "List[VariableFontDescriptor] = [] \"\"\"List of this document's variable fonts. ..", "in self.formatVersion.split(\".\")) major = next(numbers) minor = next(numbers, 0) return", "else None elidable = True if element.get(\"elidable\") == \"true\" else", "sourceObject.getFamilyName(code) sourceElement.append(localisedFamilyNameElement) if sourceObject.copyLib: libElement = ET.Element('lib') libElement.attrib['copy'] = \"1\"", "dicts. MutatorMath + Varlib. \"\"\" # names for UI purposes,", "``axisOrdering``. 
See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_ .. versionadded:: 5.0", "container for data related to the instance .. code:: python", "isinstance(subset, RangeAxisSubsetDescriptor): if subset.userMinimum != -math.inf: subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum) if", "= 'Hamburgerwhatever' doc.addInstance(i2) \"\"\" flavor = \"instance\" _defaultLanguageCode = \"en\"", "return d class SimpleDescriptor(AsDictMixin): \"\"\" Containers for a bunch of", "= float(axisElement.attrib.get(\"maximum\")) axisObject.default = float(axisElement.attrib.get(\"default\")) axisObject.name = axisElement.attrib.get(\"name\") if axisElement.attrib.get('hidden',", "that. \"\"\" self.name = name \"\"\"string. Optional. Unique identifier name", "or {} \"\"\"dict. Axis values for this source, in design", "to the source .. code:: python doc = DesignSpaceDocument() s1", "dictionary of localised stylename strings, keyed by language code. \"\"\"", "that we only want the default location of that axis", "The minimum value for this axis in user space. MutatorMath", "\"\"\"STAT table field ``axisOrdering``. See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_", "if maximumStr is not None else None linkedValueStr = element.get(\"linkeduservalue\")", "the same location as the LocationLabel. .. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation`", "an axis is not mentioned, assume that we only want", "labelNames=None, minimum=None, default=None, maximum=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__(", "the file. \"\"\" self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []", "\"\"\" Containers for a bunch of attributes\"\"\" # XXX this", "for axisDescriptor in self.axes: if axisDescriptor.name == name: return axisDescriptor", "= float(userValue) except ValueError: self.log.warning(\"ValueError in readLocation userValue %3.3f\", userValue)", "# Deprecated members as of version 5.0 if instanceObject.glyphs: if", "float(yValue) except ValueError: self.log.warning(\"ValueError in readLocation yValue %3.3f\", yValue) if", "new instance of :class:. \"\"\" self = cls(readerClass=readerClass, writerClass=writerClass) self.read(path)", "-- action: there is a conflict between the given filename,", "SourceDescriptors. For example, to load UFO sources using defcon: designspace", "XML_LANG: axisObject.labelNames[lang] = tostr(labelNameElement.text) labelElement = axisElement.find(\".labels\") if labelElement is", "Unique identifier name for this source. MutatorMath + Varlib. \"\"\"", "# output to a dict, used in testing return dict(", "posixpath_property(\"_filename\") path = posixpath_property(\"_path\") def __init__( self, *, filename=None, path=None,", "for label_name in labelElement.findall(\"labelname\") for attr, lang in label_name.items() if", "self.locationFromElement(sourceElement) if userLocation: raise DesignSpaceDocumentError(f'<source> element \"{sourceName}\" must only have", "locElement.append(dimElement) return locElement, validatedLocation def intOrFloat(self, num): if int(num) ==", "locationElement in element.findall('.location'): elementLocation = self.readLocationElement(locationElement) break return elementLocation def", "of a condition set. 
rules = [] rulesElement = self.root.find(\".rules\")", "in self.locationLabels: if label.name == name: return label return None", "v in self.map}) def map_backward(self, v): \"\"\"Maps value from axis", "self.instanceDescriptorClass() instanceObject.path = instancePath # absolute path to the instance", "= self.intOrFloat(axisObject.maximum) elif isinstance(axisObject, DiscreteAxisDescriptor): axisElement.attrib['values'] = \" \".join(self.intOrFloat(v) for", "must only have design locations (using xvalue=\"\").') masterGlyphName = masterElement.attrib.get('glyphname')", "if writerClass is not None: self.writerClass = writerClass else: self.writerClass", "def getFamilyName(self, languageCode=\"en\"): return self.localisedFamilyName.get(languageCode) def setStyleMapStyleName(self, styleMapStyleName, languageCode=\"en\"): self.localisedStyleMapStyleName[languageCode]", "this axis. Contrary to continuous axes, only the values in", "= float(axisElement.attrib.get(\"minimum\")) axisObject.maximum = float(axisElement.attrib.get(\"maximum\")) axisObject.default = float(axisElement.attrib.get(\"default\")) axisObject.name =", "\"\"\" if hasattr(path, \"__fspath__\"): # support os.PathLike objects path =", "if len(conditionsetElement): ruleElement.append(conditionsetElement) for sub in ruleObject.subs: subElement = ET.Element('sub')", "versionadded:: 5.0 \"\"\" self.localisedFamilyName[languageCode] = tostr(familyName) def getFamilyName(self, languageCode=\"en\"): \"\"\"Getter", "to design space before comparing # it against the SourceDescriptor", "v[0] if not self.map: return v return piecewiseLinearMap(v, {v: k", "= styleMapStyleName \"\"\"string. StyleMap stylename for this instance. MutatorMath +", "if \"uservalue\" in element.attrib: xml_attrs = {'name', 'uservalue'} unknown_attrs =", "'map', 'axisOrdering', 'axisLabels') def __init__( self, *, tag=None, name=None, labelNames=None,", "axisLabels=self.axisLabels, ) def map_forward(self, v): \"\"\"Maps value from axis mapping's", "and dimName not in self.axisDefaults: # In case the document", "instanceElement.findall('stylename'): for key, lang in styleNameElement.items(): if key == XML_LANG:", "attribute, and the default writer will not use this attribute.", "if axesElement is not None and 'elidedfallbackname' in axesElement.attrib: self.documentObject.elidedFallbackName", "'Width')), 'optical': ('opsz', dict(en = 'Optical Size')), 'slant': ('slnt', dict(en", "complete user location for this instance. .. seealso:: :meth:`getFullDesignLocation` ..", "+ ( \": %r\" % self.obj if self.obj is not", "in the document name = instanceElement.attrib.get(\"name\") if name is not", "seealso:: :attr:`SourceDescriptor.font` \"\"\" self.name = name \"\"\"string. Unique identifier name", "self.locationLabels: List[LocationLabelDescriptor] = [] \"\"\"List of this document's STAT format", "False elif not cd['minimum'] <= value <= cd['maximum']: return False", ".. 
code:: python a1 = AxisDescriptor() a1.minimum = 1 a1.maximum", "<glyph name=\"b\"> <master location=\"location-token-bbb\" source=\"master-token-aaa2\"/> <master glyphname=\"b.alt1\" location=\"location-token-ccc\" source=\"master-token-aaa3\"/> <note>", "if self.userMinimum is not None or self.userMaximum is not None:", "[] for axis in self.axes: if isinstance(axis, DiscreteAxisDescriptor): discreteAxes.append(axis) else:", "this instance for _, glyphData in item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation'])", "rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor @classmethod def getAxisDecriptor(cls): return", "xValue %3.3f\", xValue) try: yValue = dimensionElement.attrib.get('yvalue') if yValue is", "readLib(self): \"\"\"Read the lib element for the whole document.\"\"\" for", "complete user location of this label, by combining data from", "string, readerClass=None, writerClass=None): self = cls(readerClass=readerClass, writerClass=writerClass) reader = self.readerClass.fromstring(string,", "elif path.startswith(r'\\\\'): # The above transformation loses leading slashes of", "None -- action: write as is. The filename attr should", "of this location's label. Keyed by ``xml:lang`` code. \"\"\" def", "v for k, v in self.map}) def map_backward(self, v): \"\"\"Maps", "noteElement in glyphElement.findall('.note'): glyphData['note'] = noteElement.text break designLocation, userLocation =", "Optional[float] = userDefault \"\"\"New default value of the axis in", "'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames') def __init__( self, *, name,", "\"\"\" designSpaceDocument - read and write designspace files \"\"\" __all__", "self.lib: Dict = {} \"\"\"User defined, custom data associated with", "return f\"{self.__class__.__name__}(\\n{attrs}\\n)\" class SourceDescriptor(SimpleDescriptor): \"\"\"Simple container for data related to", "a tuple of (major, minor). .. versionadded:: 5.0 \"\"\" if", "axis in self.documentObject.axes: if designLocation is not None and axis.name", "a possible \"good\" filename, in case one wants to save", "the designspace, as fonts at each value can have different", "self._attrs] attrs = indent('\\n'.join(attrs), ' ') return f\"{self.__class__.__name__}(\\n{attrs}\\n)\" class SourceDescriptor(SimpleDescriptor):", "Default ``None`` which means ``foreground``. \"\"\" self.familyName = familyName \"\"\"string.", "2 a2.axisLabels = [ AxisLabelDescriptor(name=\"Roman\", userValue=0, elidable=True) ] doc.addAxis(a2) ..", "self.formatVersion is None: return (5, 0) numbers = (int(i) for", "from the document's continuous axes. In the case of Designspace", "v in axisObject.values) axisElement.attrib['default'] = self.intOrFloat(axisObject.default) if axisObject.hidden: axisElement.attrib['hidden'] =", "Python attribute names are usually camelCased, the corresponding `XML <document-xml-structure>`_", "= designLocation glyphSources = None for masterElement in glyphElement.findall('.masters/master'): fontSourceName", "return piecewiseLinearMap(v, {v: k for k, v in self.map}) class", "location dicts. MutatorMath + Varlib. 
\"\"\" # names for UI", "self.ruleDescriptorClass() ruleName = ruleObject.name = ruleElement.attrib.get(\"name\") # read any stray", "languageCode, labelName in sorted(labelNames.items()): languageElement = ET.Element('labelname') languageElement.attrib[XML_LANG] = languageCode", "in self.axes } def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict: \"\"\"Map", "lib key ``com.github.fonttools.varLib.featureVarsFeatureTag`` .. code:: xml <lib> <dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string>", "designLocation else axis.default ) for axis in self.axes } def", "\"\"\"Single value of a discrete or continuous axis to use", "be muted in the instances. MutatorMath only. \"\"\" @property def", "if hasattr(path, \"__fspath__\"): # support os.PathLike objects path = path.__fspath__()", "doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]: \"\"\"Get the :class:`LocationLabelDescriptor` instance that matches", "= [] \"\"\"List of this document's rules.\"\"\" self.rulesProcessingLast: bool =", "set. .. versionadded:: 5.0 \"\"\" return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default))", "A dictionary of localised family name strings, keyed by language", ".. versionadded:: 5.0 \"\"\" return self.localisedFamilyName.get(languageCode) def getFullDesignLocation(self, doc: 'DesignSpaceDocument')", "def _addLabelNames(self, parentElement, labelNames): for languageCode, labelName in sorted(labelNames.items()): languageElement", "with data and then adding them to the document. This", "DesignSpaceDocumentError(\"variable-font element must have a name attribute.\") filename = variableFontElement.get(\"filename\")", "with many axes, define subsets of that variable font that", "= dimensionName if type(dimensionValue) == tuple: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0]) dimElement.attrib['yvalue']", "name is None: raise DesignSpaceDocumentError(\"variable-font element must have a name", "cds.append(cd) return cds def readAxes(self): # read the axes elements,", "the default location for that axis. Note: the output won't", "is not None: glyphElement.attrib['name'] = glyphName if data.get('note') is not", "ET.Element('lib') libElement.attrib['copy'] = \"1\" sourceElement.append(libElement) if sourceObject.copyGroups: groupsElement = ET.Element('groups')", "= designLocation layerName = sourceElement.attrib.get('layer') if layerName is not None:", "def getStyleMapStyleName(self, languageCode=\"en\"): return self.localisedStyleMapStyleName.get(languageCode) def setStyleMapFamilyName(self, styleMapFamilyName, languageCode=\"en\"): self.localisedStyleMapFamilyName[languageCode]", "= vf.name if vf.filename is not None: vfElement.attrib['filename'] = vf.filename", "(format 2).\"\"\" self.userMaximum: Optional[float] = userMaximum \"\"\"STAT field ``rangeMaxValue`` (format", "members as of version 5.0 if instanceObject.glyphs: if instanceElement.findall('.glyphs') ==", "instance def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): \"\"\"Add the given ``axisDescriptor``", "return variableFont def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor): \"\"\"Add the given ``locationLabelDescriptor``", "LocationLabelDescriptor): \"\"\"Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`. .. 
versionadded:: 5.0", "variable fonts defined in this document, or implicit variable fonts", "- len(name)) else: tag = name[:4] return tag, dict(en=name) class", "float = userMinimum \"\"\"New minimum value of the axis in", "not None: xValue = float(xValue) except ValueError: self.log.warning(\"ValueError in readLocation", "def getSourceDescriptor(cls): return cls.sourceDescriptorClass() @classmethod def getInstanceDescriptor(cls): return cls.instanceDescriptorClass() @classmethod", "= instanceObject.styleMapFamilyName if instanceObject.styleMapStyleName is not None: instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName", "sourceObject = self.sourceDescriptorClass() sourceObject.path = sourcePath # absolute path to", "self.filename = filename \"\"\"string. Relative path to the instance file,", "of this document's instances.\"\"\" self.lib: Dict = {} \"\"\"User defined,", "Use rules or sparse sources instead. \"\"\" self.kerning = kerning", "for special master definitions for glyphs. If glyphs need special", "axisObject.hidden: axisElement.attrib['hidden'] = \"1\" self.root.findall('.axes')[0].append(axisElement) def _addAxisLabel(self, axisElement: ET.Element, label:", "this axis, i.e. when a new location is created, this", "2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The STAT format of the Axis value", "raise DesignSpaceDocumentError(\"variable-font element must have a name attribute.\") filename =", "+ Varlib. \"\"\" self.localisedFamilyName = localisedFamilyName or {} \"\"\"dict. A", "'Slant')), 'italic': ('ital', dict(en = 'Italic')), } if name.lower() in", "path always using forward slashes.\"\"\" def getter(self): # Normal getter", "Designspace. Use-cases: - From a single DesignSpace with discrete axes,", "compare(self, other): # test if this object contains the same", "is not None: sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename)) else: sourcePath =", "Use reverse-DNS notation to identify your own data. Respect the", "infoElement in instanceElement.findall(\"info\"): self.readInfoElement(infoElement, instanceObject) for libElement in instanceElement.findall('lib'): self.readLibElement(libElement,", "in zip(discreteAxes, values) ] )) return variableFonts def deepcopyExceptFonts(self): \"\"\"Allow", "space coordinates. MutatorMath + Varlib. .. deprecated:: 5.0 Use the", "font=None, name=None, location=None, locationLabel=None, designLocation=None, userLocation=None, familyName=None, styleName=None, postScriptFontName=None, styleMapFamilyName=None,", "SourceDescriptor() s1.path = masterPath1 s1.name = \"master.ufo1\" s1.font = defcon.Font(\"master.ufo1\")", "if self.documentObject.lib: self._addLib(self.root, self.documentObject.lib, 2) tree = ET.ElementTree(self.root) tree.write( self.path,", "is not None and not force: continue if self.path is", "the contents of this field to the disk and make", "ruleElement.append(subElement) if len(ruleElement): self.root.findall('.rules')[0].append(ruleElement) def _addAxis(self, axisObject): axisElement = ET.Element('axis')", "'weight': ('wght', dict(en = 'Weight')), 'width': ('wdth', dict(en = 'Width')),", "in sourceElement.findall(\".kerning\"): if kerningElement.attrib.get('mute') == '1': sourceObject.muteKerning = True self.documentObject.sources.append(sourceObject)", "- Each conditionset is a list of conditions. - Each", "the corresponding `XML <document-xml-structure>`_ attributes are usually all lowercase. 
..", "name=None, location=None, locationLabel=None, designLocation=None, userLocation=None, familyName=None, styleName=None, postScriptFontName=None, styleMapFamilyName=None, styleMapStyleName=None,", "axis.name: self.userLocation.get(axis.name, axis.default) for axis in doc.axes } class VariableFontDescriptor(SimpleDescriptor):", "only have design locations (using xvalue=\"\").') sourceObject.location = designLocation layerName", "self.copyFeatures = copyFeatures \"\"\"bool. Indicates if the feature text needs", "{} for glyphElement in instanceElement.findall('.glyphs/glyph'): self.readGlyphElement(glyphElement, instanceObject) for infoElement in", "\"\"\"User-facing translations of this location's label. Keyed by xml:lang code.", "value for filename, it should be fine case 4. descriptor.filename", "the given ``kwargs`` and add it to :attr:`rules`. \"\"\" rule", "'.join(unknown_attrs)}\") name = element.get(\"name\") if name is None: raise DesignSpaceDocumentError(\"label", "a callable which initializes a new font object (e.g. TTFont,", "continue if glyphElement.attrib.get('mute') == '1': sourceObject.mutedGlyphNames.append(glyphName) for kerningElement in sourceElement.findall(\".kerning\"):", "= copy.deepcopy(self) for source, font in zip(res.sources, fonts): res.font =", "RuleDescriptor sourceDescriptorClass = SourceDescriptor variableFontDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor", "this instance needs the interpolating font.info calculated. .. deprecated:: 5.0", "read any stray conditions outside a condition set externalConditions =", "them in a new conditionset.\" ) # read the conditionsets", "self.path is not None: descriptor.filename = self._posixRelativePath(descriptor.path) if instances: for", "None sourceName = sourceElement.attrib.get('name') if sourceName is None: # add", "``kwargs`` and add it to :attr:`axes`. The axis will be", "or none at all.\" ) def readSources(self): for sourceCount, sourceElement", "scale the map first newMap = [] for inputValue, outputValue", "\"\"\"New default value of the axis in the target variable", "element.get(\"userdefault\") userMaximum = element.get(\"usermaximum\") if userMinimum is not None and", ".. deprecated:: 5.0 Use the more explicit alias for this", "5.0 \"\"\" fonts = [source.font for source in self.sources] try:", "return False elif cd.get('maximum') is None: if cd['minimum'] > value:", "{'name', 'uservalue'} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise", "for this axis, i.e. 
when a new location is created,", "self.getLocationLabelDescriptor(doc) if label is not None: return doc.map_forward(label.userLocation) # type:", "if descriptor.path is not None: # case 3 and 4:", "of OpenType's STAT data for a free-floating location (format 4).", "xml <lib> <dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib> \"\"\" self.sources: List[SourceDescriptor]", "valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor @classmethod", "documents, there can be as many variable fonts as there", "familyNameElement.items(): if key == XML_LANG: familyName = familyNameElement.text instanceObject.setFamilyName(familyName, lang)", "less important than in continuous axes: - it doesn't define", "\"{glyphName}\" must only have design locations (using xvalue=\"\").') if designLocation", "This makes it easy to integrate this object in different", "default location for that axis. Note: the output won't be", "\"\"\" self.olderSibling: bool = olderSibling \"\"\"STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec", ".. deprecated:: 5.0 \"\"\" self.muteKerning = muteKerning \"\"\"bool. Indicates if", "such label exists. .. versionadded:: 5.0 \"\"\" return next( (label", "self.designLocation: result[axis.name] = self.designLocation[axis.name] else: result[axis.name] = axis.map_forward(axis.default) return result", "all.\" ) def readSources(self): for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")): filename", "self.axisDefaults[axisObject.name] = axisObject.default def readAxisLabel(self, element: ET.Element): xml_attrs = {'userminimum',", "the reference glyph set for the designspace, as fonts at", "documentObject self.effectiveFormatTuple = self._getEffectiveFormatTuple() self.root = ET.Element(\"designspace\") def write(self, pretty=True,", "a set of glyph substitutions to trigger conditionally in some", "object loaded from the path. **kwargs: extra options passed on", "except ValueError: self.log.warning(\"ValueError in readLocation userValue %3.3f\", userValue) try: xValue", "axis.map_forward(axis.default) return result def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict: \"\"\"Get", "document, or a sufficiently recent version to be able to", "if glyphName is not None: glyphElement.attrib['name'] = glyphName if data.get('note')", "localized names for all instances. .. versionadded:: 5.0 \"\"\" self.copyLib", "designLocation is not None else (location or {}) \"\"\"dict. Axis", "to :attr:`rules`.\"\"\" self.rules.append(ruleDescriptor) def addRuleDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`RuleDescriptor`", "axisElement.attrib['values'] = \" \".join(self.intOrFloat(v) for v in axisObject.values) axisElement.attrib['default'] =", "language code. 
\"\"\" self.localisedStyleMapStyleName = localisedStyleMapStyleName or {} \"\"\"A dictionary", "makeKerning=True, makeInfo=True): filename = instanceElement.attrib.get('filename') if filename is not None", "cond.get('minimum') is not None: minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else: minimum", "= ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor def __init__(self,", "in self.sources: if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation: self.default = sourceDescriptor return", "= ET.Element(\"axes\") if self.documentObject.elidedFallbackName is not None: axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName", "projects, you probably want True. See the following issues for", "ValueError: raise DesignSpaceDocumentError(\"unicode values %s are not integers\" % unicodes)", "Writer objects can be subclassed as well. **Note:** Python attribute", "False elif encoding is None or encoding == \"utf-8\": f", "self.root.findall('.sources')[0].append(sourceElement) def _addVariableFont(self, parentElement: ET.Element, vf: VariableFontDescriptor) -> None: vfElement", "If you want to use a different feature altogether, e.g.", "document without deep-copying attached UFO fonts or TTFont objects. The", "processingValue not in {\"first\", \"last\"}: raise DesignSpaceDocumentError( \"<rules> processing attribute", "no 'path' attribute\" % (source.name or \"<Unknown>\") ) source.font =", "\"en\": continue localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname') localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code localisedStyleMapFamilyNameElement.text =", "the locations of all masters and instances to the -1", "of attributes\"\"\" # XXX this is ugly. The 'print' is", "hasattr(v, \"asdict\") else v for v in value ] d[attr]", "= None \"\"\"Format version for this document, as a string.", "= False \"\"\"This flag indicates whether the substitution rules should", "return self.localisedFamilyName.get(languageCode) def setStyleMapStyleName(self, styleMapStyleName, languageCode=\"en\"): self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName) def", "\"\"\" self.default = default \"\"\"number. The default value for this", "): if minVersion < (5, 0): minVersion = (5, 0)", "userLocation), None ) def updateFilenameFromPath(self, masters=True, instances=True, force=False): \"\"\"Set a", "\"neutral\" version of outlines from which deltas would apply, as", "\"\"\"Determine which format of STAT Axis value to use to", "implicit variable fonts that can be built from the document's", "source in self.sources] try: for source in self.sources: source.font =", "a new instanceDescriptor.\"\"\" return self.writerClass.getInstanceDescriptor() def getAxisOrder(self): \"\"\"Return a list", "not None: conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum')) conditionsetElement.append(conditionElement) if len(conditionsetElement): ruleElement.append(conditionsetElement) for", "do the scaling, so we do those last. \"\"\" #", "dict while writing it out, as preserved below. 
if instanceObject.location", "`fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__ If you want to use a different feature", "as they are part of the `Required Variation Alternates OpenType", "axis mapping's input (user) to output (design).\"\"\" from fontTools.varLib.models import", "DesignSpaceDocument doc = DesignSpaceDocument.fromfile(\"some/path/to/my.designspace\") doc.formatVersion doc.elidedFallbackName doc.axes doc.locationLabels doc.rules doc.rulesProcessingLast", "5, the whole document was implicitly describing a variable font", "shared by reference between the original and the copy. ..", "def readSources(self): for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")): filename = sourceElement.attrib.get('filename')", "unknown_attrs: raise DesignSpaceDocumentError(f\"label element contains unknown attributes: {', '.join(unknown_attrs)}\") name", "assume the same default value as the full axis. (default", "save we need to identify and respond to the following", "= list(sourceObject.localisedFamilyName.keys()) languageCodes.sort() for code in languageCodes: if code ==", "code in languageCodes: if code == \"en\": continue localisedStyleMapFamilyNameElement =", "name = element.get(\"name\") if name is None: raise DesignSpaceDocumentError(\"axis-subset element", "representation of this source font that is loaded in memory,", "asdict(self): d = {} for attr, value in self.__dict__.items(): if", "not None: axesElement = ET.Element(\"axes\") if self.documentObject.elidedFallbackName is not None:", "hasattr(path, \"__fspath__\"): # support os.PathLike objects path = path.__fspath__() self.path", "opener function. Returns: List of font objects in the order", "parentElement: ET.Element, data: Any, indent_level: int) -> None: if not", "sourceElement.findall(\".features\"): if featuresElement.attrib.get('copy') == '1': sourceObject.copyFeatures = True for glyphElement", "'userMinimum', 'userDefault', 'userMaximum') def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf):", "designLoc[dimName] = (xValue, yValue) elif xValue is not None: designLoc[dimName]", "localisedFamilyNameElement.text = instanceObject.getFamilyName(code) instanceElement.append(localisedFamilyNameElement) if instanceObject.localisedStyleMapStyleName: languageCodes = list(instanceObject.localisedStyleMapStyleName.keys()) languageCodes.sort()", "which is useful for the STAT table, however it can't", "for c in rule.conditionSets) def evaluateConditions(conditions, location): \"\"\"Return True if", "=========== =============== \"\"\" if self.linkedUserValue is not None: return 3", "_addInstance(self, instanceObject): instanceElement = ET.Element('instance') if instanceObject.name is not None:", "glyphname, use the one we have masterGlyphName = glyphName d", "else: result[axis.name] = axis.map_forward(axis.default) return result class RuleDescriptor(SimpleDescriptor): \"\"\"Represents the", "__init__(self, msg, obj=None): self.msg = msg self.obj = obj def", "str] = labelNames or {} \"\"\"User-facing translations of this location's", "``foreground``. \"\"\" self.familyName = familyName \"\"\"string. Family name of this", "None: raise DesignSpaceDocumentError(\"variable-font element must contain an axis-subsets element.\") axisSubsets", "\"\"\" self.familyName = familyName \"\"\"string. 
Family name of this source.", "must only have user locations (using uservalue=\"\").') elidable = True", "not None: if sourceObject.name.find(\"temp_master\") != 0: # do not save", "else: # Pre-version 5.0 code was validating and filling in", "if libElement is not None: lib = plistlib.fromtree(libElement[0]) variableFont =", "io import BytesIO, StringIO from textwrap import indent from typing", ".. versionadded:: 5.0 \"\"\" for label in self.locationLabels: if label.name", "= None if axisName is None: self.designLocation = {} self.userLocation", "descriptor.filename is not None and not force: continue if self.path", "__future__ import annotations import collections import copy import itertools import", "makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True): filename", "are assumed to be the default. See :meth:`SourceDescriptor.getFullDesignLocation()` \"\"\" self.default", "glyphs=None, kerning=True, info=True, lib=None, ): self.filename = filename \"\"\"string. Relative", "dict(en = 'Width')), 'optical': ('opsz', dict(en = 'Optical Size')), 'slant':", "self.intOrFloat(dimensionValue[0]) dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1]) else: dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue) locElement.append(dimElement) return", "\"\" for label_name in labelElement.findall(\"labelname\") for attr, lang in label_name.items()", "for 'xml:lang' XML_NS = \"{http://www.w3.org/XML/1998/namespace}\" XML_LANG = XML_NS + \"lang\"", "(major, minor). .. versionadded:: 5.0 \"\"\" if self.formatVersion is None:", "design location along this axis, possibly anisotropic. - ``userLocation[axisName]``: the", "doc.elidedFallbackName doc.axes doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources doc.variableFonts doc.instances doc.lib \"\"\"", "mapElement in axisElement.findall('map'): a = float(mapElement.attrib['input']) b = float(mapElement.attrib['output']) axisObject.map.append((a,", "\"\"\"Get the complete user location of this label, by combining", "``rangeMinValue`` (format 2).\"\"\" self.userValue: float = userValue \"\"\"STAT field ``value``", "self.tag = tag \"\"\"string. Four letter tag for this axis.", "is None: continue if glyphElement.attrib.get('mute') == '1': sourceObject.mutedGlyphNames.append(glyphName) for kerningElement", "- xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"axis-subset element contains unknown attributes:", "to be correct and update filename. \"\"\" assert self.path is", "Varlib. \"\"\" self.styleName = styleName \"\"\"string. Style name of this", "(to record the results of executed rules for example). MutatorMath.", "DesignSpaceDocumentError(\"Glyph object without name attribute\") mute = glyphElement.attrib.get(\"mute\") if mute", "data from the various location fields, default axis values and", "only exists in memory, the producing script can write here", "if key == XML_LANG: styleMapStyleName = styleMapStyleNameElement.text instanceObject.setStyleMapStyleName(styleMapStyleName, lang) for", "return self @classmethod def fromstring(cls, string, readerClass=None, writerClass=None): self =", "important than in continuous axes: - it doesn't define the", "discrete axis has a list of ``values``. 
Example: an Italic", "vf.filename if vf.axisSubsets: subsetsElement = ET.Element('axis-subsets') for subset in vf.axisSubsets:", "instanceObject.locationLabel is not None: instanceElement.attrib['location'] = instanceObject.locationLabel if instanceObject.familyName is", "None: raise DesignSpaceDocumentError(\"label element must have a uservalue attribute.\") value", "designLocation=None, userLocation=None, familyName=None, styleName=None, postScriptFontName=None, styleMapFamilyName=None, styleMapStyleName=None, localisedFamilyName=None, localisedStyleName=None, localisedStyleMapFamilyName=None,", "`{self.locationLabel}` in instance `{self.name}`.' ) return label def getFullDesignLocation(self, doc:", "= \"{http://www.w3.org/XML/1998/namespace}\" XML_LANG = XML_NS + \"lang\" def posix(path): \"\"\"Normalize", "None: value = posix(value) setattr(self, private_name, value) return property(getter, setter)", "\"\"\" self.formatVersion: Optional[str] = None \"\"\"Format version for this document,", "new font object loaded from the path. **kwargs: extra options", "is not None: locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location) instanceElement.append(locationElement) if instanceObject.filename", "name \"\"\"string. Name of the axis as it is used", "raise DesignSpaceDocumentError(f'<glyph> element \"{glyphName}\" must only have design locations (using", "fonts with the same source.path only once loaded = {}", "instance file, calculated from the document path and the string", "tree.write( self.path, encoding=encoding, method='xml', xml_declaration=xml_declaration, pretty_print=pretty, ) def _getEffectiveFormatTuple(self): \"\"\"Try", "instanceObject.postScriptFontName = postScriptFontName styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname') if styleMapFamilyName is not", "languageCode=\"en\"): return self.localisedStyleMapStyleName.get(languageCode) def setStyleMapFamilyName(self, styleMapFamilyName, languageCode=\"en\"): self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName)", "not None: return self.rangeAxisSubsetDescriptorClass( name=name, userMinimum=float(userMinimum), userDefault=float(userDefault), userMaximum=float(userMaximum), ) if", "not None: masterElement.attrib['source'] = m.get('font') if m.get('location') is not None:", "labels. The source of truth for this instance's location is", "for featuresElement in sourceElement.findall(\".features\"): if featuresElement.attrib.get('copy') == '1': sourceObject.copyFeatures =", "libElement.attrib['copy'] = \"1\" sourceElement.append(libElement) if sourceObject.copyGroups: groupsElement = ET.Element('groups') groupsElement.attrib['copy']", "newNames = [] return glyphNames AnisotropicLocationDict = Dict[str, Union[float, Tuple[float,", "for axis in self.documentObject.axes: if designLocation is not None and", "locations of all masters and instances to the -1 -", "location) for c in rule.conditionSets) def evaluateConditions(conditions, location): \"\"\"Return True", "conditions: if cond.get('minimum') is None and cond.get('maximum') is None: #", "vf.name if vf.filename is not None: vfElement.attrib['filename'] = vf.filename if", "indication of a possible \"good\" filename, in case one wants", "such variable font, and a lot of data duplication. -", "the instance file, **as it is in the document**. The", "field ``elidedFallbackNameID``. 
See: `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_ ..", "\"InstanceStyleName\" i2.name = \"instance.ufo2\" # anisotropic location i2.designLocation = dict(weight=500,", "'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader', 'BaseDocWriter' ] # ElementTree allows to", "= dict(fr=\"Caractère\") s1.mutedGlyphNames.append(\"A\") s1.mutedGlyphNames.append(\"Z\") doc.addSource(s1) \"\"\" flavor = \"source\" _attrs", "not a standard axis, self.labelNames = labelNames or {} \"\"\"dict.", "labelName parentElement.append(languageElement) def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None:", "Read the info element.\"\"\" instanceObject.info = True def readGlyphElement(self, glyphElement,", "in self.root.findall(\".labels/label\"): unknown_attrs = set(labelElement.attrib) - xml_attrs if unknown_attrs: raise", "instanceObject.localisedStyleMapFamilyName: languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) languageCodes.sort() for code in languageCodes: if", "userMaximum=maximum, elidable=elidable, olderSibling=olderSibling, linkedUserValue=linkedValue, labelNames=labelNames, ) def readLabels(self): if self.documentObject.formatTuple", "name for this source. MutatorMath + Varlib. \"\"\" self.designLocation =", "attribute. It is up to the user of ``designspaceLib`` to", "must have a name attribute.\") designLocation, userLocation = self.locationFromElement(labelElement) if", "identifier name of the instance, used to identify it if", "``userLocation[axisName]``: the explicit user location along this axis. No anisotropy.", "styleMapFamilyName is not None: instanceObject.styleMapFamilyName = styleMapFamilyName styleMapStyleName = instanceElement.attrib.get('stylemapstylename')", "source sourceObject.filename = filename # path as it is stored", "for the file. \"\"\" self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or", "parentElement, ruleName=None): cds = [] for conditionElement in parentElement.findall('.condition'): cd", "newSourceDescriptor(self): \"\"\"Ask the writer class to make us a new", "axisElement: ET.Element, label: AxisLabelDescriptor) -> None: labelElement = ET.Element('label') labelElement.attrib['uservalue']", "'uservalue'} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"axis-subset", "\"\"\"Map a design location to a user location. Assume that", "us a new axisDescriptor.\"\"\" return self.writerClass.getAxisDecriptor() def newSourceDescriptor(self): \"\"\"Ask the", "== XML_LANG # Note: elementtree reads the \"xml:lang\" attribute name", "here def __init__(self, *, name=None, conditionSets=None, subs=None): self.name = name", "self.maximum = maximum \"\"\"number. The maximum value for this axis", "\"\"\"Clear all location-related fields. Ensures that :attr:``designLocation`` and :attr:``userLocation`` are", "minimum value of the axis in the target variable font.", "have it right here. Varlib. \"\"\" self.styleName = styleName \"\"\"string.", "and add it to :attr:`variableFonts`. .. 
versionadded:: 5.0 \"\"\" variableFont", "= self._posixRelativePath(descriptor.path) def newAxisDescriptor(self): \"\"\"Ask the writer class to make", "5.0 \"\"\" flavor = \"axis-subset\" _attrs = ('name', 'userMinimum', 'userDefault',", ") self.documentObject.variableFonts.append(variableFont) def readAxisSubset(self, element: ET.Element): if \"uservalue\" in element.attrib:", "self.name: str = name \"\"\"Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`", "\"1\": glyphData['mute'] = True # unicode unicodes = glyphElement.attrib.get('unicode') if", "('opsz', dict(en = 'Optical Size')), 'slant': ('slnt', dict(en = 'Slant')),", "part of the `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_. See", "None and self.path is not None: sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))", "variableFontDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor instanceDescriptorClass", "axisDescriptor.map_forward( axisDescriptor.default ) return loc def labelForUserLocation(self, userLocation: SimpleLocationDict) ->", "for source in self.sources] try: for source in self.sources: source.font", "i in self.formatVersion.split(\".\")) major = next(numbers) minor = next(numbers, 0)", "if hasattr(v, \"asdict\") else v for v in value ]", "instanceElements = self.root.findall('.instances/instance') for instanceElement in instanceElements: self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning,", "if instanceObject.styleMapStyleName is not None: instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName if self.effectiveFormatTuple", "> minimum. \"\"\" for cd in conditions: value = location[cd['name']]", "\"\"\"Get the complete design location of this source, from its", "is added to the document by creating such descriptor objects,", "value must have its mapping entry, if you intend that", "%3.3f\", userValue) try: xValue = dimensionElement.attrib.get('xvalue') if xValue is not", "Axis values for this source, in design space coordinates. MutatorMath", "to not specify some of the axis values, and they", "continue value = location[axis.name] # 'anisotropic' location, take first coord", "None \"\"\"Name of the default master. This attribute is updated", "map_forward(self, value): \"\"\"Maps value from axis mapping's input to output.", "used. .. versionadded:: 5.0 \"\"\" return { axis.name: ( axis.map_backward(designLocation[axis.name])", "``None`` which means ``foreground``. \"\"\" self.familyName = familyName \"\"\"string. 
Family", "to update a single axis location, the user should only", "masters (to record the results of executed rules for example).", "designspace file from ``path`` and return a new instance of", "ET.Element('info') if sourceObject.copyInfo: infoElement.attrib['copy'] = \"1\" if sourceObject.muteInfo: infoElement.attrib['mute'] =", "designLocation: dimElement = ET.Element('dimension') dimElement.attrib['name'] = axis.name value = designLocation[axis.name]", "self._writeGlyphElement(instanceElement, instanceObject, glyphName, data) glyphsElement.append(glyphElement) if instanceObject.kerning: kerningElement = ET.Element('kerning')", "= self.readerClass.fromstring(string, self) reader.read() if self.sources: self.findDefault() return self def", "subsetElement.attrib['name'] = subset.name if isinstance(subset, RangeAxisSubsetDescriptor): if subset.userMinimum != -math.inf:", "= self.getLocationLabelDescriptor(doc) if label is not None: return doc.map_forward(label.userLocation) #", "a new :class:`InstanceDescriptor` using the given ``kwargs`` and add it", "self.effectiveFormatTuple) if self.documentObject.axes or self.documentObject.elidedFallbackName is not None: axesElement =", "'userminimum', 'userdefault', 'usermaximum'} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs:", "os.path.dirname(self.path)) return posix(relative) def updatePaths(self): \"\"\" Right before we save", "(major, minor) def getVariableFonts(self) -> List[VariableFontDescriptor]: \"\"\"Return all variable fonts", "are dictionaries (possibly empty if clearing everything). In order to", "at all.\" ) def readSources(self): for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")):", "by xml:lang code. Values are required to be ``unicode`` strings,", "self.lib: MutableMapping[str, Any] = lib or {} \"\"\"Custom data associated", "@property def location(self): \"\"\"dict. Axis values for this instance. MutatorMath", "( any( isinstance(axis, DiscreteAxisDescriptor) or axis.axisOrdering is not None or", "SourceDescriptor.font attribute. If the font attribute is already not None,", "glyphSources is None: glyphSources = [] glyphSources.append(d) if glyphSources is", "self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor: SourceDescriptor): \"\"\"Add the given ``sourceDescriptor`` to", "the source to look for outline data. Default ``None`` which", "axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] elif axis.name in self.userLocation:", "values, do not add the rule. ruleElement = ET.Element('rule') if", "versionadded:: 5.0\"\"\" self.instances: List[InstanceDescriptor] = [] \"\"\"List of this document's", "to \"snapshot\" or \"freeze\". \"\"\" self.userValue: float = userValue \"\"\"Value", "\"\"\" flavor = \"label\" _attrs = ('name', 'elidable', 'olderSibling', 'userLocation',", "applied first, before other text shaping/OpenType layout, as they are", "this is ugly. The 'print' is inappropriate here, and instead", "{} def setStyleName(self, styleName, languageCode=\"en\"): \"\"\"These methods give easier access", "glyphnames, e.g. (\"a\", \"a.alt\"). - Note: By default, rules are", "name is not None: locElement.attrib['name'] = name validatedLocation = self.documentObject.newDefaultLocation()", "basic **descriptor** objects that store the data in attributes. 
Data", "sourceObject.muteKerning: kerningElement = ET.Element(\"kerning\") kerningElement.attrib[\"mute\"] = '1' sourceElement.append(kerningElement) if sourceObject.mutedGlyphNames:", "have its mapping entry, if you intend that value to", "sourceElement.findall(\".kerning\"): if kerningElement.attrib.get('mute') == '1': sourceObject.muteKerning = True self.documentObject.sources.append(sourceObject) def", "document's axis defaults. .. versionadded:: 5.0 \"\"\" result: AnisotropicLocationDict =", "is None: self.userLocation = {} if axisName in self.userLocation: del", "return instance def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): \"\"\"Add the given", "getattr(other, attr)) def __repr__(self): attrs = [f\"{a}={repr(getattr(self, a))},\" for a", "newMap = [] for inputValue, outputValue in axis.map: newOutputValue =", "the filename property is empty. VarLib. \"\"\" self.filename: str =", "update a single axis location, the user should only clear", "interpolation. </note> </glyph> \"\"\" glyphData = {} glyphName = glyphElement.attrib.get('name')", "of localised family name strings, keyed by language code. \"\"\"", "unknown_attrs = set(variableFontElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"variable-font element", "\"\"\"bool. Indicates if the groups need to be copied to", "is not None: return self.rangeAxisSubsetDescriptorClass( name=name, userMinimum=float(userMinimum), userDefault=float(userDefault), userMaximum=float(userMaximum), )", "created, this is the value this axis will get in", "self.userDefault: Optional[float] = userDefault \"\"\"New default value of the axis", "a condition set externalConditions = self._readConditionElements( ruleElement, ruleName, ) if", "\"\"\"Read a nested ``<location>`` element inside the given ``element``. ..", "cd.get('maximum') is None: if cd['minimum'] > value: return False elif", "``userLocation``, or ``None`` if no such label exists. .. versionadded::", "output won't be anisotropic, only the xvalue is set. ..", "this default value is less important than in continuous axes:", "load UFO sources using defcon: designspace = DesignSpaceDocument.fromfile(\"path/to/my.designspace\") designspace.loadSourceFonts(defcon.Font) Or", "self.kerning = kerning \"\"\" bool. Indicates if this instance needs", "if all(v is None for v in (userMinimum, userDefault, userMaximum)):", "same minimum value as the full axis. (default = ``-math.inf``)", "len(ruleElement): self.root.findall('.rules')[0].append(ruleElement) def _addAxis(self, axisObject): axisElement = ET.Element('axis') axisElement.attrib['tag'] =", "instanceElement.attrib['location'] = instanceObject.locationLabel if instanceObject.familyName is not None: instanceElement.attrib['familyname'] =", "in axisSubsetsElement.iterfind(\".axis-subset\"): axisSubsets.append(self.readAxisSubset(axisSubset)) lib = None libElement = variableFontElement.find(\".lib\") if", "assumed to be the default. 
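The fragment above states that data is added to the document by creating descriptor objects, filling in their attributes, and adding them to the document. A minimal sketch of that workflow; paths and names are placeholders:

.. code:: python

    from fontTools.designspaceLib import (
        AxisDescriptor, DesignSpaceDocument, SourceDescriptor,
    )

    doc = DesignSpaceDocument()

    axis = AxisDescriptor()
    axis.tag = "wght"
    axis.name = "Weight"
    axis.minimum, axis.default, axis.maximum = 100, 400, 900
    doc.addAxis(axis)

    source = SourceDescriptor()
    source.path = "masters/Regular.ufo"   # placeholder path
    source.name = "master.regular"
    source.location = dict(Weight=400)
    doc.addSource(source)

    doc.write("MyFamily.designspace")     # placeholder filename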
See :meth:`SourceDescriptor.getFullDesignLocation()` \"\"\" self.default =", "= \"axis\" _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering',", "loaded[source.path] else: if source.path is None: raise DesignSpaceDocumentError( \"Designspace source", "``maximum``, while a discrete axis has a list of ``values``.", ":attr:`font` attribute is shared by reference between the original and", "{ lang: label_name.text or \"\" for label_name in element.findall(\"labelname\") for", "the same data as the other for attr in self._attrs:", "minimum=\"100\" name=\"width\"/> <condition minimum=\"10\" maximum=\"40\" name=\"optical\"/> </conditionset> <sub name=\"cent\" with=\"cent.alt\"/>", "order as defined in the document.\"\"\" names = [] for", "it. \"\"\" if masters: for descriptor in self.sources: if descriptor.filename", "if filename is not None and self.documentObject.path is not None:", "= None ): locElement = ET.Element(\"location\") for axis in self.documentObject.axes:", "= float(yValue) except ValueError: self.log.warning(\"ValueError in readLocation yValue %3.3f\", yValue)", "str) -> Optional[LocationLabelDescriptor]: \"\"\"Return the top-level location label with the", "attributes are loaded, and return list of fonts. Takes a", "to work with different objects, as long as they have", "will allow these to be None, assume axis.maximum cd['maximum'] =", "``maximum`` keys. \"\"\" # list of substitutions stored as tuples", "normalized axis values.\"\"\" from fontTools.varLib.models import normalizeValue new = {}", "% self.obj if self.obj is not None else \"\") class", "the location # dict while writing it out, as preserved", "filename=filename, axisSubsets=axisSubsets, lib=lib, ) self.documentObject.variableFonts.append(variableFont) def readAxisSubset(self, element: ET.Element): if", "masters for this instance for _, glyphData in item.glyphs.items(): glyphData['instanceLocation']", "glyph substitution features. - False: before - True: after. Default", ".. versionadded:: 5.0 \"\"\" return { axis.name: self.userLocation.get(axis.name, axis.default) for", "if attr.startswith(\"_\"): continue if hasattr(value, \"asdict\"): value = value.asdict() elif", "= sub[1] ruleElement.append(subElement) if len(ruleElement): self.root.findall('.rules')[0].append(ruleElement) def _addAxis(self, axisObject): axisElement", "def addSourceDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`SourceDescriptor` using the given", "which initializes a new font object (e.g. TTFont, or defcon.Font,", "reference glyph set for the designspace, as fonts at each", "\"\"\"Set a descriptor filename attr from the path and this", "styleMapStyleName is not None: instanceObject.styleMapStyleName = styleMapStyleName # read localised", "if label.linkedUserValue is not None: labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue) self._addLabelNames(labelElement, label.labelNames)", "self.path = path \"\"\"string. 
Absolute path to the instance file,", "instanceObject): \"\"\" Read the info element.\"\"\" instanceObject.info = True def", "descriptor objects, filling them with data and then adding them", "for axis in self.axes } def map_backward(self, designLocation: AnisotropicLocationDict) ->", "res = copy.deepcopy(self) for source, font in zip(res.sources, fonts): res.font", "*, name, userValue, userMinimum=None, userMaximum=None, elidable=False, olderSibling=False, linkedUserValue=None, labelNames=None, ):", "self.addSource(source) return source def addInstance(self, instanceDescriptor: InstanceDescriptor): \"\"\"Add the given", "%r, \" \"expected 'first' or 'last'\" % processingValue) self.documentObject.rulesProcessingLast =", "assume axis.maximum cd['maximum'] = None cd['name'] = conditionElement.attrib.get(\"name\") # #", "= instanceElement.attrib.get('postscriptfontname') if postScriptFontName is not None: instanceObject.postScriptFontName = postScriptFontName", "the given ``kwargs`` and add it to :attr:`variableFonts`. .. versionadded::", "from ``path`` and populates the fields of ``self`` with the", "\"\"\"Add the given ``sourceDescriptor`` to ``doc.sources``.\"\"\" self.sources.append(sourceDescriptor) def addSourceDescriptor(self, **kwargs):", "to the document. Can't guess why they're different, we just", "to use in a variable font. .. versionadded:: 5.0 \"\"\"", "doc.axes } class VariableFontDescriptor(SimpleDescriptor): \"\"\"Container for variable fonts, sub-spaces of", "while writing it out, as preserved below. if instanceObject.location is", "raise DesignSpaceDocumentError( \"<rules> processing attribute value is not valid: %r,", "4 labels. .. versionadded:: 5.0\"\"\" self.rules: List[RuleDescriptor] = [] \"\"\"List", "part of the full design location. See :meth:`getFullDesignLocation()` .. versionadded::", "for familyNameElement in instanceElement.findall('familyname'): for key, lang in familyNameElement.items(): if", "names[name.lower()] if len(name) < 4: tag = name + \"*\"", "used to reference this rule data.\"\"\" # list of lists", "(default = ``-math.inf``) \"\"\" self.userDefault: Optional[float] = userDefault \"\"\"New default", "'path' attribute\" % (source.name or \"<Unknown>\") ) source.font = opener(source.path,", "if self._strictAxisNames and dimName not in self.axisDefaults: # In case", "add this condition continue conditionElement = ET.Element('condition') conditionElement.attrib['name'] = cond.get('name')", "self.path = None return self def read(self): self.readAxes() self.readLabels() self.readRules()", "a1.labelNames['fa-IR'] = \"قطر\" a1.labelNames['en'] = \"Wéíght\" a1.map = [(1.0, 10.0),", "design space before comparing # it against the SourceDescriptor locations", "of this instance, by combining data from the various location", "= None sourceName = sourceElement.attrib.get('name') if sourceName is None: #", "and add it to :attr:`rules`. \"\"\" rule = self.writerClass.ruleDescriptorClass(**kwargs) self.addRule(rule)", "of the full design location. See :meth:`getFullDesignLocation()` .. versionadded:: 5.0", "source in self.sources: if source.font is not None: # font", "layer in the source to look for outline data. Default", "are outside of a condition set. rules = [] rulesElement", "[] rulesElement = self.root.find(\".rules\") if rulesElement is not None: processingValue", "return (major, minor) def getVariableFonts(self) -> List[VariableFontDescriptor]: \"\"\"Return all variable", "and add it to :attr:`locationLabels`. .. 
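Nearby fragments show the ``add*Descriptor(**kwargs)`` convenience methods (``addSourceDescriptor``, ``addInstanceDescriptor``, and so on), which instantiate the descriptor class and add it to the document in one call. A sketch with illustrative values:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        tag="wght", name="Weight", minimum=100, default=400, maximum=900,
    )
    doc.addSourceDescriptor(
        path="masters/Bold.ufo",          # placeholder path
        name="master.bold",
        location=dict(Weight=900),
    )
    doc.addInstanceDescriptor(
        familyName="MyFamily", styleName="Medium",
        userLocation=dict(Weight=500),
    )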
versionadded:: 5.0 \"\"\" locationLabel", "for a single axis (formats 1, 2 and 3). All", "unicodes is not None: try: unicodes = [int(u, 16) for", "return locElement, validatedLocation def intOrFloat(self, num): if int(num) == num:", "sourceObject.name if sourceObject.familyName is not None: sourceElement.attrib['familyname'] = sourceObject.familyName if", "in case the filename property is empty. VarLib. \"\"\" self.filename:", "if subset.userDefault is not None: subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault) elif isinstance(subset,", "many variable fonts as there are locations on discrete axes.", "for groupsElement in sourceElement.findall('.groups'): if groupsElement.attrib.get('copy') == '1': sourceObject.copyGroups =", "name=None): \"\"\" Convert Location dict to a locationElement.\"\"\" locElement =", "\"\"\" self.designLocation: AnisotropicLocationDict = designLocation if designLocation is not None", "== \"1\": glyphData['mute'] = True # unicode unicodes = glyphElement.attrib.get('unicode')", "in the document**. MutatorMath + VarLib. \"\"\" self.path = path", "= \"1\" sourceElement.append(groupsElement) if sourceObject.copyFeatures: featuresElement = ET.Element('features') featuresElement.attrib['copy'] =", "is not None else location or {} \"\"\"dict. Axis values", ".. versionadded:: 5.0 \"\"\" if self.locationLabel is None: return None", "{'Opsz': 16} In order to update a single axis location,", "None: if \"ordering\" in labelElement.attrib: axisObject.axisOrdering = int(labelElement.attrib[\"ordering\"]) for label", "not None: basename = os.path.splitext(os.path.basename(self.path))[0] + \"-VF\" if basename is", "self.styleMapStyleName = styleMapStyleName \"\"\"string. StyleMap stylename for this instance. MutatorMath", "with different objects, as long as they have the same", "= ``-math.inf``) \"\"\" self.userDefault: Optional[float] = userDefault \"\"\"New default value", "minor). .. versionadded:: 5.0 \"\"\" if self.formatVersion is None: return", "raise DesignSpaceDocumentError( \"The axis-subset element for a discrete subset must", "elidable = True if labelElement.get(\"elidable\") == \"true\" else False olderSibling", "locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation')) glyphElement.append(locationElement) if glyphName is not None:", "outputValue in axisObject.map: mapElement = ET.Element('map') mapElement.attrib['input'] = self.intOrFloat(inputValue) mapElement.attrib['output']", "of the layer in the source to look for outline", "valid: %r, \" \"expected 'first' or 'last'\" % processingValue) self.documentObject.rulesProcessingLast", "userLocation = self.locationFromElement(labelElement) if designLocation: raise DesignSpaceDocumentError(f'<label> element \"{name}\" must", "ruleObject in self.documentObject.rules: self._addRule(ruleObject) if self.documentObject.sources: self.root.append(ET.Element(\"sources\")) for sourceObject in", "= float(userValueStr) return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue) else: xml_attrs = {'name', 'userminimum',", "raise DesignSpaceDocumentError(\"label element must have a name attribute.\") valueStr =", "self.path is not None: sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename)) else: sourcePath", "<https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_ .. versionadded:: 5.0 \"\"\" self.axisLabels: List[AxisLabelDescriptor] = axisLabels or", ".. 
versionadded:: 5.0 \"\"\" self.layerName = layerName \"\"\"string. The name", "lang) for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'): for key, lang in styleMapStyleNameElement.items():", "names = [] for axisDescriptor in self.axes: names.append(axisDescriptor.name) return names", "= '//' + new_path return new_path def posixpath_property(private_name): \"\"\"Generate a", "is not None: raise DesignSpaceDocumentError('instance element must have at most", "filename attr. The file may or may not exist. MutatorMath.", "- ``axis.default``: default axis value. No anisotropy. .. versionadded:: 5.0", "file from ``path`` and return a new instance of :class:.", "space coordinates.\"\"\" # Without OrderedDict, output XML would be non-deterministic.", "self.intOrFloat(subset.userMaximum) if subset.userDefault is not None: subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault) elif", "if int(num) == num: return \"%d\" % num return (\"%f\"", "\"\"\" self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or [] \"\"\"Axis subsets", "for label in axisObject.axisLabels: self._addAxisLabel(labelsElement, label) axisElement.append(labelsElement) if isinstance(axisObject, AxisDescriptor):", "= ET.Element('glyphs') instanceElement.append(glyphsElement) glyphsElement = instanceElement.findall('.glyphs')[0] for glyphName, data in", "discrete axis data. Use this for axes that do not", "= self.documentObject.newDefaultLocation() for axisName, axisValue in locationObject.items(): if axisName in", "name from :attr:`labelNames` or the :attr:`name`.\"\"\" return self.labelNames.get(\"en\") or self.name", "in the document. \"\"\" self.locationLabel = locationLabel \"\"\"Name of a", "= axisDescriptor.map_forward( axisDescriptor.default ) return loc def labelForUserLocation(self, userLocation: SimpleLocationDict)", "of this document's sources.\"\"\" self.variableFonts: List[VariableFontDescriptor] = [] \"\"\"List of", "< (5, 0): minVersion = (5, 0) return minVersion def", ":meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\" self.designLocation: AnisotropicLocationDict = designLocation", "designLocation: AnisotropicLocationDict = None, userLocation: SimpleLocationDict = None ): locElement", "self._axes = [] self.rules = [] self.sources = [] self.instances", "add the rule. ruleElement = ET.Element('rule') if ruleObject.name is not", "be mapped. \"\"\" if isinstance(value, tuple): value = value[0] return", "the default location for that axis. When the input has", "See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_ .. versionadded:: 5.0", "def addLocationLabelDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`LocationLabelDescriptor` using the given", "fonts defined in this document, or implicit variable fonts that", "None and not force: continue if self.path is not None:", "result[axis.name] = self.designLocation[axis.name] elif axis.name in self.userLocation: result[axis.name] = axis.map_forward(self.userLocation[axis.name])", "not None: instanceElement.attrib['location'] = instanceObject.locationLabel if instanceObject.familyName is not None:", "if masterGlyphName is None: # if we don't read a", "None: sourceElement.attrib['familyname'] = sourceObject.familyName if sourceObject.styleName is not None: sourceElement.attrib['stylename']", "extra options passed on to the opener function. 
Returns: List", "DesignSpaceDocument): self.path = documentPath self.documentObject = documentObject self.effectiveFormatTuple = self._getEffectiveFormatTuple()", "independently by taking the first not-None field in this list:", "stored as tuples of glyphnames, e.g. (\"a\", \"a.alt\"). - Note:", "not None: return doc.map_forward(label.userLocation) # type: ignore result: AnisotropicLocationDict =", "new list of glyphNames with substitutions applied. - rule order", "= \"1\" if sourceObject.muteInfo: infoElement.attrib['mute'] = \"1\" sourceElement.append(infoElement) if sourceObject.muteKerning:", "= ET.Element(\"glyph\") glyphElement.attrib[\"name\"] = name glyphElement.attrib[\"mute\"] = '1' sourceElement.append(glyphElement) if", "in ruleObject.conditionSets: conditionsetElement = ET.Element('conditionset') for cond in conditions: if", "is useful for the STAT table, however it can't become", "the document contains no axis definitions, self.log.warning(\"Location with undefined axis:", "{'name', 'filename'} for variableFontElement in self.root.findall(\".variable-fonts/variable-font\"): unknown_attrs = set(variableFontElement.attrib) -", "XML_NS = \"{http://www.w3.org/XML/1998/namespace}\" XML_LANG = XML_NS + \"lang\" def posix(path):", "None: instanceElement.attrib['familyname'] = instanceObject.familyName if instanceObject.styleName is not None: instanceElement.attrib['stylename']", "the string in the filename attr. The file may or", "it to ``doc.sources``. \"\"\" source = self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source) return source", "the instances. MutatorMath. .. deprecated:: 5.0 \"\"\" self.muteKerning = muteKerning", "if axisName in self.userLocation: del self.userLocation[axisName] def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument')", "Though this data can be extracted from the font, it", "ruleObject.name = ruleElement.attrib.get(\"name\") # read any stray conditions outside a", ") # read the conditionsets for conditionSetElement in ruleElement.findall('.conditionset'): conditionSet", "None: basename = os.path.splitext(self.filename)[0] + \"-VF\" if self.path is not", "axis (same as a :class:`ValueAxisSubsetDescriptor`). \"\"\" self.lib: MutableMapping[str, Any] =", "return (\"%f\" % num).rstrip('0').rstrip('.') def _addRule(self, ruleObject): # if none", "maximum \"\"\"number. 
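Putting together the ``loadSourceFonts`` usage that the docstring fragments above describe, with the opener callable and extra options forwarded to it; the file path is a placeholder:

.. code:: python

    from fontTools import ttLib
    from fontTools.designspaceLib import DesignSpaceDocument

    designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
    fonts = designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False)
    # Each SourceDescriptor.font is now populated; the returned list
    # follows the order of designspace.sources.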
The maximum value for this axis in user", "instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang) designLocation, userLocation = self.locationFromElement(instanceElement) locationLabel = instanceElement.attrib.get('location') if", "font return res finally: for source, font in zip(self.sources, fonts):", "label.labelNames) axisElement.append(labelElement) def _addLabelNames(self, parentElement, labelNames): for languageCode, labelName in", "List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or [] \"\"\"Axis subsets to include", ":class:`AxisDescriptor` to subset.\"\"\" self.userMinimum: float = userMinimum \"\"\"New minimum value", "i2.styleMapStyleName = \"InstanceStyleMapStyleName\" i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever' doc.addInstance(i2) \"\"\" flavor =", "class SourceDescriptor(SimpleDescriptor): \"\"\"Simple container for data related to the source", "label.userMaximum is not None: labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum) labelElement.attrib['name'] = label.name", "labelElement.attrib['oldersibling'] = \"true\" if label.linkedUserValue is not None: labelElement.attrib['linkeduservalue'] =", "including extra options: designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) Args: opener (Callable): takes one", "may not exist. If not specified, the :attr:`name` will be", "= self.writerClass.sourceDescriptorClass(**kwargs) self.addSource(source) return source def addInstance(self, instanceDescriptor: InstanceDescriptor): \"\"\"Add", "featuresElement in sourceElement.findall(\".features\"): if featuresElement.attrib.get('copy') == '1': sourceObject.copyFeatures = True", "can describe a warp of user space to design space", "the Axis value depends on which field are filled-in, see", "code in languageCodes: if code == \"en\": continue localisedStyleMapStyleNameElement =", "very basic **descriptor** objects that store the data in attributes.", "`default` values in user space of all axes. This function", "has no maximum, check for > minimum. \"\"\" for cd", "unicodes.split(\" \")] glyphData['unicodes'] = unicodes except ValueError: raise DesignSpaceDocumentError(\"unicode values", "this source, from its :attr:`designLocation` and the document's axis defaults.", "need to identify and respond to the following situations: In", "(design) to input (user).\"\"\" from fontTools.varLib.models import piecewiseLinearMap if isinstance(v,", "= True for groupsElement in sourceElement.findall('.groups'): if groupsElement.attrib.get('copy') == '1':", "design location. Assume that missing coordinates are at the default", "\"\"\"STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2).\"\"\"", "continue localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname') localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code)", "# will allow these to be None, assume axis.maximum cd['maximum']", "\"\"\" _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering',", "anisotropic location i2.designLocation = dict(weight=500, width=(400,300)) i2.postScriptFontName = \"InstancePostscriptName\" i2.styleMapFamilyName", "= \"axis-subset\" _attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum') def __init__(self,", "\"\"\"dict. A dictionary of localised family name strings, keyed by", "substitution features. - False: before - True: after. 
Default is", "item.location = self.normalizeLocation(item.location) # instances for item in self.instances: #", "deepcopyExceptFonts(self): \"\"\"Allow deep-copying a DesignSpace document without deep-copying attached UFO", "python instance.clearLocation() instance.designLocation = {'Weight': (34, 36.5), 'Width': 100} instance.userLocation", "variableFontElement.find(\".lib\") if libElement is not None: lib = plistlib.fromtree(libElement[0]) variableFont", "readAxisSubset(self, element: ET.Element): if \"uservalue\" in element.attrib: xml_attrs = {'name',", "in this field, or write the contents of this field", "is not None: cd['maximum'] = float(cdMax) else: # will allow", "self.map: return v return piecewiseLinearMap(v, {k: v for k, v", "glyphSources = None for masterElement in glyphElement.findall('.masters/master'): fontSourceName = masterElement.attrib.get('source')", "return rule def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor): \"\"\"Add the given ``variableFontDescriptor``", "of a :class:`LocationLabelDescriptor`. If provided, the instance should have the", "minVersion = self.documentObject.formatTuple if ( any( isinstance(axis, DiscreteAxisDescriptor) or axis.axisOrdering", "in self.locationLabels if label.userLocation == userLocation), None ) def updateFilenameFromPath(self,", "a: swap = True break if swap: newNames.append(b) else: newNames.append(name)", "ruleObject.subs.append((a, b)) rules.append(ruleObject) self.documentObject.rules = rules def _readConditionElements(self, parentElement, ruleName=None):", "document was implicitly describing a variable font that covers the", "else: newNames.append(name) glyphNames = newNames newNames = [] return glyphNames", "Axis value depends on which field are filled-in, see :meth:`getFormat`", "a designspace file from ``path`` and populates the fields of", "STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_ The", "easy to integrate this object in different contexts. The **DesignSpaceDocument**", "for conditionSetElement in ruleElement.findall('.conditionset'): conditionSet = self._readConditionElements( conditionSetElement, ruleName, )", "design location. See :meth:`getFullDesignLocation()` .. versionadded:: 5.0 \"\"\" self.layerName =", "default=None, maximum=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ): super().__init__( tag=tag, name=name,", "continuous axes. In the case of Designspace documents before version", "axisObject.maximum = float(axisElement.attrib.get(\"maximum\")) axisObject.default = float(axisElement.attrib.get(\"default\")) axisObject.name = axisElement.attrib.get(\"name\") if", "the given ``locationLabelDescriptor`` to :attr:`locationLabels`. .. versionadded:: 5.0 \"\"\" self.locationLabels.append(locationLabelDescriptor)", "if this instance needs the interpolating font.info calculated. .. 
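Per the note above that "last" rule processing is usually what new projects want, a one-line sketch of opting in on the document:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument

    doc = DesignSpaceDocument()
    # Serialized as <rules processing="last"> so substitutions apply
    # after other OpenType layout.
    doc.rulesProcessingLast = True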
deprecated::", "new_path elif path.startswith(r'\\\\'): # The above transformation loses leading slashes", "instanceDescriptorClass = InstanceDescriptor @classmethod def getAxisDecriptor(cls): return cls.axisDescriptorClass() @classmethod def", "instanceElement.findall('.glyphs/glyph'): self.readGlyphElement(glyphElement, instanceObject) for infoElement in instanceElement.findall(\"info\"): self.readInfoElement(infoElement, instanceObject) for", "self.documentObject.rules = rules def _readConditionElements(self, parentElement, ruleName=None): cds = []", "subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum) if subset.userMaximum != math.inf: subsetElement.attrib['usermaximum'] = self.intOrFloat(subset.userMaximum)", "None: self.readerClass = readerClass else: self.readerClass = BaseDocReader if writerClass", "axisObject = self.axisDescriptorClass() axisObject.minimum = float(axisElement.attrib.get(\"minimum\")) axisObject.maximum = float(axisElement.attrib.get(\"maximum\")) axisObject.default", "if label is None: raise DesignSpaceDocumentError( 'InstanceDescriptor.getLocationLabelDescriptor(): ' f'unknown location", "MutatorMath. .. deprecated:: 5.0 \"\"\" self.copyGroups = copyGroups \"\"\"bool. Indicates", "code:: xml <lib> <dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib> \"\"\" self.sources:", "self.name class LocationLabelDescriptor(SimpleDescriptor): \"\"\"Container for location label data. Analogue of", "= opener(source.path, **kwargs) loaded[source.path] = source.font fonts.append(source.font) return fonts @property", "= SourceDescriptor() s1.path = masterPath1 s1.name = \"master.ufo1\" s1.font =", "Varlib. \"\"\" self.designLocation = designLocation if designLocation is not None", "__repr__(self): attrs = [f\"{a}={repr(getattr(self, a))},\" for a in self._attrs] attrs", "variable font with many axes, define subsets of that variable", "*, designLocation: AnisotropicLocationDict = None, userLocation: SimpleLocationDict = None ):", "return doc.map_forward(label.userLocation) # type: ignore result: AnisotropicLocationDict = {} for", "in self.sources: item.location = self.normalizeLocation(item.location) # instances for item in", "variable fonts. .. versionadded:: 5.0\"\"\" self.instances: List[InstanceDescriptor] = [] \"\"\"List", "document.\"\"\" names = [] for axisDescriptor in self.axes: names.append(axisDescriptor.name) return", "tostr(familyName) def getFamilyName(self, languageCode=\"en\"): return self.localisedFamilyName.get(languageCode) def setStyleMapStyleName(self, styleMapStyleName, languageCode=\"en\"):", "If a condition has no minimum, check for < maximum.", "languageCodes.sort() for code in languageCodes: if code == \"en\": continue", "if axisObject.hidden: axisElement.attrib['hidden'] = \"1\" self.root.findall('.axes')[0].append(axisElement) def _addAxisLabel(self, axisElement: ET.Element,", ".. code:: python instance.clearLocation('Weight') instance.designLocation['Weight'] = (34, 36.5) Args: axisName:", "def posix(path): \"\"\"Normalize paths using forward slash to work also", "'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader', 'BaseDocWriter' ] # ElementTree allows to find", "None: axesElement = ET.Element(\"axes\") if self.documentObject.elidedFallbackName is not None: axesElement.attrib['elidedfallbackname']", ".. 
versionadded:: 5.0 \"\"\" if self.formatVersion is None: return (5,", "self.read(path) return self @classmethod def fromstring(cls, string, readerClass=None, writerClass=None): self", "space, as in [(minimum, minimum), (maximum, maximum)]. Varlib. \"\"\" self.axisOrdering", "locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location) instanceElement.append(locationElement) if instanceObject.filename is not None:", "or {} \"\"\"dict. A dictionary of localised family name strings,", "intend that value to be mapped. \"\"\" if isinstance(value, tuple):", "value to be mapped. \"\"\" return next((v for k, v", "as well. **Note:** Python attribute names are usually camelCased, the", "= ET.Element('lib') libElement.append(plistlib.totree(data, indent_level=indent_level)) parentElement.append(libElement) def _writeGlyphElement(self, instanceElement, instanceObject, glyphName,", "self.normalizeLocation(item.location) # the axes for axis in self.axes: # scale", "if minVersion < (5, 0): minVersion = (5, 0) return", "\"\"\" if readerClass is not None: self.readerClass = readerClass else:", "is not None: instanceElement.attrib['stylename'] = instanceObject.styleName # add localisations if", "conditions outside a condition set externalConditions = self._readConditionElements( ruleElement, ruleName,", "xvalue=\"\").') if designLocation is not None: glyphData['instanceLocation'] = designLocation glyphSources", "False): axisObject.hidden = True axisObject.tag = axisElement.attrib.get(\"tag\") for mapElement in", "may be only part of the full location. See: :meth:`getFullUserLocation`", "this instance, in design space coordinates. MutatorMath + Varlib. ..", "languageElement.text = labelName parentElement.append(languageElement) def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor)", "versionadded:: 5.0 \"\"\" self.userLocation: SimpleLocationDict = userLocation or {} \"\"\"dict.", "versionadded:: 5.0 \"\"\" locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs) self.addLocationLabel(locationLabel) return locationLabel def", "along each axis. If an axis is not mentioned, it", "See :meth:`SourceDescriptor.getFullDesignLocation()` \"\"\" self.default = None # Convert the default", "mutedGlyphNames or [] \"\"\"list. Glyphnames that need to be muted", "for the designspace, as fonts at each value can have", "objects, filling them with data and then adding them to", "= postScriptFontName styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname') if styleMapFamilyName is not None:", "result[axis.name] = axis.map_forward(axis.default) return result class RuleDescriptor(SimpleDescriptor): \"\"\"Represents the rule", "axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum) ] new[axis.name] =", "The name of the layer in the source to look", "instance. MutatorMath + Varlib. 
\"\"\" self.localisedFamilyName = localisedFamilyName or {}", "data.get('mute'): glyphElement.attrib['mute'] = \"1\" if data.get('unicodes') is not None: glyphElement.attrib['unicode']", "tag=tag, name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.default: float", "vfElement.attrib['name'] = vf.name if vf.filename is not None: vfElement.attrib['filename'] =", "sourceObject.copyInfo: infoElement.attrib['copy'] = \"1\" if sourceObject.muteInfo: infoElement.attrib['mute'] = \"1\" sourceElement.append(infoElement)", "minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) else: minimum = None if cond.get('maximum')", "if attr == XML_LANG # Note: elementtree reads the \"xml:lang\"", "self) writer.write() def _posixRelativePath(self, otherPath): relative = os.path.relpath(otherPath, os.path.dirname(self.path)) return", "STAT format 4 label. No anisotropy. - ``designLocation[axisName]``: the explicit", "Axis value to use to encode this label. =========== =========", "*, filename=None, path=None, font=None, name=None, location=None, designLocation=None, layerName=None, familyName=None, styleName=None,", "STAT format of the Axis value depends on which field", "that value to be mapped. \"\"\" return next((v for k,", "font with many axes, define subsets of that variable font", "one we have masterGlyphName = glyphName d = dict(font=fontSourceName, location=designLocation,", "value is less important than in continuous axes: - it", "rule data.\"\"\" # list of lists of dict(name='aaaa', minimum=0, maximum=1000)", "preserved below. if instanceObject.location is not None: locationElement, instanceObject.location =", "5.0 \"\"\" self.layerName = layerName \"\"\"string. The name of the", "clearing everything). In order to update the location of this", "axisSubsetsElement is None: raise DesignSpaceDocumentError(\"variable-font element must contain an axis-subsets", "rules --> <rules> <rule name=\"vertical.bars\"> <conditionset> <condition minimum=\"250.000000\" maximum=\"750.000000\" name=\"weight\"/>", "raise DesignSpaceDocumentError(f'Missing xvalue=\"\" for the location dimension \"{dimName}\"\" with yvalue=\"{yValue}\"')", "given ``name``, or ``None`` if no such axis exists.\"\"\" for", "return libElement = ET.Element('lib') libElement.append(plistlib.totree(data, indent_level=indent_level)) parentElement.append(libElement) def _writeGlyphElement(self, instanceElement,", "it out, as preserved below. if instanceObject.location is not None:", "user values. See: `OTSpec STAT Axis value table, format 4", "key, lang in styleNameElement.items(): if key == XML_LANG: styleName =", "camelCased, the corresponding `XML <document-xml-structure>`_ attributes are usually all lowercase.", "] d[attr] = value return d class SimpleDescriptor(AsDictMixin): \"\"\" Containers", ":meth:`read` or :meth:`fromfile`. \"\"\" self.filename = None \"\"\"String, optional. When", "not None: instanceElement.attrib['familyname'] = instanceObject.familyName if instanceObject.styleName is not None:", "be built from the document's continuous axes. 
In the case", "instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename) else: instancePath = None instanceObject =", "None, userLocation: SimpleLocationDict = None ): locElement = ET.Element(\"location\") for", "❌ 2 ✅ ✅ ✅ ❌ 3 ✅ ❌ ❌", "instanceObject.postScriptFontName if instanceObject.styleMapFamilyName is not None: instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName if", "long as they have the same attributes. Reader and Writer", "not integers\" % unicodes) for noteElement in glyphElement.findall('.note'): glyphData['note'] =", "self.lib = lib or {} \"\"\"Custom data associated with this", "'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames') def __init__( self, *,", "not exist. MutatorMath + VarLib. \"\"\" self.path = path \"\"\"string.", "[] \"\"\"a list of conditionsets. - Each conditionset is a", "location=designLocation, glyphName=masterGlyphName) if glyphSources is None: glyphSources = [] glyphSources.append(d)", "this document path. If the filename attribute is not None:", "it in this field, or write the contents of this", "features. - False: before - True: after. Default is False.", "for attr, value in self.__dict__.items(): if attr.startswith(\"_\"): continue if hasattr(value,", "for instanceElement in instanceElements: self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) def _readSingleInstanceElement(self,", "axes, define subsets of that variable font that only include", "```filename`` point to that. \"\"\" self.name = name \"\"\"string. Optional.", "the whole document. Use reverse-DNS notation to identify your own", "= os.path.splitext(os.path.basename(self.path))[0] + \"-VF\" if basename is None: basename =", "None newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum)) newConditionSets.append(newConditions) rule.conditionSets = newConditionSets def loadSourceFonts(self,", "self.writerClass = writerClass else: self.writerClass = BaseDocWriter @classmethod def fromfile(cls,", "descriptor.filename = self._posixRelativePath(descriptor.path) if instances: for descriptor in self.instances: if", "name \"\"\"Name of the :class:`AxisDescriptor` to subset.\"\"\" self.userMinimum: float =", "axis mapping's output (design) to input (user).\"\"\" from fontTools.varLib.models import", ":attr:`labelNames` or the :attr:`name`.\"\"\" return self.labelNames.get(\"en\") or self.name def getFullUserLocation(self,", "self.addVariableFont(variableFont) return variableFont def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor): \"\"\"Add the given", "all the locations of all masters and instances to the", "posixpath_property(private_name): \"\"\"Generate a propery that holds a path always using", "self.documentObject.formatTuple < (5, 0): return xml_attrs = {'name', 'filename'} for", "Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_ .. versionadded:: 5.0 \"\"\" class AxisDescriptor(AbstractAxisDescriptor):", "= False for a, b in rule.subs: if name ==", "self.designLocation = designLocation if designLocation is not None else location", "to encode what the document contains. 
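The surrounding fragments describe format 5 variable-font subsets: a big design space sliced into variable fonts via range and value axis subsets. A hypothetical sketch with one Roman variable font pinned at Italic=0 and spanning the full Weight range:

.. code:: python

    from fontTools.designspaceLib import (
        DesignSpaceDocument, DiscreteAxisDescriptor,
        RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor,
        VariableFontDescriptor,
    )

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        tag="wght", name="Weight", minimum=100, default=400, maximum=900,
    )
    doc.addAxis(DiscreteAxisDescriptor(
        tag="ital", name="Italic", values=[0, 1], default=0,
    ))
    doc.addVariableFont(VariableFontDescriptor(
        name="MyFamily-Roman-VF",          # placeholder name
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="Weight"),   # full range
            ValueAxisSubsetDescriptor(name="Italic", userValue=0),
        ],
    ))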
\"\"\" minVersion = self.documentObject.formatTuple", "sourceDescriptorClass = SourceDescriptor variableFontsDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass", "= maximum axis.default = default # now the rules for", "the sources list. \"\"\" # we load fonts with the", "glyphElement.findall('.note'): glyphData['note'] = noteElement.text break designLocation, userLocation = self.locationFromElement(glyphElement) if", "userValueStr = element.get(\"uservalue\") if userValueStr is None: raise DesignSpaceDocumentError( \"The", "== \"en\": continue localisedStyleMapStyleNameElement = ET.Element('stylemapstylename') localisedStyleMapStyleNameElement.attrib[XML_LANG] = code localisedStyleMapStyleNameElement.text", "Normalise the geometry of this designspace: - scale all the", "not None: ruleElement.attrib['name'] = ruleObject.name for conditions in ruleObject.conditionSets: conditionsetElement", "axisObject.minimum = float(axisElement.attrib.get(\"minimum\")) axisObject.maximum = float(axisElement.attrib.get(\"maximum\")) axisObject.default = float(axisElement.attrib.get(\"default\")) axisObject.name", "if self.documentObject.formatTuple < (5, 0): return xml_attrs = {'name', 'filename'}", "'first' or 'last'\" % processingValue) self.documentObject.rulesProcessingLast = processingValue == \"last\"", "None and self.documentObject.path is not None: instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename)", "@classmethod def getRuleDescriptor(cls): return cls.ruleDescriptorClass() def __init__(self, documentPath, documentObject: DesignSpaceDocument):", "get in user space. However, this default value is less", "\"MasterFamilyName\" s1.styleName = \"MasterStyleNameOne\" s1.localisedFamilyName = dict(fr=\"Caractère\") s1.mutedGlyphNames.append(\"A\") s1.mutedGlyphNames.append(\"Z\") doc.addSource(s1)", "value of a discrete or continuous axis to use in", "5 and above documents, there can be as many variable", "Add more localisations? .. code:: python a1 = AxisDescriptor() a1.minimum", "See: `OTSpec STAT Axis value table, format 1, 2, 3", "= [] for axis in self.axes: if isinstance(axis, DiscreteAxisDescriptor): discreteAxes.append(axis)", "return self.variableFonts variableFonts = [] discreteAxes = [] rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor,", "locationObject, name=None): \"\"\" Convert Location dict to a locationElement.\"\"\" locElement", "+ Varlib. \"\"\" self.styleName = styleName \"\"\"string. Style name of", "touched. case 3. descriptor.filename == None descriptor.path == \"~/absolute/path/there\" --", "ET.Element('familyname') localisedFamilyNameElement.attrib[XML_LANG] = code localisedFamilyNameElement.text = instanceObject.getFamilyName(code) instanceElement.append(localisedFamilyNameElement) if instanceObject.localisedStyleMapStyleName:", "default \"\"\"The default value for this axis, i.e. when a", "big variable font with many axes, define subsets of that", "xValue = dimensionElement.attrib.get('xvalue') if xValue is not None: xValue =", "tree = ET.ElementTree(self.root) tree.write( self.path, encoding=encoding, method='xml', xml_declaration=xml_declaration, pretty_print=pretty, )", "that a continuous axis has a ``minimum`` and ``maximum``, while", "sourceElement.attrib.get(\"familyname\") if familyName is not None: sourceObject.familyName = familyName styleName", "its path. When the document is produced by a Python", "\"\"\"Get the complete user location for this instance. .. 
seealso::", "slashes of UNC path mounts new_path = '//' + new_path", "in zip(discreteAxes, values)]) variableFonts.append(VariableFontDescriptor( name=f\"{basename}{axisNames}\", axisSubsets=rangeAxisSubsets + [ ValueAxisSubsetDescriptor(name=axis.name, userValue=value)", "for this instance. .. seealso:: :meth:`getFullDesignLocation` .. versionadded:: 5.0 \"\"\"", "RuleDescriptor(SimpleDescriptor): \"\"\"Represents the rule descriptor element: a set of glyph", "= InstanceDescriptor @classmethod def getAxisDecriptor(cls): return cls.axisDescriptorClass() @classmethod def getSourceDescriptor(cls):", "ruleElement = ET.Element('rule') if ruleObject.name is not None: ruleElement.attrib['name'] =", "if infoElement.attrib.get('mute') == '1': sourceObject.muteInfo = True for featuresElement in", "the filename attribute is not None: skip it. \"\"\" if", "linkedUserValue=linkedValue, labelNames=labelNames, ) def readLabels(self): if self.documentObject.formatTuple < (5, 0):", "ufo source sourceObject.filename = filename # path as it is", "= axesElement.attrib['elidedfallbackname'] axisElements = self.root.findall(\".axes/axis\") if not axisElements: return for", "self.documentObject.sources.append(sourceObject) def locationFromElement(self, element): \"\"\"Read a nested ``<location>`` element inside", "{} self.root.append(ET.Element(\"rules\", attributes)) for ruleObject in self.documentObject.rules: self._addRule(ruleObject) if self.documentObject.sources:", "or the :attr:`name`.\"\"\" return self.labelNames.get(\"en\") or self.name def getFullUserLocation(self, doc:", "= None try: userValue = dimensionElement.attrib.get('uservalue') if userValue is not", "a representation of this source font that is loaded in", "to integrate this object in different contexts. The **DesignSpaceDocument** object", "map_backward(self, v): \"\"\"Maps value from axis mapping's output (design) to", "AnisotropicLocationDict: \"\"\"Get the complete design location of this instance, by", "self.formatVersion.split(\".\")) major = next(numbers) minor = next(numbers, 0) return (major,", "explicit alias for this property :attr:`designLocation`. \"\"\" return self.designLocation @location.setter", "= sourceObject.name if sourceObject.familyName is not None: sourceElement.attrib['familyname'] = sourceObject.familyName", "= [] for source in self.sources: if source.font is not", "\"\"\"string. Optional. Unique identifier name for this source. MutatorMath +", "\"\"\" self.path = path \"\"\"string. Absolute path to the instance", "Style name of this instance. MutatorMath + Varlib. \"\"\" self.postScriptFontName", "in testing return dict( tag=self.tag, name=self.name, labelNames=self.labelNames, maximum=self.maximum, minimum=self.minimum, default=self.default,", "def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]: \"\"\"Get the :class:`LocationLabelDescriptor` instance", "= kerning \"\"\" bool. Indicates if this instance needs its", "\"\"\" Read the info element.\"\"\" instanceObject.info = True def readGlyphElement(self,", "not None: designLoc[dimName] = xValue else: userLoc[dimName] = userValue return", "unknown_attrs = set(labelElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"Label element", "for libElement in sourceElement.findall('.lib'): if libElement.attrib.get('copy') == '1': sourceObject.copyLib =", "for key, lang in styleMapFamilyNameElement.items(): if key == XML_LANG: styleMapFamilyName", "to look for outline data. 
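Illustrating ``getFullUserLocation``/``getFullDesignLocation`` as described above: axes missing from the instance's partial location fall back to the document's axis defaults. Numbers are illustrative:

.. code:: python

    from fontTools.designspaceLib import DesignSpaceDocument, InstanceDescriptor

    doc = DesignSpaceDocument()
    doc.addAxisDescriptor(
        tag="wght", name="Weight", minimum=100, default=400, maximum=900,
    )
    doc.addAxisDescriptor(
        tag="wdth", name="Width", minimum=50, default=100, maximum=200,
    )

    instance = InstanceDescriptor(userLocation=dict(Weight=700))
    # Width is unspecified, so it resolves to the axis default.
    assert instance.getFullUserLocation(doc) == dict(Weight=700, Width=100)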
Default ``None`` which means ``foreground``.", "flavor = \"instance\" _defaultLanguageCode = \"en\" _attrs = ['filename', 'path',", "this axis, possibly anisotropic. - ``userLocation[axisName]``: the explicit user location", "now the rules for rule in self.rules: newConditionSets = []", "identifier name for this source. MutatorMath + Varlib. \"\"\" self.designLocation", "BytesIO() encoding = \"UTF-8\" xml_declaration = True else: raise ValueError(\"unsupported", "location): \"\"\"Return a dict with normalized axis values.\"\"\" from fontTools.varLib.models", "if subset.userMaximum != math.inf: subsetElement.attrib['usermaximum'] = self.intOrFloat(subset.userMaximum) if subset.userDefault is", "\"{name}\" must only have user locations (using uservalue=\"\").') elidable =", "self.locationLabelDescriptorClass( name=name, userLocation=userLocation, elidable=elidable, olderSibling=olderSibling, labelNames=labelNames, ) self.documentObject.locationLabels.append(locationLabel) def readVariableFonts(self):", "zip(discreteAxes, values) ] )) return variableFonts def deepcopyExceptFonts(self): \"\"\"Allow deep-copying", "a name attribute.\") designLocation, userLocation = self.locationFromElement(labelElement) if designLocation: raise", "font. If an axis is not mentioned, assume that we", "labelElement = axisElement.find(\".labels\") if labelElement is not None: if \"ordering\"", "full location. See: :meth:`getFullUserLocation` \"\"\" self.elidable: bool = elidable \"\"\"STAT", "if axisObject.map: for inputValue, outputValue in axisObject.map: mapElement = ET.Element('map')", "Convert the default location from user space to design space", "in self.documentObject.axes: if designLocation is not None and axis.name in", "not None: try: unicodes = [int(u, 16) for u in", "5.0 \"\"\" self.copyGroups = copyGroups \"\"\"bool. Indicates if the groups", "Default is False. For new projects, you probably want True.", "userMinimum \"\"\"STAT field ``rangeMinValue`` (format 2).\"\"\" self.userValue: float = userValue", "instance needs the interpolating font.info calculated. .. deprecated:: 5.0 \"\"\"", "copyLib=False, copyInfo=False, copyGroups=False, copyFeatures=False, muteKerning=False, muteInfo=False, mutedGlyphNames=None, ): self.filename =", "if designLocation is not None else (location or {}) \"\"\"dict.", "makeGlyphs=True, makeKerning=True, makeInfo=True): instanceElements = self.root.findall('.instances/instance') for instanceElement in instanceElements:", "attribute\") mute = glyphElement.attrib.get(\"mute\") if mute == \"1\": glyphData['mute'] =", "new font object (e.g. TTFont, or defcon.Font, etc.) from the", "and top-level location labels. 
The source of truth for this", "is not None and userDefault is not None and userMaximum", "of this field to the disk and make ```filename`` point", "= sourceObject.getFamilyName(code) sourceElement.append(localisedFamilyNameElement) if sourceObject.copyLib: libElement = ET.Element('lib') libElement.attrib['copy'] =", "is not None: sourceObject.familyName = familyName styleName = sourceElement.attrib.get(\"stylename\") if", "**kwargs): \"\"\"Ensure SourceDescriptor.font attributes are loaded, and return list of", "'axisOrdering', 'axisLabels') def __init__( self, *, tag=None, name=None, labelNames=None, values=None,", "\"\"\" return self.localisedFamilyName.get(languageCode) def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict: \"\"\"Get", "def __init__( self, *, name, userValue, userMinimum=None, userMaximum=None, elidable=False, olderSibling=False,", "to :attr:`locationLabels`. .. versionadded:: 5.0 \"\"\" self.locationLabels.append(locationLabelDescriptor) def addLocationLabelDescriptor(self, **kwargs):", "order they appear in the sources list. \"\"\" # we", "``OLDER_SIBLING_FONT_ATTRIBUTE``. See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.labelNames: Dict[str, str]", "= labelElement.get(\"name\") if name is None: raise DesignSpaceDocumentError(\"label element must", "in conditions: if cond.get('minimum') is not None: minimum = self.normalizeLocation({cond['name']:", "= styleMapFamilyName \"\"\"string. StyleMap familyname for this instance. MutatorMath +", "if name.lower() in names: return names[name.lower()] if len(name) < 4:", "to be at its default location. .. seealso:: This may", "= DiscreteAxisDescriptor() a2.values = [0, 1] a2.name = \"Italic\" a2.tag", ".. deprecated:: 5.0 \"\"\" self.copyFeatures = copyFeatures \"\"\"bool. Indicates if", "sourceObject.name is not None: if sourceObject.name.find(\"temp_master\") != 0: # do", "space coordinates. If no map values are present, it is", "raise DesignSpaceDocumentError(f\"Label element contains unknown attributes: {', '.join(unknown_attrs)}\") name =", "= instanceObject.filename if instanceObject.postScriptFontName is not None: instanceElement.attrib['postscriptfontname'] = instanceObject.postScriptFontName", "return \"%d\" % num return (\"%f\" % num).rstrip('0').rstrip('.') def _addRule(self,", "= instanceElement.findall('.glyphs')[0] for glyphName, data in sorted(instanceObject.glyphs.items()): glyphElement = self._writeGlyphElement(instanceElement,", "= variableFontElement.get(\"name\") if name is None: raise DesignSpaceDocumentError(\"variable-font element must", "describing a variable font that covers the whole space. In", "attached UFO fonts or TTFont objects. The :attr:`font` attribute is", "= ET.Element('axis') axisElement.attrib['tag'] = axisObject.tag axisElement.attrib['name'] = axisObject.name self._addLabelNames(axisElement, axisObject.labelNames)", "in self.sources: if descriptor.filename is not None and not force:", "tagForAxisName(name): # try to find or make a tag name", "'minimum', 'default', 'map', 'axisOrdering', 'axisLabels'] def __init__( self, *, tag=None,", "family name strings, keyed by language code. 
If present, will", "self.readerClass = BaseDocReader if writerClass is not None: self.writerClass =", "ET.Element): if \"uservalue\" in element.attrib: xml_attrs = {'name', 'uservalue'} unknown_attrs", "axes: - it doesn't define the \"neutral\" version of outlines", "labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.default: float = default", "item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) for glyphMaster in glyphData['masters']: glyphMaster['location'] =", "next( (label for label in self.locationLabels if label.userLocation == userLocation),", "= [ AxisLabelDescriptor(name=\"Roman\", userValue=0, elidable=True) ] doc.addAxis(a2) .. versionadded:: 5.0", "externalConditions = self._readConditionElements( ruleElement, ruleName, ) if externalConditions: ruleObject.conditionSets.append(externalConditions) self.log.info(", "%3.3f\", yValue) if userValue is None == xValue is None:", "use in a variable font. .. versionadded:: 5.0 \"\"\" flavor", "\" axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass = AxisLabelDescriptor", "\"\"\"bool. Indicated if this instance needs the interpolating font.info calculated.", "Normal getter return getattr(self, private_name) def setter(self, value): # The", "Assume that missing coordinates are at the default location for", "from :attr:`labelNames` or the :attr:`name`.\"\"\" return self.labelNames.get(\"en\") or self.name class", "save the file somewhere. \"\"\" self.formatVersion: Optional[str] = None \"\"\"Format", "cond.get('maximum') is None: # neither is defined, don't add this", "instances. MutatorMath. .. deprecated:: 5.0 \"\"\" self.muteKerning = muteKerning \"\"\"bool.", "relative = os.path.relpath(otherPath, os.path.dirname(self.path)) return posix(relative) def updatePaths(self): \"\"\" Right", "sorted(labelNames.items()): languageElement = ET.Element('labelname') languageElement.attrib[XML_LANG] = languageCode languageElement.text = labelName", "masterElement.attrib.get('source') designLocation, userLocation = self.locationFromElement(masterElement) if userLocation: raise DesignSpaceDocumentError(f'<master> element", "this designspace to ``path``.\"\"\" if hasattr(path, \"__fspath__\"): # support os.PathLike", "path and this document path. If the filename attribute is", "elif userLocation is not None and axis.name in userLocation: dimElement", "= locationLabel \"\"\"Name of a :class:`LocationLabelDescriptor`. If provided, the instance", "self.writerClass.getSourceDescriptor() def newInstanceDescriptor(self): \"\"\"Ask the writer class to make us", "have masterGlyphName = glyphName d = dict(font=fontSourceName, location=designLocation, glyphName=masterGlyphName) if", "sourceDescriptor.\"\"\" return self.writerClass.getSourceDescriptor() def newInstanceDescriptor(self): \"\"\"Ask the writer class to", "True if all the conditions matches the given location. 
-", "encoding == \"utf-8\": f = BytesIO() encoding = \"UTF-8\" xml_declaration", "dimensionValue in validatedLocation.items(): dimElement = ET.Element('dimension') dimElement.attrib['name'] = dimensionName if", "familyname is not None: instanceObject.familyName = familyname stylename = instanceElement.attrib.get('stylename')", "= masterPath1 s1.name = \"master.ufo1\" s1.font = defcon.Font(\"master.ufo1\") s1.location =", "raise DesignSpaceDocumentError(\"variable-font element must contain an axis-subsets element.\") axisSubsets =", "if key == XML_LANG: styleMapFamilyName = styleMapFamilyNameElement.text instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang) designLocation,", "SourceDescriptor at the default location or None. The default location", "attr. The file may or may not exist. MutatorMath. \"\"\"", "default, rules are applied first, before other text shaping/OpenType layout,", "in user space. MutatorMath + Varlib. \"\"\" def serialize(self): #", "linkedUserValue =========== ========= =========== =========== =============== 1 ✅ ❌ ❌", "== XML_LANG: axisObject.labelNames[lang] = tostr(labelNameElement.text) labelElement = axisElement.find(\".labels\") if labelElement", "documentObject: DesignSpaceDocument): self.path = documentPath self.documentObject = documentObject self.effectiveFormatTuple =", "it should simply return True/False def compare(self, other): # test", "instance.\"\"\" @property def location(self): \"\"\"dict. Axis values for this instance.", "None def normalizeLocation(self, location): \"\"\"Return a dict with normalized axis", "\"\"\"Container for discrete axis data. Use this for axes that", "data. Analogue of OpenType's STAT data for a free-floating location", "= self.normalizeLocation({axis.name: outputValue}).get(axis.name) newMap.append((inputValue, newOutputValue)) if newMap: axis.map = newMap", "or \"\" for label_name in element.findall(\"labelname\") for attr, lang in", "= self.variableFontsDescriptorClass( name=name, filename=filename, axisSubsets=axisSubsets, lib=lib, ) self.documentObject.variableFonts.append(variableFont) def readAxisSubset(self,", "tostr(styleMapFamilyName) def getStyleMapFamilyName(self, languageCode=\"en\"): return self.localisedStyleMapFamilyName.get(languageCode) def clearLocation(self, axisName: Optional[str]", "None: # font already loaded fonts.append(source.font) continue if source.path in", "``kwargs`` provide a ``value``, or a :class:`AxisDescriptor` otherwise. \"\"\" if", "outline data. Default ``None`` which means ``foreground``. \"\"\" self.familyName =", "\"\"\" if self.locationLabel is None: return None label = doc.getLocationLabel(self.locationLabel)", "dict(fr=\"Caractère\") s1.mutedGlyphNames.append(\"A\") s1.mutedGlyphNames.append(\"Z\") doc.addSource(s1) \"\"\" flavor = \"source\" _attrs =", "location label `{self.locationLabel}` in instance `{self.name}`.' ) return label def", "float(axisElement.attrib.get(\"default\")) axisObject.name = axisElement.attrib.get(\"name\") if axisElement.attrib.get('hidden', False): axisObject.hidden = True", "DesignSpaceDocument(LogMixin, AsDictMixin): \"\"\"The DesignSpaceDocument object can read and write ``.designspace``", "= DesignSpaceDocument.fromfile(\"path/to/my.designspace\") designspace.loadSourceFonts(defcon.Font) Or to load masters as FontTools binary", "writer will not use this attribute. It is up to", "\"\"\"bool. 
Whether this axis should be hidden in user interfaces.", "it doesn't provide the reference glyph set for the designspace,", "{} \"\"\"Custom data associated with this variable font.\"\"\" class RangeAxisSubsetDescriptor(SimpleDescriptor):", "in the target variable font. If not specified, assume the", "sourceObject.layerName = layerName for libElement in sourceElement.findall('.lib'): if libElement.attrib.get('copy') ==", "filename # path as it is stored in the document", "document is read from the disk, this is the full", "output to input. Returns value unchanged if no mapping entry", "sourceObject.muteInfo: infoElement = ET.Element('info') if sourceObject.copyInfo: infoElement.attrib['copy'] = \"1\" if", "= noteElement.text break designLocation, userLocation = self.locationFromElement(glyphElement) if userLocation: raise", "if libElement.attrib.get('copy') == '1': sourceObject.copyLib = True for groupsElement in", "doc.addAxis(a1) \"\"\" _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map',", "None: labelElement = ET.Element('label') labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue) if label.userMinimum is", "for source in self.sources: if source.font is not None: #", "document by creating such descriptor objects, filling them with data", "location to a design location. Assume that missing coordinates are", "BaseDocWriter(object): _whiteSpace = \" \" axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass =", "the rules for rule in self.rules: newConditionSets = [] for", "for familyNameElement in sourceElement.findall('familyname'): for key, lang in familyNameElement.items(): if", "= name \"\"\"Name of the :class:`AxisDescriptor` to subset.\"\"\" self.userMinimum: float", "newNames newNames = [] return glyphNames AnisotropicLocationDict = Dict[str, Union[float,", "_whiteSpace = \" \" axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor", "\"last\"} else: attributes = {} self.root.append(ET.Element(\"rules\", attributes)) for ruleObject in", "order matters \"\"\" newNames = [] for rule in rules:", "{k: v for k, v in self.map}) def map_backward(self, v):", "\"\"\"Add the given ``axisDescriptor`` to :attr:`axes`.\"\"\" self.axes.append(axisDescriptor) def addAxisDescriptor(self, **kwargs):", "have the same location as the LocationLabel. .. seealso:: :meth:`getFullDesignLocation`", "at a given location. .. versionadded:: 5.0 \"\"\" flavor =", "validatedLocation: # only accept values we know validatedLocation[axisName] = axisValue", "attributes = {\"processing\": \"last\"} else: attributes = {} self.root.append(ET.Element(\"rules\", attributes))", "None res = copy.deepcopy(self) for source, font in zip(res.sources, fonts):", "if labelElement.get(\"oldersibling\") == \"true\" else False labelNames = { lang:", "b)) rules.append(ruleObject) self.documentObject.rules = rules def _readConditionElements(self, parentElement, ruleName=None): cds", "<= value <= cd['maximum']: return False return True def processRules(rules,", "top-level location label with the given ``name``, or ``None`` if", "= set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"label element contains", "\"\"\"bool. Indicates if the kerning data from this source needs", "[] \"\"\"list. Glyphnames that need to be muted in the", "locationLabel is not None: raise DesignSpaceDocumentError('instance element must have at", "\"\"\" return next((v for k, v in self.map if k", "before or after other glyph substitution features. 
- False: before", "axis has a ``minimum`` and ``maximum``, while a discrete axis", "'userLocation', 'familyName', 'styleName', 'postScriptFontName', 'styleMapFamilyName', 'styleMapStyleName', 'localisedFamilyName', 'localisedStyleName', 'localisedStyleMapFamilyName', 'localisedStyleMapStyleName',", "sourceObject.copyLib: libElement = ET.Element('lib') libElement.attrib['copy'] = \"1\" sourceElement.append(libElement) if sourceObject.copyGroups:", "kerning \"\"\" bool. Indicates if this instance needs its kerning", "{} self._strictAxisNames = True @classmethod def fromstring(cls, string, documentObject): f", "__init__( self, *, tag=None, name=None, labelNames=None, hidden=False, map=None, axisOrdering=None, axisLabels=None,", "using forward slashes.\"\"\" def getter(self): # Normal getter return getattr(self,", "of truth for this instance's location is determined for each", "'italic': ('ital', dict(en = 'Italic')), } if name.lower() in names:", "msg self.obj = obj def __str__(self): return str(self.msg) + (", "to be mapped. \"\"\" if isinstance(value, tuple): value = value[0]", "updates the document's :attr:`default` value. .. versionchanged:: 5.0 Allow the", "\"\"\" for cd in conditions: value = location[cd['name']] if cd.get('minimum')", "axis independently by taking the first not-None field in this", "for locationElement in element.findall('.location'): elementLocation = self.readLocationElement(locationElement) break return elementLocation", "instance's location is determined for each axis independently by taking", "continue conditionElement = ET.Element('condition') conditionElement.attrib['name'] = cond.get('name') if cond.get('minimum') is", "\"\"\" @property def defaultName(self) -> str: \"\"\"Return the English name", "= self._makeLocationElement(m.get('location')) masterElement.append(locationElement) mastersElement.append(masterElement) glyphElement.append(mastersElement) return glyphElement class BaseDocReader(LogMixin): axisDescriptorClass", "if self.path is not None: basename = os.path.splitext(os.path.basename(self.path))[0] + \"-VF\"", "of axis names, in the same order as defined in", "the complete design location of this instance, by combining data", "STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.labelNames: Dict[str, str] = labelNames or", "str = name \"\"\"Label for this named location, STAT field", "look for outline data. Default ``None`` which means ``foreground``. \"\"\"", "code localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code) instanceElement.append(localisedStyleMapStyleNameElement) if instanceObject.localisedStyleMapFamilyName: languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys())", "descriptors will not have a filename attr. useless, but no", "is not None: instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName if instanceObject.styleMapStyleName is not", "unknown_attrs: raise DesignSpaceDocumentError(f\"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}\") name", "this attribute. It is up to the user of ``designspaceLib``", "userMaximum=float(userMaximum), ) if all(v is None for v in (userMinimum,", "not None else (location or {}) \"\"\"dict. 
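Pieced together from the example fragments (the ``minimum``/``maximum``/``default`` values here are assumptions; the other fields, including the unusual map, are attested):

.. code:: python

    from fontTools.designspaceLib import AxisDescriptor, AxisLabelDescriptor

    a2 = AxisDescriptor()
    a2.minimum = 0        # assumed range
    a2.maximum = 1
    a2.default = 0
    a2.name = "Italic"
    a2.tag = "ITAL"       # as in the fragments; the registered tag would be "ital"
    a2.labelNames['fr'] = "Italique"
    a2.map = [(0, 0), (1, -11)]   # (user, design) pairs
    a2.axisLabels = [
        AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
    ]
    doc.addAxis(a2)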
Axis values for sources and instances are expressed as locations. A design location (``AnisotropicLocationDict``) maps axis names to design-space values; a value may be a single float, or an ``(x, y)`` tuple when the location is anisotropic. A user location (``SimpleLocationDict``) maps axis names to plain floats in user space; no anisotropy is possible there. Missing coordinates are assumed to be at the default location for that axis. On the document, ``map_forward(userLocation)`` maps a user location to a design location, ``map_backward(designLocation)`` maps a design location back to a user location (taking only the x value of anisotropic entries), and ``normalizeLocation(location)`` returns a dict with normalized axis values computed from each axis's minimum, default and maximum.
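A small sketch of those document-level helpers, assuming the ``doc`` with a ``weight`` axis built above:

.. code:: python

    design = doc.map_forward({"weight": 400})   # user space -> design space
    user = doc.map_backward(design)             # design space -> user space
    norm = doc.normalizeLocation(design)        # design space -> normalized -1..0..1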
``SourceDescriptor`` is a simple container for data related to a single source (a master). ``filename`` is a relative path to the source file, as it is stored in the document; the file may or may not exist. ``path`` is the absolute path, calculated from the document path when the document is read from disk. ``name`` is a unique name for this source, so it can be referenced from elsewhere in the document; when it is missing, the reader assigns a temporary one. ``layerName`` names the layer in the source to look for outline data; the default ``None`` means ``foreground``. ``familyName`` and ``styleName`` (with ``localisedFamilyName`` keyed by language code) can repeat the source font's names: while this is redundant with the font itself, it can be efficient to have them right here (Varlib). ``mutedGlyphNames`` lists glyph names that need to be muted in the instances, and ``muteKerning``/``muteInfo`` exclude this source's kerning and font.info from the calculations (MutatorMath only). The ``copyLib``, ``copyGroups``, ``copyFeatures`` and ``copyInfo`` flags are MutatorMath switches, deprecated since version 5.0.

``font`` points to a representation of the font that is loaded in memory, as a Python object (e.g. a ``defcon.Font`` or a ``fontTools.ttFont.TTFont``). The default document reader will not fill in this attribute, and the default writer will not use it. It is up to the user of ``designspaceLib`` to either load the resource identified by ``filename`` and store it in this field, or write the contents of this field to the disk and make ``filename`` point to that.
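The original docstring example for a source, lightly completed (the path and the location value are illustrative; ``doc`` continues from the sketches above):

.. code:: python

    import defcon
    from fontTools.designspaceLib import SourceDescriptor

    masterPath1 = "masters/MyFont-Light.ufo"   # illustrative path
    s1 = SourceDescriptor()
    s1.path = masterPath1
    s1.name = "master.ufo1"
    s1.font = defcon.Font("master.ufo1")
    s1.location = dict(weight=0)               # assumed design location
    s1.styleName = "MasterStyleNameOne"
    s1.localisedFamilyName = dict(fr="Caractère")
    s1.mutedGlyphNames.append("A")
    s1.mutedGlyphNames.append("Z")
    doc.addSource(s1)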
``loadSourceFonts(opener, **kwargs)`` ensures that the ``font`` attribute of every source is filled in. The ``opener`` is a callable that takes a path and returns a font object; sources whose ``font`` is already set are skipped, fonts sharing the same ``source.path`` are only loaded once, and a source without a ``path`` raises ``DesignSpaceDocumentError``. Extra ``kwargs`` are passed on to the opener, and the method returns the list of font objects in the order they appear in the sources list.
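The original docstring shows usage with defcon; loading masters as FontTools binary fonts works the same way by passing ``fontTools.ttLib.TTFont`` as the opener (any extra keyword arguments go to the opener):

.. code:: python

    import defcon
    from fontTools.designspaceLib import DesignSpaceDocument

    designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
    designspace.loadSourceFonts(defcon.Font)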
``InstanceDescriptor`` is a simple container for data related to an instance. Besides ``filename``, ``path`` and ``name``, which work like their ``SourceDescriptor`` counterparts, it stores the naming data: ``familyName`` and ``styleName``, ``postScriptFontName``, and ``styleMapFamilyName``/``styleMapStyleName``, each with a localised dictionary keyed by language code and matching setter/getter methods such as ``setFamilyName`` and ``getStyleMapStyleName`` (MutatorMath + Varlib). The ``kerning`` and ``info`` booleans indicate whether this instance needs its kerning and its interpolated ``font.info`` calculated, and ``glyphs`` holds per-glyph master data; these are MutatorMath-only and deprecated since version 5.0, with rules or sparse sources as the replacement.
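The original example for an instance, reproduced with an illustrative path (the anisotropic ``width`` value is straight from the fragments):

.. code:: python

    from fontTools.designspaceLib import InstanceDescriptor

    instancePath2 = "instances/MyFont-Medium.ufo"   # illustrative path
    i2 = InstanceDescriptor()
    i2.path = instancePath2
    i2.familyName = "InstanceFamilyName"
    i2.styleName = "InstanceStyleName"
    i2.name = "instance.ufo2"
    # anisotropic location: x and y of "width" interpolate separately
    i2.designLocation = dict(weight=500, width=(400, 300))
    i2.postScriptFontName = "InstancePostscriptName"
    i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
    i2.styleMapStyleName = "InstanceStyleMapStyleName"
    i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
    doc.addInstance(i2)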
An instance's position is split over three attributes: ``designLocation`` (an ``AnisotropicLocationDict``, possibly describing only part of the full location), ``userLocation`` (a ``SimpleLocationDict``, also possibly partial), and ``locationLabel``, the name of a ``LocationLabelDescriptor``; when a label is provided, the instance should have the same location as the label. The source of truth for this instance's location is determined for each axis independently by taking the first not-None field in this list:

- ``locationLabel``: the location of the referenced format 4 STAT label. No anisotropy.
- ``designLocation[axisName]``: the explicit design location along this axis, possibly anisotropic.
- ``userLocation[axisName]``: the explicit user location along this axis.
- ``axis.default``: the axis is assumed to be at its default location.

``getFullDesignLocation(doc)`` computes the complete design location of the instance by combining these fields with the default axis values and mappings, and ``getFullUserLocation(doc)`` maps that result back to user space. To update the location of an instance wholesale, a user should first clear all the fields with ``clearLocation()`` (optionally for a single axis), then set the field(s) for which they have data.

.. versionadded:: 5.0
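Continuing with ``doc`` and ``i2`` from above, a short sketch of those accessors:

.. code:: python

    full_design = i2.getFullDesignLocation(doc)  # every axis resolved, in design space
    full_user = i2.getFullUserLocation(doc)      # the same location, mapped back to user space

    i2.clearLocation("weight")                   # clear a single axis...
    i2.clearLocation()                           # ...or the whole location before re-setting it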
On the document, ``elidedFallbackName`` is the STAT Style Attributes Header field ``elidedFallbackNameID``; see `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_. ``LocationLabelDescriptor`` is the container for location label data, the analogue of OpenType's STAT data for a free-floating location (a format 4 axis value table): ``name`` is the label (STAT field ``valueNameID``), ``userLocation`` the location it names, ``elidable`` and ``olderSibling`` the STAT flags, and ``labelNames`` the user-facing translations keyed by ``xml:lang`` code; ``defaultName`` returns the English entry from ``labelNames``, falling back to ``name``. The document methods ``getLocationLabel(name)`` and ``labelForUserLocation(userLocation)`` look labels up by name or by exact user location, returning None when there is no match.

.. versionadded:: 5.0
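A sketch of declaring a label (name and values are illustrative); ``addLocationLabelDescriptor`` builds the descriptor from keyword arguments and adds it in one step:

.. code:: python

    from fontTools.designspaceLib import LocationLabelDescriptor

    doc.addLocationLabel(
        LocationLabelDescriptor(name="Bold", userLocation={"weight": 700})
    )
    # or, equivalently:
    # doc.addLocationLabelDescriptor(name="Bold", userLocation={"weight": 700})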
``AxisLabelDescriptor`` is the analogous container for axis label data on a single axis: ``name`` (STAT field ``valueNameID``), ``userValue``, and optionally ``userMinimum``/``userMaximum`` or ``linkedUserValue``, plus the ``elidable`` (STAT flag ``ELIDABLE_AXIS_VALUE_NAME``) and ``olderSibling`` (STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``) flags; see `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_. ``getFormat()`` derives the STAT axis value table format from which fields are filled in:

=========== ========= =========== =========== ===============
STAT Format userValue userMinimum userMaximum linkedUserValue
=========== ========= =========== =========== ===============
1           ✅        ❌          ❌          ❌
3           ✅        ❌          ❌          ✅
2           ✅        ✅          ✅          ❌
=========== ========= =========== =========== ===============

.. versionadded:: 5.0
``RuleDescriptor`` describes a set of glyph substitutions that trigger conditionally in some parts of the design space. ``name`` is a unique name for this rule, so it can be referenced from elsewhere in the document. ``subs`` is a list of substitutions, each a tuple of glyph names such as ``("a", "a.alt")``. ``conditionSets`` is a list of conditionsets:

- Each conditionset is a list of conditions.
- Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys, naming an axis and the design-space range in which the rule is active; a condition that defines neither ``minimum`` nor ``maximum`` is not added.

``evaluateRule(rule, location)`` returns True if any of the rule's conditionsets matches the given location, and a conditionset matches only if all of its conditions match:

- If a condition has both a minimum and a maximum, check for ``minimum <= value <= maximum``.
- If a condition has no minimum, check for ``value <= maximum``.
- If a condition has no maximum, check for ``value >= minimum``.

When the location is anisotropic, only the x value is tested. ``processRules(rules, location, glyphNames)`` applies the rules to the given glyph names and returns a new list with the substitutions applied; rule order matters.
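The original rule example, completed with an illustrative second condition and the evaluation helpers:

.. code:: python

    from fontTools.designspaceLib import RuleDescriptor, evaluateRule, processRules

    r1 = RuleDescriptor()
    r1.name = "unique.rule.name"
    r1.conditionSets.append([
        dict(name="weight", minimum=-10, maximum=10),  # from the original example
        dict(name="width", minimum=0, maximum=500),    # illustrative second condition
    ])
    r1.subs.append(("a", "a.alt"))
    doc.addRule(r1)

    assert evaluateRule(r1, dict(weight=0, width=250))
    assert processRules([r1], dict(weight=0, width=250), ["a", "b"]) == ["a.alt", "b"]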
By default, rules are applied first, before other text shaping/OpenType layout, as they are part of the `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_. The document's ``rulesProcessingLast`` flag controls this:

- False: the substitutions are applied before other glyph substitution features;
- True: they are applied after other glyph substitution features.

For new projects, you probably want True; see the ``rules`` element documentation. If you want to use a different feature altogether, e.g. ``calt``, use the lib key described there.
``VariableFontDescriptor`` is a container for variable fonts: sub-spaces of the design space at a given location. ``name`` is required and identifies the variable font; ``filename`` is an optional relative path to the variable font file, **as it is in the document**. ``axisSubsets`` is the list of axis subsets for this variable font, and ``lib`` holds custom data associated with it. A ``RangeAxisSubsetDescriptor`` keeps a (sub)range of a continuous axis, with ``userMinimum``, ``userDefault`` and ``userMaximum``; when these are not specified, the full range and default of the axis are assumed. A ``ValueAxisSubsetDescriptor`` is a single value of a discrete or continuous axis to include in the variable font: the axis is frozen at ``userValue``. ``addVariableFont(variableFontDescriptor)`` adds a descriptor to ``variableFonts``, and ``getVariableFonts()`` returns the declared variable fonts or, when none are declared, implicitly describes one variable font per combination of discrete axis values, each covering the whole continuous space, with a name derived from the document filename plus a ``-VF`` suffix. Use-cases: from a single DesignSpace with discrete axes, define a variable font per value on the discrete axes; before version 5 you would have needed one DesignSpace per such variable font.

.. versionadded:: 5.0
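A sketch of declaring one variable font out of a larger space (the names are illustrative; the weight subset keeps its full range while the italic axis is frozen):

.. code:: python

    from fontTools.designspaceLib import (
        RangeAxisSubsetDescriptor,
        ValueAxisSubsetDescriptor,
        VariableFontDescriptor,
    )

    vf = VariableFontDescriptor(
        name="MyFontVF-Italic",
        axisSubsets=[
            RangeAxisSubsetDescriptor(name="weight"),               # full range kept
            ValueAxisSubsetDescriptor(name="italic", userValue=1),  # axis frozen here
        ],
    )
    doc.addVariableFont(vf)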
Several helpers manage paths and serialization. ``updatePaths()`` runs right before saving and reconciles each descriptor's ``filename`` and ``path`` attributes: roughly, when only ``path`` is set the filename is derived from it, when only ``filename`` is set nothing is touched (the file may or may not exist), and when both are set and disagree the path takes precedence, since relative filenames are recalculated against the document location. ``updateFilenameFromPath(masters=True, instances=True, force=False)`` sets the ``filename`` attribute of the selected descriptors from their ``path``. Filenames are stored with forward slashes so documents stay portable across platforms. ``findDefault()`` sets and returns the ``SourceDescriptor`` at the default location, or None; the default location is computed by ``newDefaultLocation()`` and checked against the source locations, always in design space. ``write(path)`` writes this designspace to ``path`` (``os.PathLike`` objects are supported), and ``tostring(encoding=None)`` returns the designspace as a string. The effective format version is taken from ``formatVersion`` when present; otherwise the writer picks a version recent enough to contain all the features used, for example 5.0 when location labels, discrete axes or variable fonts are present.
versionadded:: 5.0 \"\"\" self.locationLabel = None if", "loses leading slashes of UNC path mounts new_path = '//'", "= ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames') def __init__( self, *,", "localisations if instanceObject.localisedStyleName: languageCodes = list(instanceObject.localisedStyleName.keys()) languageCodes.sort() for code in", "= 2 a2.axisLabels = [ AxisLabelDescriptor(name=\"Roman\", userValue=0, elidable=True) ] doc.addAxis(a2)", "makeGlyphs=True, makeKerning=True, makeInfo=True): filename = instanceElement.attrib.get('filename') if filename is not", "instanceObject.familyName = familyname stylename = instanceElement.attrib.get('stylename') if stylename is not", "= familyname stylename = instanceElement.attrib.get('stylename') if stylename is not None:", "List[VariableFontDescriptor]: \"\"\"Return all variable fonts defined in this document, or", "is None: # neither is defined, don't add this condition", "as # '{http://www.w3.org/XML/1998/namespace}lang' } locationLabel = self.locationLabelDescriptorClass( name=name, userLocation=userLocation, elidable=elidable,", "and Italic, that are not compatible. The axis still allows", "axisLabels=axisLabels, ) self.minimum = minimum \"\"\"number. The minimum value for", "subsets of that variable font that only include some axes", "designLocation, userLocation = self.locationFromElement(glyphElement) if userLocation: raise DesignSpaceDocumentError(f'<glyph> element \"{glyphName}\"", "will be used to build localized names for all instances.", "contents of the font.lib need to be copied to the", "3 and 4: filename gets updated and relativized descriptor.filename =", "not None: instanceObject.postScriptFontName = postScriptFontName styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname') if styleMapFamilyName", "= ET.Element('glyph') if data.get('mute'): glyphElement.attrib['mute'] = \"1\" if data.get('unicodes') is", "= 1 a1.maximum = 1000 a1.default = 400 a1.name =", "5.0 Use rules or sparse sources instead. \"\"\" self.kerning =", "new instanceDescriptor.\"\"\" return self.writerClass.getInstanceDescriptor() def getAxisOrder(self): \"\"\"Return a list of", "variableFont = self.variableFontsDescriptorClass( name=name, filename=filename, axisSubsets=axisSubsets, lib=lib, ) self.documentObject.variableFonts.append(variableFont) def", "instanceObject.kerning: kerningElement = ET.Element('kerning') instanceElement.append(kerningElement) if instanceObject.info: infoElement = ET.Element('info')", "fill-in this attribute, and the default writer will not use", "default location or None. The default location is the set", "</dict> </lib> \"\"\" self.sources: List[SourceDescriptor] = [] \"\"\"List of this", "user location for this instance. .. seealso:: :meth:`getFullDesignLocation` .. versionadded::", "loaded again. 
Fonts with the same path are only loaded", "self.sources: source.font = None res = copy.deepcopy(self) for source, font", "glyphSources instanceObject.glyphs[glyphName] = glyphData def readLib(self): \"\"\"Read the lib element", "_, glyphData in item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) for glyphMaster in", "= self._makeLocationElement(data.get('instanceLocation')) glyphElement.append(locationElement) if glyphName is not None: glyphElement.attrib['name'] =", "if the feature text needs to be copied to the", "glyphName = glyphElement.attrib.get('name') if glyphName is None: raise DesignSpaceDocumentError(\"Glyph object", "Axis values for this instance, in design space coordinates. MutatorMath", "= tree.getroot() self.documentObject.formatVersion = self.root.attrib.get(\"format\", \"3.0\") self._axes = [] self.rules", "sets the SourceDescriptor.font attribute. If the font attribute is already", "\"\"\"Instantiate a new :class:`VariableFontDescriptor` using the given ``kwargs`` and add", "warp map. axesElement = self.root.find(\".axes\") if axesElement is not None", "= instancePath2 i2.familyName = \"InstanceFamilyName\" i2.styleName = \"InstanceStyleName\" i2.name =", "code. \"\"\" def getFormat(self) -> int: \"\"\"Determine which format of", "setFamilyName(self, familyName, languageCode=\"en\"): \"\"\"Setter for :attr:`localisedFamilyName` .. versionadded:: 5.0 \"\"\"", "sourceDescriptorClass = SourceDescriptor variableFontDescriptorClass = VariableFontDescriptor valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor rangeAxisSubsetDescriptorClass", "for inputValue, outputValue in axisObject.map: mapElement = ET.Element('map') mapElement.attrib['input'] =", "= [] \"\"\"List of this document's axes.\"\"\" self.locationLabels: List[LocationLabelDescriptor] =", "_attrs = ('filename', 'axisSubsets', 'lib') filename = posixpath_property(\"_filename\") def __init__(self,", "user space coordinates. MutatorMath + Varlib. .. seealso:: This may", "still allows to bind together the full font family, which", "in instanceElement.findall('.glyphs/glyph'): self.readGlyphElement(glyphElement, instanceObject) for infoElement in instanceElement.findall(\"info\"): self.readInfoElement(infoElement, instanceObject)", "in instanceElement.findall('stylemapfamilyname'): for key, lang in styleMapFamilyNameElement.items(): if key ==", "return self.writerClass.getAxisDecriptor() def newSourceDescriptor(self): \"\"\"Ask the writer class to make", "mapping's output to input. 
Returns value unchanged if no mapping", "4) self.root.findall('.instances')[0].append(instanceElement) def _addSource(self, sourceObject): sourceElement = ET.Element(\"source\") if sourceObject.filename", "in readLocation xValue %3.3f\", xValue) try: yValue = dimensionElement.attrib.get('yvalue') if", "if self.documentObject.elidedFallbackName is not None: axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName self.root.append(axesElement) for", "'usermaximum', 'name', 'elidable', 'oldersibling', 'linkeduservalue'} unknown_attrs = set(element.attrib) - xml_attrs", "code:: python from fontTools.designspaceLib import DesignSpaceDocument doc = DesignSpaceDocument.fromfile(\"some/path/to/my.designspace\") doc.formatVersion", "unicodes = [int(u, 16) for u in unicodes.split(\" \")] glyphData['unicodes']", "axisObject.axisLabels.append(self.readAxisLabel(label)) self.documentObject.axes.append(axisObject) self.axisDefaults[axisObject.name] = axisObject.default def readAxisLabel(self, element: ET.Element): xml_attrs", "could look like either one of these: .. code-block:: xml", "if none of the conditions have minimum or maximum values,", "SourceDescriptor.font attributes are loaded, and return list of fonts. Takes", "for this instance, in design space coordinates. MutatorMath + Varlib.", "instance of :class:`DiscreteAxisDescriptor` if the ``kwargs`` provide a ``value``, or", "self.addLocationLabel(locationLabel) return locationLabel def newDefaultLocation(self): \"\"\"Return a dict with the", "axis. Note: the output won't be anisotropic, only the xvalue", "labelElement.get(\"oldersibling\") == \"true\" else False labelNames = { lang: label_name.text", "lib = plistlib.fromtree(libElement[0]) variableFont = self.variableFontsDescriptorClass( name=name, filename=filename, axisSubsets=axisSubsets, lib=lib,", "dimension \"{dimName}\"') if yValue is not None: if xValue is", "is not a standard axis, self.labelNames = labelNames or {}", "the STAT table, however it can't become a variation axis", "label_name.text or \"\" for label_name in element.findall(\"labelname\") for attr, lang", "languageCodes = list(instanceObject.localisedStyleName.keys()) languageCodes.sort() for code in languageCodes: if code", "familyNameElement.items(): if key == XML_LANG: familyName = familyNameElement.text sourceObject.setFamilyName(familyName, lang)", "\".join(self.intOrFloat(v) for v in axisObject.values) axisElement.attrib['default'] = self.intOrFloat(axisObject.default) if axisObject.hidden:", "path that was given to :meth:`read` or :meth:`fromfile`. \"\"\" self.filename", "default value as the full axis. (default = ``None``) \"\"\"", "for cond in conditions: if cond.get('minimum') is None and cond.get('maximum')", "location. .. versionadded:: 5.0 \"\"\" flavor = \"variable-font\" _attrs =", "set(variableFontElement.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"variable-font element contains unknown", "fine case 4. descriptor.filename == '../somewhere' descriptor.path == \"~/absolute/path/there\" --", "default. See :meth:`SourceDescriptor.getFullDesignLocation()` \"\"\" self.default = None # Convert the", "it right here. Varlib. \"\"\" self.styleName = styleName \"\"\"string. 
Style", "self.root.find(\".rules\") if rulesElement is not None: processingValue = rulesElement.attrib.get(\"processing\", \"first\")", "rules: if evaluateRule(rule, location): for name in glyphNames: swap =", "min/max/default values or none at all.\" ) def readSources(self): for", "None: # add a temporary source name sourceName = \"temp_master.%d\"", "collections.OrderedDict() for axisDescriptor in self.axes: loc[axisDescriptor.name] = axisDescriptor.map_forward( axisDescriptor.default )", "value: return False elif not cd['minimum'] <= value <= cd['maximum']:", "instanceObject.styleMapFamilyName is not None: instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName if instanceObject.styleMapStyleName is", "designLocation: AnisotropicLocationDict) -> SimpleLocationDict: \"\"\"Map a design location to a", "each value can have different glyph sets. \"\"\" self.values: List[float]", "not overwriting some other value for filename, it should be", "location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\" self.userLocation: SimpleLocationDict", ".. versionadded:: 5.0 \"\"\" result: AnisotropicLocationDict = {} for axis", "axis.minimum cd['minimum'] = None cdMax = conditionElement.attrib.get(\"maximum\") if cdMax is", "- 0 - 1 value. - we need the axis", "self.documentObject.formatTuple >= (5, 0) and \"values\" in axisElement.attrib: axisObject =", "axisElement.attrib.get(\"name\") if axisElement.attrib.get('hidden', False): axisObject.hidden = True axisObject.tag = axisElement.attrib.get(\"tag\")", "valueStr is None: raise DesignSpaceDocumentError(\"label element must have a uservalue", "doc.map_forward(label.userLocation) # type: ignore result: AnisotropicLocationDict = {} for axis", "container for data related to the source .. code:: python", "of (major, minor). .. versionadded:: 5.0 \"\"\" if self.formatVersion is", "sourceObject.layerName if sourceObject.localisedFamilyName: languageCodes = list(sourceObject.localisedFamilyName.keys()) languageCodes.sort() for code in", "= ET.Element('dimension') dimElement.attrib['name'] = dimensionName if type(dimensionValue) == tuple: dimElement.attrib['xvalue']", "SimpleLocationDict) -> Optional[LocationLabelDescriptor]: \"\"\"Return the :class:`LocationLabel` that matches the given", "document's rules.\"\"\" self.rulesProcessingLast: bool = False \"\"\"This flag indicates whether", "check for < maximum. - If a condition has no", "self.font = font \"\"\"Any Python object. Optional. Points to a", "return elementLocation def readLocationElement(self, locationElement): \"\"\"Read a ``<location>`` element. ..", "subElement.attrib['with'] = sub[1] ruleElement.append(subElement) if len(ruleElement): self.root.findall('.rules')[0].append(ruleElement) def _addAxis(self, axisObject):", "key, lang in styleMapFamilyNameElement.items(): if key == XML_LANG: styleMapFamilyName =", "for attr, lang in label_name.items() if attr == XML_LANG #", "None label = doc.getLocationLabel(self.locationLabel) if label is None: raise DesignSpaceDocumentError(", "def defaultName(self) -> str: \"\"\"Return the English name from :attr:`labelNames`", "Four letter tag for this axis. Some might be registered", "self.normalizeLocation(item.location) # instances for item in self.instances: # glyph masters", "location. See :meth:`getFullDesignLocation()` .. 
versionadded:: 5.0 \"\"\" self.layerName = layerName", "example, to load UFO sources using defcon: designspace = DesignSpaceDocument.fromfile(\"path/to/my.designspace\")", "the target variable font. If not specified, assume the same", "localised style map familyname strings, keyed by language code. \"\"\"", "to identify and respond to the following situations: In each", "instance. .. seealso:: :meth:`getFullDesignLocation` .. versionadded:: 5.0 \"\"\" return doc.map_backward(self.getFullDesignLocation(doc))", "encoding: '%s'\" % encoding) writer = self.writerClass(f, self) writer.write(encoding=encoding, xml_declaration=xml_declaration)", "order to update the location of this instance wholesale, a", "copyLib \"\"\"bool. Indicates if the contents of the font.lib need", "processingValue = rulesElement.attrib.get(\"processing\", \"first\") if processingValue not in {\"first\", \"last\"}:", "coordinates.\"\"\" # Without OrderedDict, output XML would be non-deterministic. #", "this instance needs its kerning calculated. MutatorMath. .. deprecated:: 5.0", "is inappropriate here, and instead of # assert, it should", "result[axis.name] = self.designLocation[axis.name] else: result[axis.name] = axis.map_forward(axis.default) return result class", "in axisElement.attrib: axisObject = self.discreteAxisDescriptorClass() axisObject.values = [float(s) for s", "\"\"\"Return a dict with normalized axis values.\"\"\" from fontTools.varLib.models import", "is not None: instanceElement.attrib['filename'] = instanceObject.filename if instanceObject.postScriptFontName is not", "= \"InstancePostscriptName\" i2.styleMapFamilyName = \"InstanceStyleMapFamilyName\" i2.styleMapStyleName = \"InstanceStyleMapStyleName\" i2.lib['com.coolDesignspaceApp.specimenText'] =", "\"en\": continue localisedStyleMapStyleNameElement = ET.Element('stylemapstylename') localisedStyleMapStyleNameElement.attrib[XML_LANG] = code localisedStyleMapStyleNameElement.text =", "self.minimum = minimum \"\"\"number. The minimum value for this axis", "\"\"\" self.kerning = kerning \"\"\" bool. Indicates if this instance", "5.0 \"\"\" variableFont = self.writerClass.variableFontDescriptorClass(**kwargs) self.addVariableFont(variableFont) return variableFont def addLocationLabel(self,", "= default # now the rules for rule in self.rules:", "can be extracted from the font, it can be efficient", "minimum \"\"\"number. 
The minimum value for this axis in user", "= axis.name value = designLocation[axis.name] if isinstance(value, tuple): dimElement.attrib['xvalue'] =", "in data.get('masters'): masterElement = ET.Element(\"master\") if m.get('glyphName') is not None:", "0): if instanceObject.locationLabel is None: self._addLocationElement( instanceElement, designLocation=instanceObject.designLocation, userLocation=instanceObject.userLocation )", "None: basename = \"VF\" axisNames = \"\".join([f\"-{axis.tag}{value}\" for axis, value", "copy.deepcopy(self) for source, font in zip(res.sources, fonts): res.font = font", "languageCode=\"en\"): return self.localisedStyleMapFamilyName.get(languageCode) def clearLocation(self, axisName: Optional[str] = None): \"\"\"Clear", "= RangeAxisSubsetDescriptor instanceDescriptorClass = InstanceDescriptor def __init__(self, documentPath, documentObject): self.path", "self.writerClass(f, self) writer.write(encoding=encoding, xml_declaration=xml_declaration) return f.getvalue() def read(self, path): \"\"\"Read", "= self.normalizeLocation(glyphMaster['location']) item.location = self.normalizeLocation(item.location) # the axes for axis", "default location from user space to design space before comparing", "Note: elementtree reads the \"xml:lang\" attribute name as # '{http://www.w3.org/XML/1998/namespace}lang'", "maximum = None newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum)) newConditionSets.append(newConditions) rule.conditionSets = newConditionSets", "a glyphname, use the one we have masterGlyphName = glyphName", "for conditionElement in parentElement.findall('.condition'): cd = {} cdMin = conditionElement.attrib.get(\"minimum\")", "def location(self): \"\"\"dict. Axis values for this instance. MutatorMath +", "None: instanceElement.attrib['name'] = instanceObject.name if instanceObject.locationLabel is not None: instanceElement.attrib['location']", "rule's conditionsets matches the given location.\"\"\" return any(evaluateConditions(c, location) for", "elementLocation = (None, None) for locationElement in element.findall('.location'): elementLocation =", "optional list of keyword arguments, and returns a new font", "value for this axis in user space. MutatorMath + Varlib.", "different feature altogether, e.g. ``calt``, use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``", "self.copyLib = copyLib \"\"\"bool. Indicates if the contents of the", "_readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True): filename = instanceElement.attrib.get('filename') if filename", "if axis.name not in location: # skipping this dimension it", "with ``name``, ``minimum`` and ``maximum`` keys. \"\"\" # list of", "# read any stray conditions outside a condition set externalConditions", "for label_name in element.findall(\"labelname\") for attr, lang in label_name.items() if", "\"\"\" self.styleName = styleName \"\"\"string. Style name of this source.", "axisName is None: self.designLocation = {} self.userLocation = {} else:", "non-interpolating font.info needs to be copied to the instances. MutatorMath.", "load masters as FontTools binary fonts, including extra options: designspace.loadSourceFonts(ttLib.TTFont,", "in design space coordinates. MutatorMath + Varlib. .. 
seealso:: This", "unknown attributes: {', '.join(unknown_attrs)}\") name = variableFontElement.get(\"name\") if name is", "\"\"\" flavor = \"source\" _attrs = ['filename', 'path', 'name', 'layerName',", "{', '.join(unknown_attrs)}\") name = variableFontElement.get(\"name\") if name is None: raise", "[] glyphSources.append(d) if glyphSources is not None: glyphData['masters'] = glyphSources", "determined for each axis independently by taking the first not-None", "def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None: labelElement =", "rules or sparse sources instead. \"\"\" self.kerning = kerning \"\"\"", "userValue is not None: userValue = float(userValue) except ValueError: self.log.warning(\"ValueError", "in self.sources + self.instances: if descriptor.path is not None: #", "os.path.splitext(os.path.basename(self.path))[0] + \"-VF\" if basename is None: basename = \"VF\"", "(34, 36.5), 'Width': 100} instance.userLocation = {'Opsz': 16} In order", "__init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf): self.name: str = name", "of this label, by combining data from the explicit user", ":attr:`designLocation`. \"\"\" return self.designLocation @location.setter def location(self, location: Optional[AnisotropicLocationDict]): self.designLocation", "name=name, labelNames=labelNames, hidden=hidden, map=map, axisOrdering=axisOrdering, axisLabels=axisLabels, ) self.default: float =", "(\"a\", \"a.alt\"). - Note: By default, rules are applied first,", "\"\"\"STAT Style Attributes Header field ``elidedFallbackNameID``. See: `OTSpec STAT Style", "values. See: `OTSpec STAT Axis value table, format 1, 2,", "= self.intOrFloat(subset.userMaximum) if subset.userDefault is not None: subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault)", "in sourceElement.findall('.groups'): if groupsElement.attrib.get('copy') == '1': sourceObject.copyGroups = True for", "value > cd['maximum']: return False elif cd.get('maximum') is None: if", "(designLocation, userLocation) \"\"\" if self._strictAxisNames and not self.documentObject.axes: raise DesignSpaceDocumentError(\"No", "def intOrFloat(self, num): if int(num) == num: return \"%d\" %", "for _, glyphData in item.glyphs.items(): glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) for glyphMaster", "libElement in sourceElement.findall('.lib'): if libElement.attrib.get('copy') == '1': sourceObject.copyLib = True", "self.effectiveFormatTuple < (5, 0): # Deprecated members as of version", "DesignSpaceDocumentError(\"No axes defined\") userLoc = {} designLoc = {} for", "return cds def readAxes(self): # read the axes elements, including", "localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code) instanceElement.append(localisedStyleMapStyleNameElement) if instanceObject.localisedStyleMapFamilyName: languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) languageCodes.sort()", "return label return None def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict:", "newConditions = [] for cond in conditions: if cond.get('minimum') is", "data.get('note') is not None: noteElement = ET.Element('note') noteElement.text = data.get('note')", "data in sorted(instanceObject.glyphs.items()): glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data) glyphsElement.append(glyphElement)", "for discrete axis data. 
Use this for axes that do", "glyphElement in sourceElement.findall(\".glyph\"): glyphName = glyphElement.attrib.get('name') if glyphName is None:", "if self.documentObject.instances: self.root.append(ET.Element(\"instances\")) for instanceObject in self.documentObject.instances: self._addInstance(instanceObject) if self.documentObject.lib:", "minimum=minimum, maximum=maximum)) newConditionSets.append(newConditions) rule.conditionSets = newConditionSets def loadSourceFonts(self, opener, **kwargs):", "sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename)) else: sourcePath = None sourceName =", "self.documentObject.sources: self._addSource(sourceObject) if self.documentObject.variableFonts: variableFontsElement = ET.Element(\"variable-fonts\") for variableFont in", "styleMapStyleNameElement.text instanceObject.setStyleMapStyleName(styleMapStyleName, lang) for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'): for key, lang", "self.filename = os.path.basename(path) self.updatePaths() writer = self.writerClass(path, self) writer.write() def", "xValue is not None: xValue = float(xValue) except ValueError: self.log.warning(\"ValueError", "instance, in user space coordinates. MutatorMath + Varlib. .. seealso::", "instance file, **as it is in the document**. The file", "the same as the matching STAT format 4 label. No", "4). All values are user values. See: `OTSpec STAT Axis", "or xvalue=\"\" must be provided for location dimension \"{dimName}\"') if", "% num).rstrip('0').rstrip('.') def _addRule(self, ruleObject): # if none of the", "languageCodes: if code == \"en\": continue localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname') localisedStyleMapFamilyNameElement.attrib[XML_LANG]", "sourceElement.attrib.get('filename') if filename is not None and self.path is not", "not None: locationElement, m['location'] = self._makeLocationElement(m.get('location')) masterElement.append(locationElement) mastersElement.append(masterElement) glyphElement.append(mastersElement) return", "map=self.map, axisOrdering=self.axisOrdering, axisLabels=self.axisLabels, ) def map_forward(self, v): \"\"\"Maps value from", "for this rule. Can be used to reference this rule", "axis location, the user should only clear that axis, then", "object contains the same data as the other for attr", "class to make us a new axisDescriptor.\"\"\" return self.writerClass.getAxisDecriptor() def", ".. versionadded:: 5.0 \"\"\" self.locationLabels.append(locationLabelDescriptor) def addLocationLabelDescriptor(self, **kwargs): \"\"\"Instantiate a", "getAxisOrder(self): \"\"\"Return a list of axis names, in the same", "if externalConditions: ruleObject.conditionSets.append(externalConditions) self.log.info( \"Found stray rule conditions outside a", "sourceObject.mutedGlyphNames: glyphElement = ET.Element(\"glyph\") glyphElement.attrib[\"name\"] = name glyphElement.attrib[\"mute\"] = '1'", "= self.intOrFloat(cond.get('minimum')) if cond.get('maximum') is not None: conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum'))", "} def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict: \"\"\"Map a design", "source .. 
code:: python doc = DesignSpaceDocument() s1 = SourceDescriptor()", "value = userLocation[axis.name] dimElement.attrib['uservalue'] = self.intOrFloat(value) locElement.append(dimElement) if len(locElement) >", "axisElements: if self.documentObject.formatTuple >= (5, 0) and \"values\" in axisElement.attrib:", "__init__( self, *, name, userValue, userMinimum=None, userMaximum=None, elidable=False, olderSibling=False, linkedUserValue=None,", "format 1, 2, 3. See: `OTSpec STAT Axis Value Tables", "axisObject.map: for inputValue, outputValue in axisObject.map: mapElement = ET.Element('map') mapElement.attrib['input']", "= path \"\"\"The absolute path, calculated from filename.\"\"\" self.font =", "addInstanceDescriptor(self, **kwargs): \"\"\"Instantiate a new :class:`InstanceDescriptor` using the given ``kwargs``", "name=None, conditionSets=None, subs=None): self.name = name \"\"\"string. Unique name for", "for source in self.documentObject.sources ) or self.documentObject.variableFonts or any( instance.locationLabel", "self.subs = subs or [] \"\"\"list of substitutions. - Each", "ET.Element(\"source\") if sourceObject.filename is not None: sourceElement.attrib['filename'] = sourceObject.filename if", "AxisLabelDescriptor) -> None: labelElement = ET.Element('label') labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue) if", "of localised style map stylename strings, keyed by language code.", "versionadded:: 5.0 \"\"\" return { axis.name: axis.map_forward(userLocation.get(axis.name, axis.default)) for axis", "ET.Element, label: LocationLabelDescriptor) -> None: labelElement = ET.Element('label') labelElement.attrib['name'] =", "not None: instanceObject.styleMapStyleName = styleMapStyleName # read localised names for", "\"\"\" for label in self.locationLabels: if label.name == name: return", "to have it right here. Varlib. \"\"\" self.localisedFamilyName = localisedFamilyName", "'1': sourceObject.muteInfo = True for featuresElement in sourceElement.findall(\".features\"): if featuresElement.attrib.get('copy')", "class AsDictMixin(object): def asdict(self): d = {} for attr, value", "None for v in (userMinimum, userDefault, userMaximum)): return self.rangeAxisSubsetDescriptorClass(name=name) raise", "is not None: conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum')) conditionsetElement.append(conditionElement) if len(conditionsetElement): ruleElement.append(conditionsetElement)", "16} In order to update a single axis location, the", ".. deprecated:: 5.0 \"\"\" self.info = info \"\"\"bool. Indicated if", "None: raise DesignSpaceDocumentError(\"axis-subset element must have a name attribute.\") userValueStr", "in self._attrs] attrs = indent('\\n'.join(attrs), ' ') return f\"{self.__class__.__name__}(\\n{attrs}\\n)\" class", "= [] self.instances = [] self.axisDefaults = {} self._strictAxisNames =", "{} \"\"\"A dictionary of localised style map stylename strings, keyed", "element: ET.Element): if \"uservalue\" in element.attrib: xml_attrs = {'name', 'uservalue'}", "in readLocation userValue %3.3f\", userValue) try: xValue = dimensionElement.attrib.get('xvalue') if", "\"\"\"This flag indicates whether the substitution rules should be applied", "doc.lib \"\"\" def __init__(self, readerClass=None, writerClass=None): self.path = None \"\"\"String,", "even if they only contain ASCII characters. \"\"\" self.hidden =", "_attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum') def __init__(self, *, name,", "5.0 \"\"\" self.copyInfo = copyInfo \"\"\"bool. 
Indicates if the non-interpolating", "if this is not a standard axis, self.labelNames = labelNames", "locations (using xvalue=\"\").') masterGlyphName = masterElement.attrib.get('glyphname') if masterGlyphName is None:", "loc[axisDescriptor.name] = axisDescriptor.map_forward( axisDescriptor.default ) return loc def labelForUserLocation(self, userLocation:", "basename = os.path.splitext(os.path.basename(self.path))[0] + \"-VF\" if basename is None: basename", "map_backward(self, value): \"\"\"Maps value from axis mapping's output to input.", "to a representation of this source font that is loaded", "space). defaultDesignLocation = self.newDefaultLocation() for sourceDescriptor in self.sources: if sourceDescriptor.getFullDesignLocation(self)", "== defaultDesignLocation: self.default = sourceDescriptor return sourceDescriptor return None def", "sourceObject.familyName is not None: sourceElement.attrib['familyname'] = sourceObject.familyName if sourceObject.styleName is", "the Designspace. Use-cases: - From a single DesignSpace with discrete", "this attribute, and the default writer will not use this", "+ Varlib. \"\"\" self.styleMapStyleName = styleMapStyleName \"\"\"string. StyleMap stylename for", "coordinates. MutatorMath + Varlib. .. deprecated:: 5.0 Use the more", "instanceObject.glyphs: if instanceElement.findall('.glyphs') == []: glyphsElement = ET.Element('glyphs') instanceElement.append(glyphsElement) glyphsElement", "None: raise DesignSpaceDocumentError( \"condition missing required minimum or maximum in", "is not None: axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName self.root.append(axesElement) for axisObject in", "value = location[axis.name] # 'anisotropic' location, take first coord only", "libElement.attrib.get('copy') == '1': sourceObject.copyLib = True for groupsElement in sourceElement.findall('.groups'):", "is up to the user of ``designspaceLib`` to either load", "4: filename gets updated and relativized descriptor.filename = self._posixRelativePath(descriptor.path) def", "str = name \"\"\"string, required. Name of this variable to", "or [] \"\"\"a list of conditionsets. - Each conditionset is", "\"\"\"Ask the writer class to make us a new instanceDescriptor.\"\"\"", "element must have at most one of the location=\"...\" attribute", "relativized descriptor.filename = self._posixRelativePath(descriptor.path) def addSource(self, sourceDescriptor: SourceDescriptor): \"\"\"Add the", "for the whole document.\"\"\" for libElement in self.root.findall(\".lib\"): self.documentObject.lib =", "axis.\"\"\" class BaseDocWriter(object): _whiteSpace = \" \" axisDescriptorClass = AxisDescriptor", "0): self._addLocationElement(sourceElement, designLocation=sourceObject.location) else: # Pre-version 5.0 code was validating", "values that can describe a warp of user space to", "assume that we only want the default location of that", "or a sufficiently recent version to be able to encode", "name names = { 'weight': ('wght', dict(en = 'Weight')), 'width':", "plistlib from fontTools.misc.loggingTools import LogMixin from fontTools.misc.textTools import tobytes, tostr", "they are assumed to be the default. 
See :meth:`SourceDescriptor.getFullDesignLocation()` \"\"\"", "= ET.Element('familyname') localisedFamilyNameElement.attrib[XML_LANG] = code localisedFamilyNameElement.text = instanceObject.getFamilyName(code) instanceElement.append(localisedFamilyNameElement) if", "flavor = \"label\" _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')", ":meth:`getFormat` .. versionadded:: 5.0 \"\"\" flavor = \"label\" _attrs =", ":class:`LocationLabelDescriptor`. If provided, the instance should have the same location", "= {'name', 'uservalue'} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs:", "instanceDescriptorClass = InstanceDescriptor def __init__(self, documentPath, documentObject): self.path = documentPath", "olderSibling=False, linkedUserValue=None, labelNames=None, ): self.userMinimum: Optional[float] = userMinimum \"\"\"STAT field", "self.readInfoElement(infoElement, instanceObject) for libElement in instanceElement.findall('lib'): self.readLibElement(libElement, instanceObject) self.documentObject.instances.append(instanceObject) def", "VariableFontDescriptor(SimpleDescriptor): \"\"\"Container for variable fonts, sub-spaces of the Designspace. Use-cases:", "self.documentObject.formatTuple if ( any( isinstance(axis, DiscreteAxisDescriptor) or axis.axisOrdering is not", "if readerClass is not None: self.readerClass = readerClass else: self.readerClass", "contains unknown attributes: {', '.join(unknown_attrs)}\") name = labelElement.get(\"name\") if name", "mapping's output (design) to input (user).\"\"\" from fontTools.varLib.models import piecewiseLinearMap", "STAT field ``valueNameID``.\"\"\" self.elidable: bool = elidable \"\"\"STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.", "label. =========== ========= =========== =========== =============== STAT Format userValue userMinimum", "in zip(res.sources, fonts): res.font = font return res finally: for", "is not None: yValue = float(yValue) except ValueError: self.log.warning(\"ValueError in", "ignore result: AnisotropicLocationDict = {} for axis in doc.axes: if", "readInfoElement(self, infoElement, instanceObject): \"\"\" Read the info element.\"\"\" instanceObject.info =", "ourselves for 'xml:lang' XML_NS = \"{http://www.w3.org/XML/1998/namespace}\" XML_LANG = XML_NS +", "= self.locationFromElement(instanceElement) locationLabel = instanceElement.attrib.get('location') if (designLocation or userLocation) and", "userValue=value) for axis, value in zip(discreteAxes, values) ] )) return", "versionchanged:: 5.0 Allow the default source to not specify some", "we need the axis data to do the scaling, so", "don't read a glyphname, use the one we have masterGlyphName", "if familyName is not None: sourceObject.familyName = familyName styleName =", ") def readSources(self): for sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")): filename =", "glyphName is None: continue if glyphElement.attrib.get('mute') == '1': sourceObject.mutedGlyphNames.append(glyphName) for", "# assert, it should simply return True/False def compare(self, other):", "'usermaximum'} unknown_attrs = set(element.attrib) - xml_attrs if unknown_attrs: raise DesignSpaceDocumentError(f\"axis-subset", "class SimpleDescriptor(AsDictMixin): \"\"\" Containers for a bunch of attributes\"\"\" #", "= (xValue, yValue) elif xValue is not None: designLoc[dimName] =", "was implicitly describing a variable font that covers the whole", "if userLocation: raise DesignSpaceDocumentError(f'<source> element \"{sourceName}\" must only have design", 
"= data.get('note') glyphElement.append(noteElement) if data.get('masters') is not None: mastersElement =", "from a continuous axis is that a continuous axis has", "from the disk, this is its original file name, i.e.", "=========== =========== =============== \"\"\" if self.linkedUserValue is not None: return", "must have at most one of the location=\"...\" attribute or", "DesignSpaceDocumentError(f'<glyph> element \"{glyphName}\" must only have design locations (using xvalue=\"\").')", ".. code:: xml <lib> <dict> <key>com.github.fonttools.varLib.featureVarsFeatureTag</key> <string>calt</string> </dict> </lib> \"\"\"", "infoElement = ET.Element('info') if sourceObject.copyInfo: infoElement.attrib['copy'] = \"1\" if sourceObject.muteInfo:", "yValue %3.3f\", yValue) if userValue is None == xValue is", "self.root.findall('.instances')[0].append(instanceElement) def _addSource(self, sourceObject): sourceElement = ET.Element(\"source\") if sourceObject.filename is", "None: raise DesignSpaceDocumentError(\"variable-font element must have a name attribute.\") filename", "= float(xValue) except ValueError: self.log.warning(\"ValueError in readLocation xValue %3.3f\", xValue)", "self._addVariableFont(variableFontsElement, variableFont) self.root.append(variableFontsElement) if self.documentObject.instances: self.root.append(ET.Element(\"instances\")) for instanceObject in self.documentObject.instances:", "have at most one of the location=\"...\" attribute or the", "zip(res.sources, fonts): res.font = font return res finally: for source,", "for this axis. Some might be registered at the `OpenType", "`OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_ .. versionadded:: 5.0 \"\"\"", "it is assumed user space is the same as design", "dimElement.attrib['name'] = axis.name value = userLocation[axis.name] dimElement.attrib['uservalue'] = self.intOrFloat(value) locElement.append(dimElement)", "what the document contains. \"\"\" minVersion = self.documentObject.formatTuple if (", "If not specified, assume the same default value as the", "but no reason to interfere. case 2. descriptor.filename == \"../something\"", "as the LocationLabel. .. seealso:: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0", "instanceObject.designLocation = designLocation or {} for glyphElement in instanceElement.findall('.glyphs/glyph'): self.readGlyphElement(glyphElement,", "the default. See :meth:`SourceDescriptor.getFullDesignLocation()` \"\"\" self.default = None # Convert", "of the default master. This attribute is updated by the", "getter return getattr(self, private_name) def setter(self, value): # The setter", "user space. However, this default value is less important than", "for descriptor in self.sources + self.instances: if descriptor.path is not", "element must have a name attribute.\") designLocation, userLocation = self.locationFromElement(labelElement)", "info=True, lib=None, ): self.filename = filename \"\"\"string. Relative path to", "groupsElement.attrib.get('copy') == '1': sourceObject.copyGroups = True for infoElement in sourceElement.findall(\".info\"):", "a basename for the file. \"\"\" self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] =", "userValue return designLoc, userLoc def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True): instanceElements", "basename = None if self.filename is not None: basename =", "here. Varlib. 
\"\"\" self.styleName = styleName \"\"\"string. Style name of", "different objects, as long as they have the same attributes.", "def readGlyphElement(self, glyphElement, instanceObject): \"\"\" Read the glyph element, which", "StringIO from textwrap import indent from typing import Any, Dict,", "- Each substitution is stored as tuples of glyphnames, e.g.", "= self.designLocation[axis.name] else: result[axis.name] = axis.map_forward(axis.default) return result class RuleDescriptor(SimpleDescriptor):", "=========== =========== =============== 1 ✅ ❌ ❌ ❌ 2 ✅", "triple = [ axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum)", "as long as they have the same attributes. Reader and", "= filename \"\"\"string. Relative path to the instance file, **as", "is not None: glyphData['instanceLocation'] = designLocation glyphSources = None for", "location along this axis is the same as the matching", "self.addInstance(instance) return instance def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]): \"\"\"Add the", "file somewhere. \"\"\" self.formatVersion: Optional[str] = None \"\"\"Format version for", "== XML_LANG: familyName = familyNameElement.text sourceObject.setFamilyName(familyName, lang) designLocation, userLocation =", "of this location's label. Keyed by xml:lang code. \"\"\" @property", "groupsElement = ET.Element('groups') groupsElement.attrib['copy'] = \"1\" sourceElement.append(groupsElement) if sourceObject.copyFeatures: featuresElement", "but not attributes # so we have to do it", "[int(u, 16) for u in unicodes.split(\" \")] glyphData['unicodes'] = unicodes", "= self.root.find(\".axes\") if axesElement is not None and 'elidedfallbackname' in", "code:: python instance.clearLocation('Weight') instance.designLocation['Weight'] = (34, 36.5) Args: axisName: if", "= self.writerClass.variableFontDescriptorClass(**kwargs) self.addVariableFont(variableFont) return variableFont def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor): \"\"\"Add", "seealso:: :func:`splitInterpolable` .. versionadded:: 5.0 \"\"\" if self.variableFonts: return self.variableFonts", "to be None, assume axis.maximum cd['maximum'] = None cd['name'] =", "stylename is not None: instanceObject.styleName = stylename postScriptFontName = instanceElement.attrib.get('postscriptfontname')", "if descriptor.filename is not None and not force: continue if", "output. Returns value unchanged if no mapping entry is found.", "or the nested location element') instanceObject.locationLabel = locationLabel instanceObject.userLocation =", "newMap # finally the axis values minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name)", "[ AxisLabelDescriptor(name=\"Roman\", userValue=0, elidable=True) ] doc.addAxis(a2) .. 
versionadded:: 5.0 \"\"\"", "key == XML_LANG: familyName = familyNameElement.text sourceObject.setFamilyName(familyName, lang) designLocation, userLocation", "\"\"\" self.glyphs = glyphs or {} \"\"\"dict for special master", "sourceCount, sourceElement in enumerate(self.root.findall(\".sources/source\")): filename = sourceElement.attrib.get('filename') if filename is", "= float(axisElement.attrib.get(\"default\")) axisObject.name = axisElement.attrib.get(\"name\") if axisElement.attrib.get('hidden', False): axisObject.hidden =", "i2.path = instancePath2 i2.familyName = \"InstanceFamilyName\" i2.styleName = \"InstanceStyleName\" i2.name", "= self.intOrFloat(label.userMinimum) if label.userMaximum is not None: labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum)", "for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'): for key, lang in styleMapFamilyNameElement.items(): if", "doc.axes: if axis.name in self.designLocation: result[axis.name] = self.designLocation[axis.name] else: result[axis.name]", "self.root = tree.getroot() self.documentObject.formatVersion = self.root.attrib.get(\"format\", \"3.0\") self._axes = []", "('wdth', dict(en = 'Width')), 'optical': ('opsz', dict(en = 'Optical Size')),", "'DesignSpaceDocument') -> SimpleLocationDict: \"\"\"Get the complete user location of this", "sources instead. \"\"\" self.kerning = kerning \"\"\" bool. Indicates if", "data. Add more localisations? .. code:: python a1 = AxisDescriptor()", ") or self.documentObject.variableFonts or any( instance.locationLabel or instance.userLocation for instance", "the :attr:`name` will be used as a basename for the", "return a new instance of :class:. \"\"\" self = cls(readerClass=readerClass,", "not None: # font already loaded fonts.append(source.font) continue if source.path", "axis.axisOrdering is not None or axis.axisLabels for axis in self.documentObject.axes", "\" \" axisDescriptorClass = AxisDescriptor discreteAxisDescriptorClass = DiscreteAxisDescriptor axisLabelDescriptorClass =", "element.get(\"elidable\") == \"true\" else False olderSibling = True if element.get(\"oldersibling\")", "__init__(self, *, name, filename=None, axisSubsets=None, lib=None): self.name: str = name", "+ Varlib. .. deprecated:: 5.0 Use the more explicit alias", "DesignSpaceDocumentError(\"label element must have a uservalue attribute.\") value = float(valueStr)", "Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_ \"\"\" self.olderSibling: bool = olderSibling \"\"\"STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.", "used to identify it if it needs to be referenced", "❌ ❌ 2 ✅ ✅ ✅ ❌ 3 ✅ ❌", "The filename attr should not be touched. case 3. descriptor.filename", "add it to :attr:`axes`. The axis will be and instance", "that missing coordinates are at the default location for that", "= element.get(\"name\") if name is None: raise DesignSpaceDocumentError(\"label element must", "= element.get(\"userdefault\") userMaximum = element.get(\"usermaximum\") if userMinimum is not None", "in axisObject.values) axisElement.attrib['default'] = self.intOrFloat(axisObject.default) if axisObject.hidden: axisElement.attrib['hidden'] = \"1\"", "more localisations? .. 
code:: python a1 = AxisDescriptor() a1.minimum =", "location fields, default axis values and mappings, and top-level location", "attribute or the nested location element') instanceObject.locationLabel = locationLabel instanceObject.userLocation", "\"\"\"Read a designspace file from ``path`` and populates the fields", "designLocation, userLocation = self.locationFromElement(instanceElement) locationLabel = instanceElement.attrib.get('location') if (designLocation or", "Indicates if the contents of the font.lib need to be", "if no mapping entry is found. Note: for discrete axes,", "full location. See: :meth:`getFullDesignLocation` :meth:`getFullUserLocation` .. versionadded:: 5.0 \"\"\" self.familyName", "Ensures that :attr:``designLocation`` and :attr:``userLocation`` are dictionaries (possibly empty if", "data in attributes. Data is added to the document by", "dict with normalized axis values.\"\"\" from fontTools.varLib.models import normalizeValue new", "assert self.path is not None for descriptor in self.sources +", "doc = DesignSpaceDocument.fromfile(\"some/path/to/my.designspace\") doc.formatVersion doc.elidedFallbackName doc.axes doc.locationLabels doc.rules doc.rulesProcessingLast doc.sources", "self.rules: List[RuleDescriptor] = [] \"\"\"List of this document's rules.\"\"\" self.rulesProcessingLast:", "'width': ('wdth', dict(en = 'Width')), 'optical': ('opsz', dict(en = 'Optical", "_attrs = ('name', 'userValue') def __init__(self, *, name, userValue): self.name:", "axisName, axisValue in locationObject.items(): if axisName in validatedLocation: # only", "are required to be ``unicode`` strings, even if they only", "self.obj is not None else \"\") class AsDictMixin(object): def asdict(self):", "\"\"\" # we load fonts with the same source.path only", "default writer will not use this attribute. It is up", "user-facing readable names for the axis. Keyed by xml:lang code.", "def newDefaultLocation(self): \"\"\"Return a dict with the default location in", "name=None, labelNames=None, minimum=None, default=None, maximum=None, hidden=False, map=None, axisOrdering=None, axisLabels=None, ):", "elif cd.get('maximum') is None: if cd['minimum'] > value: return False", "styleMapFamilyName, languageCode=\"en\"): self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName) def getStyleMapFamilyName(self, languageCode=\"en\"): return self.localisedStyleMapFamilyName.get(languageCode)", "for the axis data. Add more localisations? .. code:: python", "ruleDescriptor: RuleDescriptor): \"\"\"Add the given ``ruleDescriptor`` to :attr:`rules`.\"\"\" self.rules.append(ruleDescriptor) def", "for sub in ruleObject.subs: subElement = ET.Element('sub') subElement.attrib['name'] = sub[0]", "a condition set. rules = [] rulesElement = self.root.find(\".rules\") if", "= code localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code) instanceElement.append(localisedStyleMapFamilyNameElement) if self.effectiveFormatTuple >= (5,", "``-math.inf``) \"\"\" self.userDefault: Optional[float] = userDefault \"\"\"New default value of", "``designLocation[axisName]``: the explicit design location along this axis, possibly anisotropic.", "-> str: \"\"\"Return the English name from :attr:`labelNames` or the", "anisotropy. .. 
"""
    designSpaceDocument

    - read and write designspace files
"""

from __future__ import annotations

import collections
import copy
import itertools
import math
import os
import plistlib
import posixpath
from textwrap import indent
from typing import Any, Dict, List, Optional

from fontTools.misc import etree as ET
from fontTools.misc.loggingTools import LogMixin

__all__ = [
    'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor',
    'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader',
    'BaseDocWriter'
]

# ElementTree allows to find namespace-prefixed elements, but not attributes,
# so for attributes like "xml:lang" we spell out the namespace ourselves as
# '{http://www.w3.org/XML/1998/namespace}lang'.


def posix(path):
    """Normalize paths using forward slash to work also on Windows."""
    new_path = posixpath.join(*path.split(os.path.sep))
    if path.startswith('/'):
        # The above transformation loses absolute paths
        new_path = '/' + new_path
    return new_path


def posixpath_property(private_name):
    """Generate a property that holds a path always using forward slashes."""
    def getter(self):
        # Normal getter
        return getattr(self, private_name)

    def setter(self, value):
        # The setter rewrites paths using forward slashes
        if value is not None:
            value = posix(value)
        setattr(self, private_name, value)

    return property(getter, setter)


class DesignSpaceDocumentError(Exception):
    def __init__(self, msg, obj=None):
        self.msg = msg
        self.obj = obj

    def __str__(self):
        return str(self.msg) + (
            ": %r" % self.obj if self.obj is not None else "")


class AsDictMixin(object):

    def asdict(self):
        d = {}
        for attr, value in self.__dict__.items():
            if attr.startswith("_"):
                continue
            if hasattr(value, "asdict"):
                value = value.asdict()
            elif isinstance(value, list):
                value = [
                    v.asdict() if hasattr(v, "asdict") else v for v in value
                ]
            d[attr] = value
        return d


class SimpleDescriptor(AsDictMixin):
    """Containers for a bunch of attributes."""

    # XXX this is ugly. The 'print' is inappropriate here, and instead of
    # assert, it should simply return True/False
    def compare(self, other):
        # test if this object contains the same data as the other
        for attr in self._attrs:
            try:
                assert getattr(self, attr) == getattr(other, attr)
            except AssertionError:
                print("failed attribute", attr,
                      getattr(self, attr), "!=", getattr(other, attr))

    def __repr__(self):
        attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs]
        attrs = indent('\n'.join(attrs), '    ')
        return f"{self.__class__.__name__}(\n{attrs}\n)"


class SourceDescriptor(SimpleDescriptor):
    """Simple container for data related to the source.

    .. code:: python

        doc = DesignSpaceDocument()
        s1 = SourceDescriptor()
        s1.path = masterPath1
        s1.name = "master.ufo1"
        s1.font = defcon.Font("master.ufo1")
        s1.location = dict(weight=0)
        s1.familyName = "MasterFamilyName"
        s1.styleName = "MasterStyleNameOne"
        s1.localisedFamilyName = dict(fr="Caractère")
        s1.mutedGlyphNames.append("A")
        s1.mutedGlyphNames.append("Z")
        doc.addSource(s1)
    """
    flavor = "source"

    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    ...


class RuleDescriptor(SimpleDescriptor):
    """Represents the rule descriptor element: conditional glyph substitutions.

    .. code:: python

        r1 = RuleDescriptor()
        r1.name = "unique.rule.name"
        r1.conditionSets.append([dict(...), dict(...)])
        r1.subs.append(("a", "a.alt"))

    .. code:: xml

        <!-- optional: list of substitution rules -->
        <rules>
            <rule name="vertical.bars">
                <conditionset>
                    <condition minimum="250.000000" maximum="750.000000" name="weight"/>
                </conditionset>
                <sub name="cent" with="cent.alt"/>
                <sub name="dollar" with="dollar.alt"/>
            </rule>
        </rules>
    """
    _attrs = ['name', 'conditionSets', 'subs']  # what do we need here

    def __init__(self, *, name=None, conditionSets=None, subs=None):
        self.name = name
        """string. Unique name for this rule. Can be used to reference this rule data."""
        # list of lists of dict(name='aaaa', minimum=0, maximum=1000)
        self.conditionSets = conditionSets or []
        """a list of conditionsets: each conditionset is a list of conditions,
        and each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys."""
        # list of substitutions stored as tuples of glyphnames
        self.subs = subs or []
        """list of substitutions.

        - Each substitution is stored as a tuple of glyphnames, e.g. ("a", "a.alt").
        - Note: By default, rules are applied first, before other text shaping,
          as part of the `Required Variation Alternates OpenType feature
          <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_.
          See ref:`rules-element` § Attributes.
        """


def evaluateRule(rule, location):
    """Return True if any of the rule's conditionsets matches the given location."""
    return any(evaluateConditions(c, location) for c in rule.conditionSets)


def evaluateConditions(conditions, location):
    """Return True if all the conditions matches the given location.

    - If a condition has no minimum, check for < maximum.
    - If a condition has no maximum, check for > minimum.
    """
    for cd in conditions:
        value = location[cd['name']]
        if cd.get('minimum') is None:
            if value > cd['maximum']:
                return False
        elif cd.get('maximum') is None:
            if cd['minimum'] > value:
                return False
        elif not cd['minimum'] <= value <= cd['maximum']:
            return False
    return True


def processRules(rules, location, glyphNames):
    """Apply these rules at this location to these glyphnames.

    Return a new list of glyphNames with substitutions applied.

    - rule order matters
    """
    newNames = []
    for rule in rules:
        if evaluateRule(rule, location):
            for name in glyphNames:
                swap = False
                for a, b in rule.subs:
                    if name == a:
                        swap = True
                        break
                if swap:
                    newNames.append(b)
                else:
                    newNames.append(name)
            glyphNames = newNames
            newNames = []
    return glyphNames
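# A short, self-contained sketch of how the rule helpers above behave; the
# rule, axis name, and values are made up for illustration:

demo_rule = RuleDescriptor(
    name="demo",
    conditionSets=[[dict(name="weight", minimum=250, maximum=750)]],
    subs=[("dollar", "dollar.alt")],
)

# Inside the conditionset's range the substitution applies...
assert processRules([demo_rule], dict(weight=500), ["dollar", "space"]) == \
    ["dollar.alt", "space"]
# ...outside of it the glyph names pass through unchanged.
assert processRules([demo_rule], dict(weight=100), ["dollar", "space"]) == \
    ["dollar", "space"]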
class AbstractAxisDescriptor(SimpleDescriptor):
    flavor = "axis"

    ...


class AxisDescriptor(AbstractAxisDescriptor):
    """Simple container for the continuous axis data.

    .. code:: python

        a1 = AxisDescriptor()
        a1.minimum = 1
        a1.maximum = 1000
        a1.default = 400
        a1.name = "weight"
        a1.tag = "wght"
        a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
        a1.axisOrdering = 1
        a1.axisLabels = [
            AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
        ]
        doc.addAxis(a1)
    """
    ...

    def map_forward(self, v):
        """Maps value from axis mapping's input (user) to output (design)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if not self.map:
            return v
        return piecewiseLinearMap(v, {k: v for k, v in self.map})

    def map_backward(self, value):
        """Maps value from axis mapping's output (design) to input (user)."""
        from fontTools.varLib.models import piecewiseLinearMap

        if isinstance(value, tuple):
            value = value[0]
        if not self.map:
            return value
        return piecewiseLinearMap(value, {v: k for k, v in self.map})


class DiscreteAxisDescriptor(AbstractAxisDescriptor):
    """Container for discrete axis data.

    Use this for axes that do not interpolate. The main difference from a
    continuous axis is that a continuous axis has a ``minimum`` and a
    ``maximum``, while a discrete axis has a list of ``values``.

    Example: an Italic axis with 2 stops, Roman and Italic, that are not
    compatible. The axis still allows to bind together the full font family,
    which is useful for the STAT table, however it can't become a variation
    axis in a VF.

    .. code:: python

        a2 = DiscreteAxisDescriptor()
        a2.values = [0, 1]
        a2.name = "Italic"
        a2.axisLabels = [
            AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
        ]
        doc.addAxis(a2)

    .. versionadded:: 5.0
    """
    flavor = "axis"

    def __init__(
        self,
        *,
        tag=None,
        name=None,
        labelNames=None,
        values=None,
        default=None,
        hidden=False,
        map=None,
        axisOrdering=None,
        axisLabels=None,
    ):
        ...
        self.values: List[float] = values or []
        """List of possible values for this axis. Contrary to continuous axes,
        only the values in this list can be taken by the axis.

        Note: for discrete axes, each value must have its own glyph set for
        the designspace, as fonts at each value can have different glyph sets.
        """

    def map_backward(self, value):
        """Maps value from axis mapping's output to input.

        Returns value unchanged if no mapping entry is found.
        """
        if isinstance(value, tuple):
            value = value[0]
        return next((k for k, v in self.map if v == value), value)


class AxisLabelDescriptor(SimpleDescriptor):
    """Container for axis label data.

    Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
    All values are user values.

    See: `OTSpec STAT Axis value tables
    <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_

    .. versionadded:: 5.0
    """
    flavor = "label"

    ...


class LocationLabelDescriptor(SimpleDescriptor):
    """Container for location label data.

    Analogue of OpenType's STAT data for a free-floating location (format 4).
    All values are user values.

    See: `OTSpec STAT Axis value table, format 4
    <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_

    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')

    ...


class InstanceDescriptor(SimpleDescriptor):
    """Simple container for data related to the instance."""
    flavor = "instance"

    filename = posixpath_property("_filename")
    path = posixpath_property("_path")

    ...


class VariableFontDescriptor(SimpleDescriptor):
    """Container for variable fonts, sub-spaces of the Designspace.

    Use-cases:

    - From a single DesignSpace with discrete axes, define 1 variable font
      per value on the discrete axes.
    - From a big variable font with many axes, define subsets of that variable
      font that only include some axes and freeze other axes at a given
      location.

    .. versionadded:: 5.0
    """
    flavor = "variable-font"

    ...


class RangeAxisSubsetDescriptor(SimpleDescriptor):
    """Subset of a continuous axis to include in a variable font.

    .. versionadded:: 5.0
    """
    ...


class ValueAxisSubsetDescriptor(SimpleDescriptor):
    """Single value of a discrete or continuous axis to use in a variable font.

    .. versionadded:: 5.0
    """
    ...


class BaseDocWriter(object):
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontDescriptorClass = VariableFontDescriptor
    instanceDescriptorClass = InstanceDescriptor

    ...


class BaseDocReader(LogMixin):
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontsDescriptorClass = VariableFontDescriptor
    instanceDescriptorClass = InstanceDescriptor

    def __init__(self, documentPath, documentObject):
        self.path = documentPath
        self.documentObject = documentObject
        tree = ET.parse(self.path)
        self.root = tree.getroot()
        self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
        ...


class DesignSpaceDocument(LogMixin, AsDictMixin):
    """The DesignSpaceDocument object can read and write ``.designspace`` data.

    It imports the axes, sources, variable fonts and instances to very basic
    **descriptor** objects that store the data in attributes. Data is added
    to the document by creating such descriptor objects, filling them with
    data and then adding them to the document. This makes it easy to integrate
    this object in different contexts.
    """

    def __init__(self, readerClass=None, writerClass=None):
        self.path = None
        """String, optional. When the document is read from the disk, this is
        the full path that was given to :meth:`read` or :meth:`open`."""
        self.formatVersion: Optional[str] = None
        """Format version for this document, as a string. E.g. "4.0" """
        self.sources: List[SourceDescriptor] = []
        """List of this document's sources."""
        self.variableFonts: List[VariableFontDescriptor] = []
        """List of this document's variable fonts.

        .. versionadded:: 5.0
        """
        self.lib: Dict = {}
        """User defined, custom data associated with the document."""
        self.default: Optional[str] = None
        """Name of the default master.

        This attribute is updated by the :meth:`findDefault`.
        """
        if readerClass is not None:
            self.readerClass = readerClass
        else:
            self.readerClass = BaseDocReader
        if writerClass is not None:
            self.writerClass = writerClass
        else:
            self.writerClass = BaseDocWriter
        ...
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Optional, Tuple

import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.monte_carlo import qSimpleRegret
from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective
from botorch.acquisition.utils import get_infeasible_cost
from botorch.models.model import Model
from botorch.utils import (
    get_objective_weights_transform,
    get_outcome_constraint_transforms,
)
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.transforms import squeeze_last_dim
from torch import Tensor


def get_PosteriorMean(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a PosteriorMean acquisition function.

    Note: If no OutcomeConstraints given, return an analytic acquisition
    function. This requires {optimizer_kwargs: {joint_optimization: True}} or
    an optimizer that does not assume pending point support.

    Args:
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints
            (if there are any).
        X_pending: A tensor containing points whose evaluation is pending
            (i.e. that have been submitted for evaluation) present for all
            objective outcomes and outcomes that appear in the outcome
            constraints (if there are any).

    Returns:
        PosteriorMean: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if kwargs.get("chebyshev_scalarization", False):
        obj_tf = get_chebyshev_scalarization(
            weights=objective_weights,
            Y=squeeze_last_dim(torch.stack(kwargs.get("Ys")).transpose(0, 1)),
        )
    else:
        obj_tf = get_objective_weights_transform(objective_weights)
    if outcome_constraints is None:
        objective = GenericMCObjective(objective=obj_tf)
    else:
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
        objective = ConstrainedMCObjective(
            objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    # Use qSimpleRegret, not analytic posterior, to handle arbitrary objective fns.
    acq_func = qSimpleRegret(model, objective=objective)
    return acq_func
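# A minimal usage sketch for get_PosteriorMean, assuming botorch and gpytorch
# are installed; the training data and weights below are made up:

from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood

train_X = torch.rand(20, 3, dtype=torch.double)      # 20 points in [0, 1]^3
train_Y = train_X.sum(dim=-1, keepdim=True)          # one synthetic outcome
gp = SingleTaskGP(train_X, train_Y)
fit_gpytorch_model(ExactMarginalLogLikelihood(gp.likelihood, gp))

# One outcome with weight 1.0 and no outcome constraints, so the function
# wraps the objective in a GenericMCObjective and returns qSimpleRegret.
acqf = get_PosteriorMean(
    model=gp,
    objective_weights=torch.tensor([1.0], dtype=torch.double),
    X_observed=train_X,
)
value = acqf(train_X[:1].unsqueeze(0))  # evaluate a 1 x 1 x 3 candidate batch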
[ "return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda context: ready_fn()) ], port=PORT", "), { \"port\": PORT, \"expected_num_subscribers\": 1, } ], remappings=[(\"points_raw\", test_topic)],", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "2018 the Autoware Foundation # # Licensed under the Apache", "Unless required by applicable law or agreed to in writing,", "launch_ros.actions.Node( package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\", parameters=[ \"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\") ), {", "PORT = lidar_integration.get_open_port() # The node under test and the", "test_topic)], arguments=[\"--model\", \"vlp16\"] ) pcl_checker = lidar_integration.make_pcl_checker( topic=test_topic, size=55000, period=100,", "size=55000, period=100, period_tolerance=2.2, size_tolerance=1.4, ) return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[", "lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda context: ready_fn()) ], port=PORT )", ") return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda context: ready_fn()) ],", "distributed under the License is distributed on an \"AS IS\"", "node_namespace=\"lidar_front\", parameters=[ \"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\") ), { \"port\": PORT, \"expected_num_subscribers\": 1,", "\"port\": PORT, \"expected_num_subscribers\": 1, } ], remappings=[(\"points_raw\", test_topic)], arguments=[\"--model\", \"vlp16\"]", "PORT, \"expected_num_subscribers\": 1, } ], remappings=[(\"points_raw\", test_topic)], arguments=[\"--model\", \"vlp16\"] )", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "our tests: test_topic = \"veloyne_cloud_node_test_topic\" velodyne_cloud_node = launch_ros.actions.Node( package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\",", "= \"veloyne_cloud_node_test_topic\" velodyne_cloud_node = launch_ros.actions.Node( package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\", parameters=[", "{ \"port\": PORT, \"expected_num_subscribers\": 1, } ], remappings=[(\"points_raw\", test_topic)], arguments=[\"--model\",", "the Autoware Foundation # # Licensed under the Apache License,", "express or implied. 
# See the License for the specific", "applicable law or agreed to in writing, software # distributed", "pass/fail our tests: test_topic = \"veloyne_cloud_node_test_topic\" velodyne_cloud_node = launch_ros.actions.Node( package=\"velodyne_nodes\",", "the lidar_integration package. We just need to # instantiate them", "except in compliance with the License. # You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Inc. import ament_index_python import launch import launch.actions import launch_ros.actions import", "# Test cases are created automatically by the lidar_integration package.", "\"expected_num_subscribers\": 1, } ], remappings=[(\"points_raw\", test_topic)], arguments=[\"--model\", \"vlp16\"] ) pcl_checker", "velodyne_cloud_node = launch_ros.actions.Node( package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\", parameters=[ \"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\")", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda context: ready_fn()) ], port=PORT ) # Test", "not use this file except in compliance with the License.", "node under test and the checker node that will pass/fail", "1, } ], remappings=[(\"points_raw\", test_topic)], arguments=[\"--model\", \"vlp16\"] ) pcl_checker =", "just need to # instantiate them active = lidar_integration.make_active_tests() after_shutdown", "lidar_integration def generate_test_description(ready_fn): PORT = lidar_integration.get_open_port() # The node under", "ready_fn()) ], port=PORT ) # Test cases are created automatically", "by Tier IV, Inc. and Apex.AI, Inc. import ament_index_python import", "writing, software # distributed under the License is distributed on", "# limitations under the License. # # Co-developed by Tier", "in writing, software # distributed under the License is distributed", "governing permissions and # limitations under the License. # #", "automatically by the lidar_integration package. We just need to #", ") pcl_checker = lidar_integration.make_pcl_checker( topic=test_topic, size=55000, period=100, period_tolerance=2.2, size_tolerance=1.4, )", "you may not use this file except in compliance with", "test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda context: ready_fn()) ], port=PORT ) #", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "ament_index_python.get_package_share_directory(\"velodyne_nodes\") ), { \"port\": PORT, \"expected_num_subscribers\": 1, } ], remappings=[(\"points_raw\",", "IV, Inc. and Apex.AI, Inc. import ament_index_python import launch import", "lidar_integration.get_open_port() # The node under test and the checker node", "# Copyright 2018 the Autoware Foundation # # Licensed under", "context: ready_fn()) ], port=PORT ) # Test cases are created", "period_tolerance=2.2, size_tolerance=1.4, ) return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda context:", "use this file except in compliance with the License. 
#", "launch.actions import launch_ros.actions import lidar_integration def generate_test_description(ready_fn): PORT = lidar_integration.get_open_port()", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "and # limitations under the License. # # Co-developed by", "\"veloyne_cloud_node_test_topic\" velodyne_cloud_node = launch_ros.actions.Node( package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\", parameters=[ \"{}/param/vlp16_test.param.yaml\".format(", "checker node that will pass/fail our tests: test_topic = \"veloyne_cloud_node_test_topic\"", "cases are created automatically by the lidar_integration package. We just", "# The node under test and the checker node that", "CONDITIONS OF ANY KIND, either express or implied. # See", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\", parameters=[ \"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\") ), { \"port\":", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "Inc. and Apex.AI, Inc. import ament_index_python import launch import launch.actions", "import lidar_integration def generate_test_description(ready_fn): PORT = lidar_integration.get_open_port() # The node", "under the License is distributed on an \"AS IS\" BASIS,", "will pass/fail our tests: test_topic = \"veloyne_cloud_node_test_topic\" velodyne_cloud_node = launch_ros.actions.Node(", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"vlp16\"] ) pcl_checker = lidar_integration.make_pcl_checker( topic=test_topic, size=55000, period=100, period_tolerance=2.2, size_tolerance=1.4,", "License for the specific language governing permissions and # limitations", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "Test cases are created automatically by the lidar_integration package. We", "Co-developed by Tier IV, Inc. and Apex.AI, Inc. 
import ament_index_python", "test_topic = \"veloyne_cloud_node_test_topic\" velodyne_cloud_node = launch_ros.actions.Node( package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\",", "tests: test_topic = \"veloyne_cloud_node_test_topic\" velodyne_cloud_node = launch_ros.actions.Node( package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\",", "launch.actions.OpaqueFunction(function=lambda context: ready_fn()) ], port=PORT ) # Test cases are", "the checker node that will pass/fail our tests: test_topic =", "import launch.actions import launch_ros.actions import lidar_integration def generate_test_description(ready_fn): PORT =", "Autoware Foundation # # Licensed under the Apache License, Version", "remappings=[(\"points_raw\", test_topic)], arguments=[\"--model\", \"vlp16\"] ) pcl_checker = lidar_integration.make_pcl_checker( topic=test_topic, size=55000,", "the License for the specific language governing permissions and #", "launch_ros.actions import lidar_integration def generate_test_description(ready_fn): PORT = lidar_integration.get_open_port() # The", "launch import launch.actions import launch_ros.actions import lidar_integration def generate_test_description(ready_fn): PORT", "(the \"License\"); # you may not use this file except", "Apache License, Version 2.0 (the \"License\"); # you may not", "need to # instantiate them active = lidar_integration.make_active_tests() after_shutdown =", "# you may not use this file except in compliance", "], remappings=[(\"points_raw\", test_topic)], arguments=[\"--model\", \"vlp16\"] ) pcl_checker = lidar_integration.make_pcl_checker( topic=test_topic,", "We just need to # instantiate them active = lidar_integration.make_active_tests()", "either express or implied. # See the License for the", "OR CONDITIONS OF ANY KIND, either express or implied. #", "are created automatically by the lidar_integration package. We just need", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the License is distributed on an \"AS IS\" BASIS, #", "in compliance with the License. # You may obtain a", "Copyright 2018 the Autoware Foundation # # Licensed under the", "software # distributed under the License is distributed on an", "License. # # Co-developed by Tier IV, Inc. and Apex.AI,", "under the License. # # Co-developed by Tier IV, Inc.", "# Co-developed by Tier IV, Inc. and Apex.AI, Inc. import", "permissions and # limitations under the License. # # Co-developed", "import ament_index_python import launch import launch.actions import launch_ros.actions import lidar_integration", "and Apex.AI, Inc. import ament_index_python import launch import launch.actions import", "period=100, period_tolerance=2.2, size_tolerance=1.4, ) return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda", "# # Unless required by applicable law or agreed to", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "Tier IV, Inc. and Apex.AI, Inc. import ament_index_python import launch", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "test and the checker node that will pass/fail our tests:", "package. 
We just need to # instantiate them active =", "import launch import launch.actions import launch_ros.actions import lidar_integration def generate_test_description(ready_fn):", "lidar_integration package. We just need to # instantiate them active", "Version 2.0 (the \"License\"); # you may not use this", "The node under test and the checker node that will", "created automatically by the lidar_integration package. We just need to", "node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\", parameters=[ \"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\") ), { \"port\": PORT, \"expected_num_subscribers\":", "port=PORT ) # Test cases are created automatically by the", "law or agreed to in writing, software # distributed under", "# # Co-developed by Tier IV, Inc. and Apex.AI, Inc.", "implied. # See the License for the specific language governing", "under the Apache License, Version 2.0 (the \"License\"); # you", "= lidar_integration.get_open_port() # The node under test and the checker", "node that will pass/fail our tests: test_topic = \"veloyne_cloud_node_test_topic\" velodyne_cloud_node", "\"License\"); # you may not use this file except in", "} ], remappings=[(\"points_raw\", test_topic)], arguments=[\"--model\", \"vlp16\"] ) pcl_checker = lidar_integration.make_pcl_checker(", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "to # instantiate them active = lidar_integration.make_active_tests() after_shutdown = lidar_integration.make_post_shutdown_tests()", "\"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\") ), { \"port\": PORT, \"expected_num_subscribers\": 1, } ],", "pcl_checker = lidar_integration.make_pcl_checker( topic=test_topic, size=55000, period=100, period_tolerance=2.2, size_tolerance=1.4, ) return", "limitations under the License. # # Co-developed by Tier IV,", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "Apex.AI, Inc. import ament_index_python import launch import launch.actions import launch_ros.actions", "], port=PORT ) # Test cases are created automatically by", "the License. # # Co-developed by Tier IV, Inc. and", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "import launch_ros.actions import lidar_integration def generate_test_description(ready_fn): PORT = lidar_integration.get_open_port() #", "node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\", parameters=[ \"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\") ), { \"port\": PORT,", "under test and the checker node that will pass/fail our", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "parameters=[ \"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\") ), { \"port\": PORT, \"expected_num_subscribers\": 1, }", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "topic=test_topic, size=55000, period=100, period_tolerance=2.2, size_tolerance=1.4, ) return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker],", "to in writing, software # distributed under the License is", "generate_test_description(ready_fn): PORT = lidar_integration.get_open_port() # The node under test and", "= launch_ros.actions.Node( package=\"velodyne_nodes\", node_executable=\"velodyne_cloud_node_exe\", node_name=\"vlp16_driver_node\", node_namespace=\"lidar_front\", parameters=[ \"{}/param/vlp16_test.param.yaml\".format( ament_index_python.get_package_share_directory(\"velodyne_nodes\") ),", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "and the checker node that will pass/fail our tests: test_topic", ") # Test cases are created automatically by the lidar_integration", "that will pass/fail our tests: test_topic = \"veloyne_cloud_node_test_topic\" velodyne_cloud_node =", "You may obtain a copy of the License at #", "= lidar_integration.make_pcl_checker( topic=test_topic, size=55000, period=100, period_tolerance=2.2, size_tolerance=1.4, ) return lidar_integration.get_lidar_launch_description(", "language governing permissions and # limitations under the License. #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "by the lidar_integration package. We just need to # instantiate", "ament_index_python import launch import launch.actions import launch_ros.actions import lidar_integration def", "arguments=[\"--model\", \"vlp16\"] ) pcl_checker = lidar_integration.make_pcl_checker( topic=test_topic, size=55000, period=100, period_tolerance=2.2,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "other_actions=[ launch.actions.OpaqueFunction(function=lambda context: ready_fn()) ], port=PORT ) # Test cases", "Foundation # # Licensed under the Apache License, Version 2.0", "size_tolerance=1.4, ) return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node], checkers=[pcl_checker], other_actions=[ launch.actions.OpaqueFunction(function=lambda context: ready_fn())", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "the Apache License, Version 2.0 (the \"License\"); # you may", "def generate_test_description(ready_fn): PORT = lidar_integration.get_open_port() # The node under test", "lidar_integration.make_pcl_checker( topic=test_topic, size=55000, period=100, period_tolerance=2.2, size_tolerance=1.4, ) return lidar_integration.get_lidar_launch_description( test_nodes=[velodyne_cloud_node]," ]
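# Aside: the OpaqueFunction action used above defers a Python callable until
# the launch description is executed, when it is handed the launch context.
# A minimal standalone sketch of that mechanism (illustrative names, not part
# of the test above):
import launch
import launch.actions

def _on_launch(context):
    # Called at launch time with the live LaunchContext; an action list
    # may be returned to extend the launch description dynamically.
    print('launch context ready:', context is not None)
    return []

def generate_launch_description():
    return launch.LaunchDescription([
        launch.actions.OpaqueFunction(function=_on_launch),
    ])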
[ "resized) # construct the argument parser and parse the arguments", "required = True, help = \"Path to the target image\")", "image transfer = color_transfer(source, target) # check to see if", "from the source image # to the target image transfer", "cv2.resize(image, dim, interpolation = cv2.INTER_AREA) # show the resized image", "image\") ap.add_argument(\"-o\", \"--output\", help = \"Path to the output image", "\"Path to the output image (optional)\") args = vars(ap.parse_args()) #", "True, help = \"Path to the target image\") ap.add_argument(\"-o\", \"--output\",", "check to see if the output image should be saved", "and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-s\", \"--source\", required", "constant width, just to # make displaying the images take", "show the images and wait for a key press show_image(\"Source\",", "load the images source = cv2.imread(args[\"source\"]) target = cv2.imread(args[\"target\"]) #", "help = \"Path to the source image\") ap.add_argument(\"-t\", \"--target\", required", "import numpy as np import argparse import cv2 def show_image(title,", "(optional)\") args = vars(ap.parse_args()) # load the images source =", "the target image\") ap.add_argument(\"-o\", \"--output\", help = \"Path to the", "= color_transfer(source, target) # check to see if the output", "--target images/ocean_day.jpg # import the necessary packages from color_transfer import", "= width / float(image.shape[1]) dim = (width, int(image.shape[0] * r))", "required = True, help = \"Path to the source image\")", "the source image # to the target image transfer =", "import cv2 def show_image(title, image, width = 300): # resize", "python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg # import the necessary", "source = cv2.imread(args[\"source\"]) target = cv2.imread(args[\"target\"]) # transfer the color", "target) # check to see if the output image should", "True, help = \"Path to the source image\") ap.add_argument(\"-t\", \"--target\",", "dim = (width, int(image.shape[0] * r)) resized = cv2.resize(image, dim,", "= cv2.INTER_AREA) # show the resized image cv2.imshow(title, resized) #", "int(image.shape[0] * r)) resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)", "dim, interpolation = cv2.INTER_AREA) # show the resized image cv2.imshow(title,", "to see if the output image should be saved if", "float(image.shape[1]) dim = (width, int(image.shape[0] * r)) resized = cv2.resize(image,", "\"Path to the target image\") ap.add_argument(\"-o\", \"--output\", help = \"Path", "for a key press show_image(\"Source\", source) show_image(\"Target\", target) show_image(\"Transfer\", transfer)", "# show the resized image cv2.imshow(title, resized) # construct the", "color_transfer import numpy as np import argparse import cv2 def", "cv2.imshow(title, resized) # construct the argument parser and parse the", "estate r = width / float(image.shape[1]) dim = (width, int(image.shape[0]", "* r)) resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA) #", "the target image transfer = color_transfer(source, target) # check to", "import color_transfer import numpy as np import argparse import cv2", "example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg # import the necessary packages", "r)) resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA) # show", "cv2.INTER_AREA) # show the resized image cv2.imshow(title, resized) # construct", "r = width / float(image.shape[1]) dim = (width, int(image.shape[0] *", "should 
be saved if args[\"output\"] is not None: cv2.imwrite(args[\"output\"], transfer)", "transfer the color distribution from the source image # to", "source image # to the target image transfer = color_transfer(source,", "help = \"Path to the target image\") ap.add_argument(\"-o\", \"--output\", help", "ap = argparse.ArgumentParser() ap.add_argument(\"-s\", \"--source\", required = True, help =", "construct the argument parser and parse the arguments ap =", "to # make displaying the images take up less screen", "packages from color_transfer import color_transfer import numpy as np import", "# make displaying the images take up less screen real", "up less screen real # estate r = width /", "to the target image transfer = color_transfer(source, target) # check", "argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-s\",", "the color distribution from the source image # to the", "is not None: cv2.imwrite(args[\"output\"], transfer) # show the images and", "width, just to # make displaying the images take up", "not None: cv2.imwrite(args[\"output\"], transfer) # show the images and wait", "import the necessary packages from color_transfer import color_transfer import numpy", "# transfer the color distribution from the source image #", "cv2.imwrite(args[\"output\"], transfer) # show the images and wait for a", "screen real # estate r = width / float(image.shape[1]) dim", "be saved if args[\"output\"] is not None: cv2.imwrite(args[\"output\"], transfer) #", "argparse import cv2 def show_image(title, image, width = 300): #", "interpolation = cv2.INTER_AREA) # show the resized image cv2.imshow(title, resized)", "= True, help = \"Path to the source image\") ap.add_argument(\"-t\",", "resize the image to have a constant width, just to", "if args[\"output\"] is not None: cv2.imwrite(args[\"output\"], transfer) # show the", "just to # make displaying the images take up less", "= argparse.ArgumentParser() ap.add_argument(\"-s\", \"--source\", required = True, help = \"Path", "the output image should be saved if args[\"output\"] is not", "def show_image(title, image, width = 300): # resize the image", "--source images/ocean_sunset.jpg --target images/ocean_day.jpg # import the necessary packages from", "numpy as np import argparse import cv2 def show_image(title, image,", "None: cv2.imwrite(args[\"output\"], transfer) # show the images and wait for", "to the source image\") ap.add_argument(\"-t\", \"--target\", required = True, help", "# python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg # import the", "output image should be saved if args[\"output\"] is not None:", "the necessary packages from color_transfer import color_transfer import numpy as", "ap.add_argument(\"-t\", \"--target\", required = True, help = \"Path to the", "the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-s\", \"--source\", required = True,", "parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-s\", \"--source\",", "# check to see if the output image should be", "displaying the images take up less screen real # estate", "width / float(image.shape[1]) dim = (width, int(image.shape[0] * r)) resized", "= True, help = \"Path to the target image\") ap.add_argument(\"-o\",", "color distribution from the source image # to the target", "images source = cv2.imread(args[\"source\"]) target = cv2.imread(args[\"target\"]) # transfer the", "= cv2.resize(image, dim, interpolation = cv2.INTER_AREA) # show the resized", "target image transfer 
= color_transfer(source, target) # check to see", "/ float(image.shape[1]) dim = (width, int(image.shape[0] * r)) resized =", "= \"Path to the source image\") ap.add_argument(\"-t\", \"--target\", required =", "source image\") ap.add_argument(\"-t\", \"--target\", required = True, help = \"Path", "wait for a key press show_image(\"Source\", source) show_image(\"Target\", target) show_image(\"Transfer\",", "image should be saved if args[\"output\"] is not None: cv2.imwrite(args[\"output\"],", "args = vars(ap.parse_args()) # load the images source = cv2.imread(args[\"source\"])", "= \"Path to the output image (optional)\") args = vars(ap.parse_args())", "transfer = color_transfer(source, target) # check to see if the", "target = cv2.imread(args[\"target\"]) # transfer the color distribution from the", "a key press show_image(\"Source\", source) show_image(\"Target\", target) show_image(\"Transfer\", transfer) cv2.waitKey(0)", "if the output image should be saved if args[\"output\"] is", "# load the images source = cv2.imread(args[\"source\"]) target = cv2.imread(args[\"target\"])", "image to have a constant width, just to # make", "the images take up less screen real # estate r", "image, width = 300): # resize the image to have", "show the resized image cv2.imshow(title, resized) # construct the argument", "# show the images and wait for a key press", "# resize the image to have a constant width, just", "target image\") ap.add_argument(\"-o\", \"--output\", help = \"Path to the output", "necessary packages from color_transfer import color_transfer import numpy as np", "= (width, int(image.shape[0] * r)) resized = cv2.resize(image, dim, interpolation", "and wait for a key press show_image(\"Source\", source) show_image(\"Target\", target)", "# USAGE # python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg #", "width = 300): # resize the image to have a", "see if the output image should be saved if args[\"output\"]", "args[\"output\"] is not None: cv2.imwrite(args[\"output\"], transfer) # show the images", "distribution from the source image # to the target image", "np import argparse import cv2 def show_image(title, image, width =", "cv2.imread(args[\"source\"]) target = cv2.imread(args[\"target\"]) # transfer the color distribution from", "saved if args[\"output\"] is not None: cv2.imwrite(args[\"output\"], transfer) # show", "color_transfer import color_transfer import numpy as np import argparse import", "image cv2.imshow(title, resized) # construct the argument parser and parse", "ap.add_argument(\"-o\", \"--output\", help = \"Path to the output image (optional)\")", "image # to the target image transfer = color_transfer(source, target)", "(width, int(image.shape[0] * r)) resized = cv2.resize(image, dim, interpolation =", "to the output image (optional)\") args = vars(ap.parse_args()) # load", "have a constant width, just to # make displaying the", "the images source = cv2.imread(args[\"source\"]) target = cv2.imread(args[\"target\"]) # transfer", "= \"Path to the target image\") ap.add_argument(\"-o\", \"--output\", help =", "the image to have a constant width, just to #", "# estate r = width / float(image.shape[1]) dim = (width,", "parse the arguments ap = argparse.ArgumentParser() ap.add_argument(\"-s\", \"--source\", required =", "as np import argparse import cv2 def show_image(title, image, width", "= cv2.imread(args[\"source\"]) target = cv2.imread(args[\"target\"]) # transfer the color distribution", "transfer) # show the images and wait for a key", "the 
argument parser and parse the arguments ap = argparse.ArgumentParser()", "= 300): # resize the image to have a constant", "the images and wait for a key press show_image(\"Source\", source)", "real # estate r = width / float(image.shape[1]) dim =", "\"--source\", required = True, help = \"Path to the source", "\"Path to the source image\") ap.add_argument(\"-t\", \"--target\", required = True,", "image (optional)\") args = vars(ap.parse_args()) # load the images source", "images/ocean_day.jpg # import the necessary packages from color_transfer import color_transfer", "help = \"Path to the output image (optional)\") args =", "output image (optional)\") args = vars(ap.parse_args()) # load the images", "the source image\") ap.add_argument(\"-t\", \"--target\", required = True, help =", "# import the necessary packages from color_transfer import color_transfer import", "resized image cv2.imshow(title, resized) # construct the argument parser and", "# construct the argument parser and parse the arguments ap", "images take up less screen real # estate r =", "import argparse import cv2 def show_image(title, image, width = 300):", "cv2 def show_image(title, image, width = 300): # resize the", "= vars(ap.parse_args()) # load the images source = cv2.imread(args[\"source\"]) target", "show_image(title, image, width = 300): # resize the image to", "cv2.imread(args[\"target\"]) # transfer the color distribution from the source image", "color_transfer(source, target) # check to see if the output image", "ap.add_argument(\"-s\", \"--source\", required = True, help = \"Path to the", "= cv2.imread(args[\"target\"]) # transfer the color distribution from the source", "take up less screen real # estate r = width", "the resized image cv2.imshow(title, resized) # construct the argument parser", "300): # resize the image to have a constant width,", "resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA) # show the", "\"--output\", help = \"Path to the output image (optional)\") args", "from color_transfer import color_transfer import numpy as np import argparse", "# to the target image transfer = color_transfer(source, target) #", "a constant width, just to # make displaying the images", "the output image (optional)\") args = vars(ap.parse_args()) # load the", "arguments ap = argparse.ArgumentParser() ap.add_argument(\"-s\", \"--source\", required = True, help", "USAGE # python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg # import", "to the target image\") ap.add_argument(\"-o\", \"--output\", help = \"Path to", "\"--target\", required = True, help = \"Path to the target", "to have a constant width, just to # make displaying", "make displaying the images take up less screen real #", "images/ocean_sunset.jpg --target images/ocean_day.jpg # import the necessary packages from color_transfer", "argparse.ArgumentParser() ap.add_argument(\"-s\", \"--source\", required = True, help = \"Path to", "vars(ap.parse_args()) # load the images source = cv2.imread(args[\"source\"]) target =", "images and wait for a key press show_image(\"Source\", source) show_image(\"Target\",", "less screen real # estate r = width / float(image.shape[1])", "image\") ap.add_argument(\"-t\", \"--target\", required = True, help = \"Path to" ]
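# Aside: the color_transfer() call implements Reinhard-style statistics
# matching. A condensed sketch of the core idea (a simplification, not the
# library's exact implementation): shift and scale each L*a*b* channel of the
# target so its mean and standard deviation match those of the source.
import cv2
import numpy as np

def simple_color_transfer(source, target):
    # work in L*a*b*, where channel statistics track perceived color well
    src = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
    tar = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
    src_mean, src_std = src.mean(axis=(0, 1)), src.std(axis=(0, 1))
    tar_mean, tar_std = tar.mean(axis=(0, 1)), tar.std(axis=(0, 1))
    # match the target's channel-wise statistics to the source's
    out = (tar - tar_mean) * (src_std / (tar_std + 1e-8)) + src_mean
    out = np.clip(out, 0, 255).astype("uint8")
    return cv2.cvtColor(out, cv2.COLOR_LAB2BGR)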
[ "import open3d as o3d from tqdm import tqdm from scipy", "int(top_percent * len(pcd0_idx)) assert top_count > sample_size, 'top_count <= sample_size'", "global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size, distance_threshold=1.0, num_iters=4000000, num_val_iters=500): print(':: Distance", "= np.where(scores <= thresh)[0] pcd0_idx = indices[inliers_idx, 0] pcd1_idx =", "feature2, voxel_size) elif method == 'fast_global': print('\\nFast global registration on", "<= thresh)[0] pcd0_idx = indices[inliers_idx, 0] pcd1_idx = indices[inliers_idx, 1]", "pcd_final def remove_y_plane(pcd, y_thresh=5): cropped = copy.deepcopy(pcd) cropped_points = np.array(cropped.points)", "0] < y_thresh] cropped_points[:, -1] = -cropped_points[:, -1] pcd_final =", "0.929]) source_temp.transform(transformation) o3d.visualization.draw_geometries([source_temp, target_temp]) def run(): voxel_size = 0.2 dso_scale", "remove_ground_plane(pcd_lidar) pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd') pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5) pcd_dso =", "print(':: Distance threshold %.3f' % distance_threshold) result = o3d.registration.registration_ransac_based_on_feature_matching( source_down,", "= copy.deepcopy(pcd) cropped_points = np.array(cropped.points) cropped_points = cropped_points[cropped_points[:, -1] >", "Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % ( np.min(scores), np.max(scores), median, len(inliers_idx)))", "( np.min(scores), np.max(scores), median, len(inliers_idx))) if display: for i, j", "1 else 1)) print('\\nMatching FPFH features...') pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down,", "voxel_size) print(':: Downsample size', np.array(pcd_down.points).shape) else: pcd_down = copy.deepcopy(pcd) #", "FPFH features for DSO point cloud...') pcd_dso_down, features_dso = compute_features(pcd_dso,", "distance_threshold = 0.1 print(':: Distance threshold %.3f' % distance_threshold) result", "pcd_dso_down, features_dso = compute_features(pcd_dso, voxel_size=voxel_size * (dso_scale if dso_scale <", "source_fpfh, target_fpfh, initial_result, voxel_size): distance_threshold = 0.1 print(':: Distance threshold", "= np.mean(points0, axis=0) mean1 = np.mean(points1, axis=0) top_count = int(top_percent", "initial_result = global_registration(pcd0, pcd1, feature1, feature2, voxel_size) elif method ==", "source_temp = copy.deepcopy(source) target_temp = copy.deepcopy(target) source_temp.paint_uniform_color([1, 0.706, 0]) target_temp.paint_uniform_color([0,", "features for DSO point cloud...') pcd_dso_down, features_dso = compute_features(pcd_dso, voxel_size=voxel_size", "idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1) scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]])) indices.append([i,", "0.5]).translate([10, 20, 30]) # Ground plane removal results # utils.display(pcds=[pcd_lidar,", "0.06 print('\\nCorrecting scale...') pcd_dso_scaled = utils.scale_point_cloud(pcd_dso, 1.0 / scale) utils.display(pcds=[pcd_lidar,", "removal results # utils.display(pcds=[pcd_lidar, pcd_dso], colors=[[1, 0.706, 0], [0, 0.651,", "return result def refine_registration(source, target, source_fpfh, target_fpfh, initial_result, voxel_size): distance_threshold", "= remove_ground_plane(pcd_dso, z_thresh=4.5) pcd_dso = remove_y_plane(pcd_dso, y_thresh=0.2) # pcd_dso =", "search radius %.3f' % features_radius) features = o3d.registration.compute_fpfh_feature(pcd_down, o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, 
max_nn=features_nn))", "print('\\nRefine registration...') result = refine_registration(pcd0, pcd1, feature1, feature2, initial_result, voxel_size)", "pcd_dso = utils.scale_point_cloud(pcd_dso, dso_scale).rotate([0.5, 0.5, 0.5]).translate([10, 20, 30]) # Ground", "Compute FPFH feature with search radius %.3f' % features_radius) features", "< y_thresh] cropped_points[:, -1] = -cropped_points[:, -1] pcd_final = o3d.geometry.PointCloud()", "4 # Downsample the point cloud using Voxel grids if", "_] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1) scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]])) indices.append([i, idx[0]])", "< 1 else 1)) print('\\nMatching FPFH features...') pcd_lidar_idx, pcd_dso_idx =", "* len(pcd0_idx)) assert top_count > sample_size, 'top_count <= sample_size' scales", "tqdm import tqdm from scipy import stats import utils_o3d as", "= stats.mode(scales)[0][0] print(':: Estimated scale:', best_scale) return best_scale def global_registration(source_down,", "%.3f' % distance_threshold) result = o3d.registration.registration_ransac_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh,", "print(':: Estimate normal with search radius %.3f' % normals_radius) pcd_down.estimate_normals(", "= np.mean(points1, axis=0) top_count = int(top_percent * len(pcd0_idx)) assert top_count", "best_scale) return best_scale def global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size, distance_threshold=1.0,", "thresh)[0] pcd0_idx = indices[inliers_idx, 0] pcd1_idx = indices[inliers_idx, 1] print('::", "o3d.utility.Vector3dVector(cropped_points) return pcd_final def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True): normals_radius", "0.03 pcd_lidar = o3d.io.read_point_cloud('../maps/scans/scan_050.pcd') pcd_lidar = remove_ground_plane(pcd_lidar) pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd')", "z_thresh] pcd_final = o3d.geometry.PointCloud() pcd_final.points = o3d.utility.Vector3dVector(cropped_points) return pcd_final def", "features def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False): pcd0, pcd1", "- mean1) ** 2, axis=1) scale = np.sqrt(np.mean(score1) / np.mean(score0))", "result...') draw_registration_result(pcd0, pcd1, initial_result.transformation) print('\\nRefine registration...') result = refine_registration(pcd0, pcd1,", "voxel_size=voxel_size * (dso_scale if dso_scale < 1 else 1)) print('\\nMatching", "= o3d.utility.Vector3dVector(cropped_points) return pcd_final def remove_y_plane(pcd, y_thresh=5): cropped = copy.deepcopy(pcd)", "median, len(inliers_idx))) if display: for i, j in zip(pcd0_idx, pcd1_idx):", "Compute FPFH features print(':: Compute FPFH feature with search radius", "Input size:', np.array(pcd.points).shape) pcd_down = utils.downsample_point_cloud(pcd, voxel_size) print(':: Downsample with", "match_features(pcd_lidar_down, pcd_dso_down, features_lidar, features_dso, thresh=None) print('\\nEstimating scale using matches...') scale", "pcd1_idx = indices[inliers_idx, 1] print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f,", "features_lidar, features_dso_scaled, voxel_size, method='global') print('\\nDisplaying result...') draw_registration_result(pcd_lidar, pcd_dso_scaled, result.transformation) if", "feature2, voxel_size, method='global'): if method == 'global': print('\\nRANSAC global registration", "cloud using Voxel grids if downsample: print(':: Input size:', np.array(pcd.points).shape)", "colors=[[0, 0.651, 
0.929]]) # return print('\\nComputing FPFH features for lidar", "target_down, source_fpfh, target_fpfh, voxel_size, distance_threshold=1.0, num_iters=4000000, num_val_iters=500): print(':: Distance threshold", "copy import numpy as np import open3d as o3d from", "- mean0) ** 2, axis=1) score1 = np.sum((points1_r - mean1)", "target_fpfh, voxel_size, distance_threshold=1.0, num_iters=4000000, num_val_iters=500): print(':: Distance threshold %.3f' %", "voxel_size): distance_threshold = 1.0 result = o3d.registration.registration_fast_based_on_feature_matching( source_down, target_down, source_fpfh,", "Final registration results:') print(result) return result def draw_registration_result(source, target, transformation):", "size:', np.array(pcd.points).shape) pcd_down = utils.downsample_point_cloud(pcd, voxel_size) print(':: Downsample with a", "points0_r = points0[args] points1_r = points1[args] score0 = np.sum((points0_r -", "pcd1, feature1, feature2, voxel_size) else: print(':: Registration method not supported')", "pcd0_idx, pcd1_idx, top_percent=1.0, ransac_iters=5000, sample_size=50): points0 = np.asarray(pcd0.points)[pcd0_idx] points1 =", "points1_r = points1[args] score0 = np.sum((points0_r - mean0) ** 2,", "def fast_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size): distance_threshold = 1.0 result", "Ground plane removal results # utils.display(pcds=[pcd_lidar, pcd_dso], colors=[[1, 0.706, 0],", "o3d.geometry.PointCloud() pcd_final.points = o3d.utility.Vector3dVector(cropped_points) return pcd_final def compute_features(pcd, voxel_size, normals_nn=100,", "pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5) pcd_dso = remove_y_plane(pcd_dso, y_thresh=0.2) # pcd_dso", "replace=False) points0_r = points0[args] points1_r = points1[args] score0 = np.sum((points0_r", "initial_result, voxel_size): distance_threshold = 0.1 print(':: Distance threshold %.3f' %", "initial_result.transformation) print('\\nRefine registration...') result = refine_registration(pcd0, pcd1, feature1, feature2, initial_result,", "DSO point cloud...') pcd_dso_down, features_dso = compute_features(pcd_dso, voxel_size=voxel_size * (dso_scale", "source_fpfh, target_fpfh, voxel_size, distance_threshold=1.0, num_iters=4000000, num_val_iters=500): print(':: Distance threshold %.3f'", "stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % ( np.min(scores), np.max(scores), median,", "point cloud...') pcd_lidar_down, features_lidar = compute_features(pcd_lidar, voxel_size=voxel_size) print('\\nComputing FPFH features", "Median=%0.3f, N<Thresh=%d' % ( np.min(scores), np.max(scores), median, len(inliers_idx))) if display:", "pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd') pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5) pcd_dso = remove_y_plane(pcd_dso,", "inliers_idx = np.where(scores <= thresh)[0] pcd0_idx = indices[inliers_idx, 0] pcd1_idx", "= o3d.registration.registration_fast_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh, o3d.registration.FastGlobalRegistrationOption( maximum_correspondence_distance=distance_threshold)) return result", "[0, 0.651, 0.929]]) # utils.display(pcds=[pcd_dso], colors=[[0, 0.651, 0.929]]) # return", "using Voxel grids if downsample: print(':: Input size:', np.array(pcd.points).shape) pcd_down", "feature1, feature2, initial_result, voxel_size) print(':: Final registration results:') print(result) return", "radius %.3f' % normals_radius) pcd_down.estimate_normals( 
o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn)) # Compute FPFH", "[1, 0, 0] utils.display([pcd0, pcd1]) return pcd0_idx, pcd1_idx def estimate_scale(pcd0,", "np.sum((points0_r - mean0) ** 2, axis=1) score1 = np.sum((points1_r -", "mean1) ** 2, axis=1) scale = np.sqrt(np.mean(score1) / np.mean(score0)) scales.append(scale)", "= np.sqrt(np.mean(score1) / np.mean(score0)) scales.append(scale) best_scale = stats.mode(scales)[0][0] print(':: Estimated", "distance_threshold = 1.0 result = o3d.registration.registration_fast_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh,", "% normals_radius) pcd_down.estimate_normals( o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn)) # Compute FPFH features print('::", "len(inliers_idx))) if display: for i, j in zip(pcd0_idx, pcd1_idx): pcd0.colors[i]", "normals_radius) pcd_down.estimate_normals( o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn)) # Compute FPFH features print(':: Compute", "o3d.registration.compute_fpfh_feature(pcd_down, o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn)) return pcd_down, features def match_features(pcd0, pcd1, feature0,", "print(':: Initial registration results:') print(initial_result) print('\\nDisplaying initial result...') draw_registration_result(pcd0, pcd1,", "pcd1, feature1, feature2, initial_result, voxel_size) print(':: Final registration results:') print(result)", "print('\\nMatching FPFH features...') pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down, pcd_dso_down, features_lidar, features_dso,", "pcd_dso_down, features_lidar, features_dso, thresh=None) print('\\nEstimating scale using matches...') scale =", "features_radius = voxel_size * 4 # Downsample the point cloud", "pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down, pcd_dso_down, features_lidar, features_dso, thresh=None) print('\\nEstimating scale", "target_temp.paint_uniform_color([0, 0.651, 0.929]) source_temp.transform(transformation) o3d.visualization.draw_geometries([source_temp, target_temp]) def run(): voxel_size =", "normals_radius = voxel_size * 2 features_radius = voxel_size * 4", "return result def fast_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size): distance_threshold =", "size 0:', np.array(feature0.data).shape) print(':: Features size 1:', np.array(feature1.data).shape) utils.paint_uniform_color(pcd0, color=[1,", "size 1:', np.array(feature1.data).shape) utils.paint_uniform_color(pcd0, color=[1, 0.706, 0]) utils.paint_uniform_color(pcd1, color=[0, 0.651,", "RANSAC'): args = np.random.choice(top_count, sample_size, replace=False) points0_r = points0[args] points1_r", "= median inliers_idx = np.where(scores <= thresh)[0] pcd0_idx = indices[inliers_idx,", "np.sqrt(np.mean(score1) / np.mean(score0)) scales.append(scale) best_scale = stats.mode(scales)[0][0] print(':: Estimated scale:',", "features_dso_scaled, voxel_size, method='global') print('\\nDisplaying result...') draw_registration_result(pcd_lidar, pcd_dso_scaled, result.transformation) if __name__", "= 1.0 result = o3d.registration.registration_fast_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh, o3d.registration.FastGlobalRegistrationOption(", "np.array(pcd.points).shape) pcd_down = utils.downsample_point_cloud(pcd, voxel_size) print(':: Downsample with a voxel", "pcd_down = utils.downsample_point_cloud(pcd, voxel_size) print(':: Downsample with a voxel 
size", "target_fpfh, voxel_size): distance_threshold = 1.0 result = o3d.registration.registration_fast_based_on_feature_matching( source_down, target_down,", "def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False): pcd0, pcd1 =", "o3d.registration.FastGlobalRegistrationOption( maximum_correspondence_distance=distance_threshold)) return result def refine_registration(source, target, source_fpfh, target_fpfh, initial_result,", "source_fpfh, target_fpfh, distance_threshold, o3d.registration.TransformationEstimationPointToPoint(False), 4, [ o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9), o3d.registration.CorrespondenceCheckerBasedOnDistance( distance_threshold) ],", "remove_y_plane(pcd_dso, y_thresh=0.2) # pcd_dso = utils.scale_point_cloud(pcd_dso, dso_scale).rotate([0.5, 0.5, 0.5]).translate([10, 20,", "features_lidar, features_dso, thresh=None) print('\\nEstimating scale using matches...') scale = estimate_scale(pcd_lidar_down,", "pcd_dso_scaled, voxel_size=voxel_size) result = registration(pcd_lidar_down, pcd_dso_scaled_down, features_lidar, features_dso_scaled, voxel_size, method='global')", "downsample: print(':: Input size:', np.array(pcd.points).shape) pcd_down = utils.downsample_point_cloud(pcd, voxel_size) print('::", "for i in tqdm(range(ransac_iters), desc=':: Scale Estimation RANSAC'): args =", "2, axis=1) scale = np.sqrt(np.mean(score1) / np.mean(score0)) scales.append(scale) best_scale =", "0] pcd1_idx = indices[inliers_idx, 1] print(':: Score stats: Min=%0.3f, Max=%0.3f,", "size %.3f' % voxel_size) print(':: Downsample size', np.array(pcd_down.points).shape) else: pcd_down", "utils.display([pcd0, pcd1]) return pcd0_idx, pcd1_idx def estimate_scale(pcd0, pcd1, pcd0_idx, pcd1_idx,", "= copy.deepcopy(pcd0), copy.deepcopy(pcd1) print(':: Input size 0:', np.array(pcd0.points).shape) print(':: Input", "as np import open3d as o3d from tqdm import tqdm", "= remove_y_plane(pcd_dso, y_thresh=0.2) # pcd_dso = utils.scale_point_cloud(pcd_dso, dso_scale).rotate([0.5, 0.5, 0.5]).translate([10,", "cropped_points = np.array(cropped.points) cropped_points = cropped_points[cropped_points[:, -1] > z_thresh] pcd_final", "0.929]) scores, indices = [], [] fpfh_tree = o3d.geometry.KDTreeFlann(feature1) for", "scores, indices = [], [] fpfh_tree = o3d.geometry.KDTreeFlann(feature1) for i", "np.array(feature0.data).shape) print(':: Features size 1:', np.array(feature1.data).shape) utils.paint_uniform_color(pcd0, color=[1, 0.706, 0])", "indices = [], [] fpfh_tree = o3d.geometry.KDTreeFlann(feature1) for i in", "points0 = np.asarray(pcd0.points)[pcd0_idx] points1 = np.asarray(pcd1.points)[pcd1_idx] mean0 = np.mean(points0, axis=0)", "for lidar point cloud...') pcd_lidar_down, features_lidar = compute_features(pcd_lidar, voxel_size=voxel_size) print('\\nComputing", "distance_threshold) ], o3d.registration.RANSACConvergenceCriteria(num_iters, num_val_iters)) return result def fast_global_registration(source_down, target_down, source_fpfh,", "results # utils.display(pcds=[pcd_lidar, pcd_dso], colors=[[1, 0.706, 0], [0, 0.651, 0.929]])", "scipy import stats import utils_o3d as utils def remove_ground_plane(pcd, z_thresh=-2.7):", "scale = 0.06 print('\\nCorrecting scale...') pcd_dso_scaled = utils.scale_point_cloud(pcd_dso, 1.0 /", "print('\\nDisplaying initial result...') draw_registration_result(pcd0, pcd1, initial_result.transformation) print('\\nRefine registration...') result =", "pcd_dso], colors=[[1, 0.706, 0], [0, 0.651, 0.929]]) # utils.display(pcds=[pcd_dso], 
colors=[[0,", "%.3f' % voxel_size) print(':: Downsample size', np.array(pcd_down.points).shape) else: pcd_down =", "[] for i in tqdm(range(ransac_iters), desc=':: Scale Estimation RANSAC'): args", "= utils.scale_point_cloud(pcd_dso, dso_scale).rotate([0.5, 0.5, 0.5]).translate([10, 20, 30]) # Ground plane", "[1, 0, 0] pcd1.colors[j] = [1, 0, 0] utils.display([pcd0, pcd1])", "cropped_points[:, -1] = -cropped_points[:, -1] pcd_final = o3d.geometry.PointCloud() pcd_final.points =", "= 0.06 print('\\nCorrecting scale...') pcd_dso_scaled = utils.scale_point_cloud(pcd_dso, 1.0 / scale)", "], o3d.registration.RANSACConvergenceCriteria(num_iters, num_val_iters)) return result def fast_global_registration(source_down, target_down, source_fpfh, target_fpfh,", "= np.sum((points0_r - mean0) ** 2, axis=1) score1 = np.sum((points1_r", "* 4 # Downsample the point cloud using Voxel grids", "sample_size' scales = [] for i in tqdm(range(ransac_iters), desc=':: Scale", "target_fpfh, o3d.registration.FastGlobalRegistrationOption( maximum_correspondence_distance=distance_threshold)) return result def refine_registration(source, target, source_fpfh, target_fpfh,", "if dso_scale < 1 else 1)) print('\\nMatching FPFH features...') pcd_lidar_idx,", "scaled point clouds...') initial_result = fast_global_registration(pcd0, pcd1, feature1, feature2, voxel_size)", "np.array(pcd_down.points).shape) else: pcd_down = copy.deepcopy(pcd) # Estimate normals print(':: Estimate", "axis=1) score1 = np.sum((points1_r - mean1) ** 2, axis=1) scale", "pcd1.colors[j] = [1, 0, 0] utils.display([pcd0, pcd1]) return pcd0_idx, pcd1_idx", "np.mean(score0)) scales.append(scale) best_scale = stats.mode(scales)[0][0] print(':: Estimated scale:', best_scale) return", "axis=0) mean1 = np.mean(points1, axis=0) top_count = int(top_percent * len(pcd0_idx))", "FPFH features...') pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down, pcd_dso_down, features_lidar, features_dso, thresh=None)", "print('\\nFast global registration on scaled point clouds...') initial_result = fast_global_registration(pcd0,", "downsample=True): normals_radius = voxel_size * 2 features_radius = voxel_size *", "y_thresh=5): cropped = copy.deepcopy(pcd) cropped_points = np.array(cropped.points) cropped_points = cropped_points[cropped_points[:,", "estimate_scale(pcd0, pcd1, pcd0_idx, pcd1_idx, top_percent=1.0, ransac_iters=5000, sample_size=50): points0 = np.asarray(pcd0.points)[pcd0_idx]", "0:', np.array(feature0.data).shape) print(':: Features size 1:', np.array(feature1.data).shape) utils.paint_uniform_color(pcd0, color=[1, 0.706,", "else 1)) print('\\nMatching FPFH features...') pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down, pcd_dso_down,", "pcd_final def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True): normals_radius = voxel_size", "refine_registration(source, target, source_fpfh, target_fpfh, initial_result, voxel_size): distance_threshold = 0.1 print('::", "= refine_registration(pcd0, pcd1, feature1, feature2, initial_result, voxel_size) print(':: Final registration", "result = o3d.registration.registration_ransac_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh, distance_threshold, o3d.registration.TransformationEstimationPointToPoint(False), 4,", "target_fpfh, distance_threshold, o3d.registration.TransformationEstimationPointToPoint(False), 4, [ o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9), o3d.registration.CorrespondenceCheckerBasedOnDistance( distance_threshold) ], 
o3d.registration.RANSACConvergenceCriteria(num_iters,", "0.651, 0.929]) source_temp.transform(transformation) o3d.visualization.draw_geometries([source_temp, target_temp]) def run(): voxel_size = 0.2", "features = o3d.registration.compute_fpfh_feature(pcd_down, o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn)) return pcd_down, features def match_features(pcd0,", "Feature Matching'): [_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1) scores.append(np.linalg.norm(pcd0.points[i]", "registration...') result = refine_registration(pcd0, pcd1, feature1, feature2, initial_result, voxel_size) print('::", "0, 0] pcd1.colors[j] = [1, 0, 0] utils.display([pcd0, pcd1]) return", "print('\\nDisplaying result...') draw_registration_result(pcd_lidar, pcd_dso_scaled, result.transformation) if __name__ == '__main__': run()", "return print('\\nComputing FPFH features for lidar point cloud...') pcd_lidar_down, features_lidar", "distance_threshold) result = o3d.registration.registration_icp( source, target, distance_threshold, initial_result.transformation, o3d.registration.TransformationEstimationPointToPlane()) return", "features print(':: Compute FPFH feature with search radius %.3f' %", "def registration(pcd0, pcd1, feature1, feature2, voxel_size, method='global'): if method ==", "= copy.deepcopy(target) source_temp.paint_uniform_color([1, 0.706, 0]) target_temp.paint_uniform_color([0, 0.651, 0.929]) source_temp.transform(transformation) o3d.visualization.draw_geometries([source_temp,", "o3d.io.read_point_cloud('../maps/scans/scan_050.pcd') pcd_lidar = remove_ground_plane(pcd_lidar) pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd') pcd_dso = remove_ground_plane(pcd_dso,", "return print(':: Initial registration results:') print(initial_result) print('\\nDisplaying initial result...') draw_registration_result(pcd0,", "= indices[inliers_idx, 1] print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d'", "pcd_final.points = o3d.utility.Vector3dVector(cropped_points) return pcd_final def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120,", "pcd_down.estimate_normals( o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn)) # Compute FPFH features print(':: Compute FPFH", "fast_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size): distance_threshold = 1.0 result =", "o3d.geometry.KDTreeFlann(feature1) for i in tqdm(range(len(pcd0.points)), desc=':: Feature Matching'): [_, idx,", "print(':: Features size 1:', np.array(feature1.data).shape) utils.paint_uniform_color(pcd0, color=[1, 0.706, 0]) utils.paint_uniform_color(pcd1,", "mean1 = np.mean(points1, axis=0) top_count = int(top_percent * len(pcd0_idx)) assert", "top_count = int(top_percent * len(pcd0_idx)) assert top_count > sample_size, 'top_count", "print(':: Registration method not supported') return print(':: Initial registration results:')", "not supported') return print(':: Initial registration results:') print(initial_result) print('\\nDisplaying initial", "print(result) return result def draw_registration_result(source, target, transformation): source_temp = copy.deepcopy(source)", "remove_ground_plane(pcd, z_thresh=-2.7): cropped = copy.deepcopy(pcd) cropped_points = np.array(cropped.points) cropped_points =", "# Estimate normals print(':: Estimate normal with search radius %.3f'", "features_dso, thresh=None) print('\\nEstimating scale using matches...') scale = estimate_scale(pcd_lidar_down, pcd_dso_down,", 
"target_down, source_fpfh, target_fpfh, distance_threshold, o3d.registration.TransformationEstimationPointToPoint(False), 4, [ o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9), o3d.registration.CorrespondenceCheckerBasedOnDistance( distance_threshold)", "= fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1) scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]])) indices.append([i, idx[0]]) scores,", "utils.display(pcds=[pcd_lidar, pcd_dso_scaled], colors=[[1, 0.706, 0], [0, 0.651, 0.929]]) # return", "2, axis=1) score1 = np.sum((points1_r - mean1) ** 2, axis=1)", "print(':: Estimated scale:', best_scale) return best_scale def global_registration(source_down, target_down, source_fpfh,", "global_registration(pcd0, pcd1, feature1, feature2, voxel_size) elif method == 'fast_global': print('\\nFast", "transformation): source_temp = copy.deepcopy(source) target_temp = copy.deepcopy(target) source_temp.paint_uniform_color([1, 0.706, 0])", "features for lidar point cloud...') pcd_lidar_down, features_lidar = compute_features(pcd_lidar, voxel_size=voxel_size)", "ransac_iters=5000, sample_size=50): points0 = np.asarray(pcd0.points)[pcd0_idx] points1 = np.asarray(pcd1.points)[pcd1_idx] mean0 =", "pcd1 = copy.deepcopy(pcd0), copy.deepcopy(pcd1) print(':: Input size 0:', np.array(pcd0.points).shape) print('::", "= indices[inliers_idx, 0] pcd1_idx = indices[inliers_idx, 1] print(':: Score stats:", "draw_registration_result(pcd0, pcd1, initial_result.transformation) print('\\nRefine registration...') result = refine_registration(pcd0, pcd1, feature1,", "feature2, voxel_size) else: print(':: Registration method not supported') return print('::", "target, transformation): source_temp = copy.deepcopy(source) target_temp = copy.deepcopy(target) source_temp.paint_uniform_color([1, 0.706,", "= o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd') pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5) pcd_dso = remove_y_plane(pcd_dso, y_thresh=0.2)", "in zip(pcd0_idx, pcd1_idx): pcd0.colors[i] = [1, 0, 0] pcd1.colors[j] =", "print('\\nEstimating scale using matches...') scale = estimate_scale(pcd_lidar_down, pcd_dso_down, pcd_lidar_idx, pcd_dso_idx)", "scores, indices = np.array(scores), np.array(indices) median = np.median(scores) if thresh", "= np.random.choice(top_count, sample_size, replace=False) points0_r = points0[args] points1_r = points1[args]", "feature2, initial_result, voxel_size) print(':: Final registration results:') print(result) return result", "-cropped_points[:, -1] pcd_final = o3d.geometry.PointCloud() pcd_final.points = o3d.utility.Vector3dVector(cropped_points) return pcd_final", "= o3d.geometry.PointCloud() pcd_final.points = o3d.utility.Vector3dVector(cropped_points) return pcd_final def remove_y_plane(pcd, y_thresh=5):", "indices[inliers_idx, 1] print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' %", "0.2 dso_scale = 0.03 pcd_lidar = o3d.io.read_point_cloud('../maps/scans/scan_050.pcd') pcd_lidar = remove_ground_plane(pcd_lidar)", "print('\\nComputing FPFH features for lidar point cloud...') pcd_lidar_down, features_lidar =", "o3d.registration.TransformationEstimationPointToPoint(False), 4, [ o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9), o3d.registration.CorrespondenceCheckerBasedOnDistance( distance_threshold) ], o3d.registration.RANSACConvergenceCriteria(num_iters, num_val_iters)) return", "import utils_o3d as utils def remove_ground_plane(pcd, z_thresh=-2.7): cropped = copy.deepcopy(pcd)", "import tqdm from scipy import 

def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True):
    normals_radius = voxel_size * 2
    features_radius = voxel_size * 4

    # Downsample the point cloud using Voxel grids
    if downsample:
        print(':: Input size:', np.array(pcd.points).shape)
        pcd_down = utils.downsample_point_cloud(pcd, voxel_size)
        print(':: Downsample with a voxel size %.3f' % voxel_size)
        print(':: Downsample size', np.array(pcd_down.points).shape)
    else:
        pcd_down = copy.deepcopy(pcd)

    # Estimate normals
    print(':: Estimate normal with search radius %.3f' % normals_radius)
    pcd_down.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn))

    # Compute FPFH features
    print(':: Compute FPFH feature with search radius %.3f' % features_radius)
    features = o3d.registration.compute_fpfh_feature(
        pcd_down,
        o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn))
    return pcd_down, features
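
# FPFH descriptors are 33-dimensional histograms, and Open3D stores them
# column-wise: `features.data` has shape (33, num_points), which is why
# match_features below indexes columns with `feature0.data[:, i]`. A quick
# illustrative check (not part of the original script):
#
#     pcd_down, features = compute_features(pcd, voxel_size=0.2)
#     assert np.array(features.data).shape[0] == 33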

def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False):
    pcd0, pcd1 = copy.deepcopy(pcd0), copy.deepcopy(pcd1)
    print(':: Input size 0:', np.array(pcd0.points).shape)
    print(':: Input size 1:', np.array(pcd1.points).shape)
    print(':: Features size 0:', np.array(feature0.data).shape)
    print(':: Features size 1:', np.array(feature1.data).shape)

    utils.paint_uniform_color(pcd0, color=[1, 0.706, 0])
    utils.paint_uniform_color(pcd1, color=[0, 0.651, 0.929])

    scores, indices = [], []
    fpfh_tree = o3d.geometry.KDTreeFlann(feature1)
    for i in tqdm(range(len(pcd0.points)), desc=':: Feature Matching'):
        [_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1)
        scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]]))
        indices.append([i, idx[0]])

    scores, indices = np.array(scores), np.array(indices)
    median = np.median(scores)
    if thresh is None:
        thresh = median
    inliers_idx = np.where(scores <= thresh)[0]
    pcd0_idx = indices[inliers_idx, 0]
    pcd1_idx = indices[inliers_idx, 1]

    print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % (
        np.min(scores), np.max(scores), median, len(inliers_idx)))

    if display:
        for i, j in zip(pcd0_idx, pcd1_idx):
            pcd0.colors[i] = [1, 0, 0]
            pcd1.colors[j] = [1, 0, 0]
        utils.display([pcd0, pcd1])

    return pcd0_idx, pcd1_idx
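
# Note that the nearest-neighbour search above runs in 33-D feature space,
# while each match is scored by the Euclidean distance between the paired
# 3-D points. That score is only a meaningful inlier test when the two
# clouds are already roughly aligned and similarly scaled; for the raw
# lidar/DSO pair the median-based threshold mainly trims the worst matches.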

def estimate_scale(pcd0, pcd1, pcd0_idx, pcd1_idx, top_percent=1.0,
                   ransac_iters=5000, sample_size=50):
    points0 = np.asarray(pcd0.points)[pcd0_idx]
    points1 = np.asarray(pcd1.points)[pcd1_idx]
    mean0 = np.mean(points0, axis=0)
    mean1 = np.mean(points1, axis=0)

    top_count = int(top_percent * len(pcd0_idx))
    assert top_count > sample_size, 'top_count <= sample_size'

    scales = []
    for i in tqdm(range(ransac_iters), desc=':: Scale Estimation RANSAC'):
        args = np.random.choice(top_count, sample_size, replace=False)
        points0_r = points0[args]
        points1_r = points1[args]
        score0 = np.sum((points0_r - mean0) ** 2, axis=1)
        score1 = np.sum((points1_r - mean1) ** 2, axis=1)
        scale = np.sqrt(np.mean(score1) / np.mean(score0))
        scales.append(scale)

    best_scale = stats.mode(scales)[0][0]
    print(':: Estimated scale:', best_scale)
    return best_scale
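
# Each RANSAC iteration estimates scale as the ratio of RMS distances to the
# matched centroids, so on an exact scaled copy every sample yields the true
# factor. The helper below is an illustrative self-test; the function name
# and synthetic data are ours, not part of the original script:
def _check_scale_estimation(s=2.5, n=500):
    pts = np.random.rand(n, 3)
    pcd0 = o3d.geometry.PointCloud()
    pcd0.points = o3d.utility.Vector3dVector(pts)
    pcd1 = o3d.geometry.PointCloud()
    pcd1.points = o3d.utility.Vector3dVector(pts * s)
    idx = np.arange(n)
    # With identity correspondences every iteration recovers s exactly,
    # so the mode over iterations equals s.
    return estimate_scale(pcd0, pcd1, idx, idx, ransac_iters=100)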

def global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size,
                        distance_threshold=1.0, num_iters=4000000, num_val_iters=500):
    print(':: Distance threshold %.3f' % distance_threshold)
    result = o3d.registration.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        o3d.registration.TransformationEstimationPointToPoint(False), 4, [
            o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ], o3d.registration.RANSACConvergenceCriteria(num_iters, num_val_iters))
    return result

def fast_global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size):
    distance_threshold = 1.0
    result = o3d.registration.registration_fast_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh,
        o3d.registration.FastGlobalRegistrationOption(
            maximum_correspondence_distance=distance_threshold))
    return result


def refine_registration(source, target, source_fpfh, target_fpfh, initial_result, voxel_size):
    distance_threshold = 0.1
    print(':: Distance threshold %.3f' % distance_threshold)
    result = o3d.registration.registration_icp(
        source, target, distance_threshold, initial_result.transformation,
        o3d.registration.TransformationEstimationPointToPlane())
    return result
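
# Compatibility sketch: this script targets the pre-0.11 Open3D API, where
# registration lives under `o3d.registration`. Newer releases moved it to
# `o3d.pipelines.registration` (and also changed the RANSACConvergenceCriteria
# constructor to take a confidence value instead of a validation-iteration
# count), so the alias below is a best-effort shim rather than a drop-in fix:
if not hasattr(o3d, 'registration') and hasattr(o3d, 'pipelines'):
    o3d.registration = o3d.pipelines.registration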

def registration(pcd0, pcd1, feature1, feature2, voxel_size, method='global'):
    if method == 'global':
        print('\nRANSAC global registration on scaled point clouds...')
        initial_result = global_registration(pcd0, pcd1, feature1, feature2, voxel_size)
    elif method == 'fast_global':
        print('\nFast global registration on scaled point clouds...')
        initial_result = fast_global_registration(pcd0, pcd1, feature1, feature2, voxel_size)
    else:
        print(':: Registration method not supported')
        return

    print(':: Initial registration results:')
    print(initial_result)
    print('\nDisplaying initial result...')
    draw_registration_result(pcd0, pcd1, initial_result.transformation)

    print('\nRefine registration...')
    result = refine_registration(pcd0, pcd1, feature1, feature2, initial_result, voxel_size)
    print(':: Final registration results:')
    print(result)
    return result

def draw_registration_result(source, target, transformation):
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    source_temp.paint_uniform_color([1, 0.706, 0])
    target_temp.paint_uniform_color([0, 0.651, 0.929])
    source_temp.transform(transformation)
    o3d.visualization.draw_geometries([source_temp, target_temp])
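
# Illustrative usage (ours, not in the original script): calling the helper
# with an identity matrix renders the two inputs unaligned, which is handy
# for eyeballing the clouds before registration:
#
#     draw_registration_result(pcd_lidar, pcd_dso, np.identity(4))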

def run():
    voxel_size = 0.2
    dso_scale = 0.03

    pcd_lidar = o3d.io.read_point_cloud('../maps/scans/scan_050.pcd')
    pcd_lidar = remove_ground_plane(pcd_lidar)

    pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd')
    pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5)
    pcd_dso = remove_y_plane(pcd_dso, y_thresh=0.2)
    # pcd_dso = utils.scale_point_cloud(pcd_dso, dso_scale).rotate(
    #     [0.5, 0.5, 0.5]).translate([10, 20, 30])

    # Ground plane removal results
    # utils.display(pcds=[pcd_lidar, pcd_dso], colors=[[1, 0.706, 0], [0, 0.651, 0.929]])
    # utils.display(pcds=[pcd_dso], colors=[[0, 0.651, 0.929]])
    # return

    print('\nComputing FPFH features for lidar point cloud...')
    pcd_lidar_down, features_lidar = compute_features(pcd_lidar, voxel_size=voxel_size)

    print('\nComputing FPFH features for DSO point cloud...')
    pcd_dso_down, features_dso = compute_features(
        pcd_dso, voxel_size=voxel_size * (dso_scale if dso_scale < 1 else 1))

    print('\nMatching FPFH features...')
    pcd_lidar_idx, pcd_dso_idx = match_features(
        pcd_lidar_down, pcd_dso_down, features_lidar, features_dso, thresh=None)

    print('\nEstimating scale using matches...')
    scale = estimate_scale(pcd_lidar_down, pcd_dso_down, pcd_lidar_idx, pcd_dso_idx)
    scale = 0.06  # the estimate above is discarded in favour of a hand-tuned value

    print('\nCorrecting scale...')
    pcd_dso_scaled = utils.scale_point_cloud(pcd_dso, 1.0 / scale)
    utils.display(pcds=[pcd_lidar, pcd_dso_scaled],
                  colors=[[1, 0.706, 0], [0, 0.651, 0.929]])
    # return

    # Registration
    pcd_dso_scaled_down, features_dso_scaled = compute_features(
        pcd_dso_scaled, voxel_size=voxel_size)
    result = registration(pcd_lidar_down, pcd_dso_scaled_down, features_lidar,
                          features_dso_scaled, voxel_size, method='global')

    print('\nDisplaying result...')
    draw_registration_result(pcd_lidar, pcd_dso_scaled, result.transformation)


if __name__ == '__main__':
    run()
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from asyncio import (
    IncompleteReadError,
    StreamReader,
    StreamReaderProtocol,
    StreamWriter,
    get_event_loop,
    wait,
)
from collections import deque
from logging import getLogger
from random import choice
from ssl import SSLError
from sys import platform, version_info
from time import perf_counter

from neo4j.addressing import Address
from neo4j.aio._collections import WaitingList
from neo4j.aio._mixins import Addressable, Breakable
from neo4j.errors import (
    BoltError,
    BoltConnectionError,
    BoltSecurityError,
    BoltConnectionBroken,
    BoltHandshakeError,
    Neo4jAvailabilityError,
)
from neo4j.api import Version
from neo4j.meta import version as neo4j_version
from neo4j.routing import RoutingTable


log = getLogger(__name__)

MAGIC = b"\x60\x60\xB0\x17"


def default_user_agent():
    """ Return the default user agent string for this driver build.
    """
    template = "neo4j-python/{} Python/{}.{}.{}-{}-{} ({})"
    fields = (neo4j_version,) + tuple(version_info) + (platform,)
    return template.format(*fields)


class Bolt(Addressable, object):

    #: True if this instance uses secure communication, false
    #: otherwise.
    secure = None

    #: As a class attribute, this denotes the version of Bolt handled
    #: by that subclass; as an instance attribute, the version actually
    #: in use on the connection.
    protocol_version = ()

    @classmethod
    def protocol_handlers(cls, protocol_version=None):
        """ Return a dictionary of available Bolt protocol handlers,
        keyed by version tuple. If an explicit protocol version is
        given, the dictionary will contain either zero or one items,
        depending on whether that version is supported. If no protocol
        version is provided, all available versions are returned.

        :param protocol_version: tuple identifying a specific protocol
            version (e.g. (3, 5)) or None
        :return: dictionary of version tuple to handler class
        :raise TypeError: if protocol version is not passed in a tuple
        """
        ...

    @classmethod
    def opener(cls, auth=None, loop=None, **config):
        """ Create and return an opener function for a given set of
        configuration parameters.
        """
        async def f(address):
            return await Bolt.open(address, auth=auth, loop=loop, **config)
        return f

    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Open a socket connection and perform a handshake in order
        to return a suitable Bolt subclass instance.

        :param address: tuple of host and port, such as
            ("127.0.0.1", 7687)
        :param auth:
        :param loop:
        :param config:
        :return: instance of a Bolt subclass
        :raise BoltConnectionError: if a connection could not be
            established
        """
        ...  # connect, secure the transport if required, then wrap:
        #     writer = BoltStreamWriter(transport, protocol, reader, loop)
        # translating SSLError into BoltSecurityError ("Failed to
        # establish a secure connection") and OSError into
        # BoltConnectionError on failure.

    def __new__(cls, reader, writer):
        obj = super().__new__(cls)
        obj.__t_opened = perf_counter()
        obj.__reader = reader
        obj.__writer = writer
        return obj

    @classmethod
    async def handshake(cls, reader, writer, protocol_version=None):
        """ Carry out a Bolt handshake, optionally requesting a
        specific protocol version.

        :raise BoltHandshakeError: if handshake completes without a
            successful negotiation
        """
        local_address = Address(writer.transport.get_extra_info("sockname"))
        remote_address = Address(writer.transport.get_extra_info("peername"))
        handlers = cls.protocol_handlers(protocol_version)
        if not handlers:
            raise ValueError("No protocol handlers available (requested Bolt %r)",
                             protocol_version)
        offered_versions = sorted(handlers.keys(), reverse=True)[:4]
        request_data = MAGIC + b"".join(
            v.to_bytes() for v in offered_versions).ljust(16, b"\x00")
        log.debug("[#%04X] C: <HANDSHAKE> %r", local_address.port_number, request_data)
        writer.write(request_data)
        await writer.drain()
        response_data = await reader.readexactly(4)
        log.debug("[#%04X] S: <HANDSHAKE> %r", local_address.port_number, response_data)
        try:
            agreed_version = Version.from_bytes(response_data)
        except ValueError as err:
            writer.close()
            raise BoltHandshakeError("Unexpected handshake response %r" % response_data,
                                     remote_address) from err
        ...

    async def reset(self, force=False):
        """ Reset the connection to a clean state, if required, i.e.
        if the connection is not already in a clean state. If `force`
        is true, the reset is carried out regardless.
        """
        ...

    async def run(self, cypher, parameters=None, discard=False, readonly=False,
                  bookmarks=None, timeout=None, metadata=None):
        """ Run an auto-commit transaction.

        :param cypher:
        :param parameters:
        :param discard:
        :raise TypeError: if any of the arguments provided are of
            incompatible types
        :raise ValueError: if any of the arguments provided are of
            incompatible values
        """
        ...

    async def begin(self, readonly=False, bookmarks=None, timeout=None, metadata=None):
        """ Begin an explicit transaction.

        :param readonly:
        :param bookmarks:
        :param timeout:
        :param metadata:
        """
        ...

    async def run_tx(self, f, args=None, kwargs=None, readonly=False,
                     bookmarks=None, timeout=None, metadata=None):
        """ Run a transaction function and return the return value from
        that function.

        :raise BoltTransactionError: if a transaction cannot be carried
            out at this time
        """
        ...
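
# The handshake above is a fixed 20-byte exchange: a four-byte magic preamble
# followed by four big-endian version slots, padded with zeroes; the server
# answers with the four bytes of the version it picked. A standalone sketch
# of that layout (illustrative only; the real encoding lives in
# neo4j.api.Version):
def _handshake_request_sketch(versions=((4, 0), (3, 0))):
    magic = b"\x60\x60\xb0\x17"
    # One slot per proposed version: minor in the third byte, major in the
    # fourth, e.g. Bolt 4.0 encodes as b"\x00\x00\x00\x04".
    slots = b"".join(bytes((0, 0, minor, major)) for major, minor in versions)
    return magic + slots.ljust(16, b"\x00")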
:param force_reset: if true, the connection will", "is not currently in use, or if it does not", "gracefully, allowing work in progress to continue until connections are", "Neo4jPool(opener, router_addresses or self.default_router_addresses) # self._writers = Neo4jPool(opener) # self._readers", "({})\" fields = (neo4j_version,) + tuple(version_info) + (platform,) return template.format(*fields)", "of simultaneous connections that may be owned by this pool,", "if protocol version is not passed in a tuple \"\"\"", "a Bolt handshake, optionally requesting a specific protocol version. :param", "transaction function and return the return value from that function.", ":param protocol_version: tuple identifying a specific protocol version (e.g. (3,", "offered_versions).ljust(16, b\"\\x00\") log.debug(\"[#%04X] C: <HANDSHAKE> %r\", local_address.port_number, request_data) writer.write(request_data) await", "= get_event_loop() else: self._loop = loop self._opener = opener self._address", "self.address, \"|\" * len(self._in_use_list), \".\" * len(self._free_list), \" \" *", "without a successful negotiation \"\"\" local_address = Address(writer.transport.get_extra_info(\"sockname\")) remote_address =", "self._activate_new_pools_in(self._routing_table) def _activate_new_pools_in(self, routing_table): \"\"\" Add pools for addresses that", "None #: As a class attribute, this denotes the version", "the maximum age permitted by this pool, consequently closing it", "self._pools.items() if address in addresses] pools_by_usage = {} for pool", "rt: return rt rt = await self._get_routing_table_from(*existing_routers) if rt: return", "bookmarks=None, timeout=None, metadata=None): \"\"\" Begin an explicit transaction. :param readonly:", "if a transaction cannot be carried out at this time", "acceptable to re-acquire connections after pool closure, which will have", "config.max_size self._initial_routers = addresses self._routing_table = RoutingTable(addresses) self._activate_new_pools_in(self._routing_table) def _activate_new_pools_in(self,", "cx = await pool.acquire(force_reset=force_reset) except BoltError: await self._deactivate(pool.address) else: if", "likely safe unless the implementation of 3.6 # changes in", "grown, so new slots have become # available. Notify any", "Flag to indicate whether this connection has been closed locally.\"\"\"", "down all open connections, including those in use. Depending on", "def acquire(self, *, force_reset=False, timeout=None): raise NotImplementedError def release(self, *connections,", "self._max_size = value if value > old_value: # The maximum", "connection acquisitions onto the waiting list, and released connections will", "protocol, **connection_args) writer = BoltStreamWriter(transport, protocol, reader, loop) except SSLError", ":raise BoltTransactionError: if a transaction cannot be carried out at", "from neo4j.meta import version as neo4j_version from neo4j.routing import RoutingTable", "Run an auto-commit transaction. :param cypher: :param parameters: :param discard:", "rt if not has_tried_initial_routers and self._initial_routers not in existing_routers: rt", "connections immediately. This does not permanently disable the connection pool,", "num_writers = len(new_routing_table.writers) # No writers are available. This likely", "safe. 
\"\"\" for pool in self._pools.values(): try: await pool.release(connection, force_reset=force_reset)", "= await self._get_routing_table() self._activate_new_pools_in(rt) self._routing_table.update(rt) await self._deactivate_pools_not_in(rt) async def _select_pool(self,", "attribute, this denotes the version of Bolt handled #: by", "to establish a secure connection\", address) from err except OSError", "Neo4j.open(\":17601 :17602 :17603\", auth=(\"neo4j\", \"password\")) # await neo4j.update_routing_table() # print(neo4j.routing_table)", "connection is released, or until the acquire call is cancelled.", "incompatible types :raise ValueError: if any of the arguments provided", "pool.acquire() try: new_routing_table = await cx.get_routing_table(self._routing_context) except BoltError: await self._deactivate(router)", "again if there is still capacity. self._free_list.append(cx) self._waiting_list.notify() else: #", "open(cls, address, *, auth=None, loop=None, **config): \"\"\" Open a socket", "# hack is likely safe unless the implementation of 3.6", "When no writers available, then we flag we are reading", "agreed_version = Version.from_bytes(response_data) except ValueError as err: writer.close() raise BoltHandshakeError(\"Unexpected", "uses secure communication, false #: otherwise. secure = None #:", "except IncompleteReadError as err: message = (\"Network read incomplete (received", "writer self._missing_writer = (num_writers == 0) # No routers if", "are reading in absence of writer self._missing_writer = not self._routing_table.is_fresh(readonly=False)", "0 await pool.prune() async def close(self, force=False): \"\"\" Close all", "for which this pool operates :param max_size: the maximum permitted", "await obj._ensure_routing_table_is_fresh() return obj def __init__(self, loop, opener, config, addresses,", "routing information. \"\"\" # copied because it can be modified", "connection :raise BoltConnectionError: if a connection could not be established", "= connections.popleft() except IndexError: break else: closers.append(cx.close()) if closers: await", "async def _ensure_routing_table_is_fresh(self, readonly=False): \"\"\" Update the routing table if", "async def release(self, cx, *, force_reset=False): \"\"\" Release a Bolt", "License for the specific language governing permissions and # limitations", "host and port, such as (\"127.0.0.1\", 7687) :param auth: :param", "implicit affect of reopening the pool. To close gracefully, allowing", "# We use `discard` instead of `remove` here since the", "\"\"\" closers = deque() while True: try: cx = connections.popleft()", "\"\"\" for address in self._pools: if address not in routing_table:", "StreamReaderProtocol, StreamWriter, get_event_loop, wait, ) from collections import deque from", "auth=auth, loop=loop, **config) return f @classmethod async def open(cls, address,", "age permitted by this pool, consequently closing it on expiry.", "await pool.release(connection, force_reset=force_reset) except ValueError: pass else: # Unhook any", "a transaction function and return the return value from that", "list\") await self._waiting_list.join() else: cx = await self._sanitize(cx, force_reset=force_reset) self._in_use_list.append(cx)", "import Address from neo4j.aio._collections import WaitingList from neo4j.aio._mixins import Addressable,", "routing table. \"\"\" @classmethod async def open(cls, *addresses, auth=None, routing_context=None,", "the connection is not already in a clean state. If", "connection. 
await cx.close() else: # If the pool is full,", "loop=None): return await Bolt.open(address, auth=auth, loop=loop, **config) return f @classmethod", "\"\"\" Wrapper for asyncio.streams.StreamReader \"\"\" def set_transport(self, transport): Addressable.set_transport(self, transport)", "removed when # Python 3.6 support is no longer required.", "class for all relevant and supported protocol versions :raise TypeError:", "passed in a tuple \"\"\" # Carry out subclass imports", "This is useful when multiple servers share the same configuration", "readers if num_readers == 0: continue log.debug(\"Successfully updated routing table", "= None # Flag to indicate that the connection is", "be overridden and a RESET will be sent regardless. \"\"\"", "self._routing_table.routers.discard(address) self._routing_table.readers.discard(address) self._routing_table.writers.discard(address) log.debug(\"[#0000] C: <ROUTING> table=%r\", self._routing_table) try: pool", "= await Neo4j.open(\":17601 :17602 :17603\", auth=(\"neo4j\", \"password\")) # await neo4j.update_routing_table()", "bookmarks: :param timeout: :param metadata: :raise BoltTransactionError: if a transaction", "this check will be overridden and a RESET will be", "pool. \"\"\" await self.prune() await self.__close(self._in_use_list) async def __close(self, connections):", "not belong to this pool \"\"\" log.debug(\"Releasing connection %r\", cx)", "Bolt.open(addr, auth=auth, **pool_config) obj = cls(loop, opener, config, addresses, routing_context)", "\"\"\" async def f(address, *, loop=None): return await Bolt.open(address, auth=auth,", "false :param force_reset: \"\"\" while True: pool = await self._select_pool(readonly=readonly)", "force: await pool.close() else: pool.max_size = 0 await pool.prune() class", "self.local_address.port_number) self.__writer.write_eof() self.__writer.close() try: await self.__writer.wait_closed() except BoltConnectionBroken: pass self.__closed", "def __len__(self): return self.size @property def address(self): \"\"\" The remote", "Addressable.set_transport(self, self.transport) async def drain(self): try: await super().drain() except OSError", "is successfully updated, otherwise False \"\"\" log.debug(\"Attempting to update routing", "def protocol_handlers(cls, protocol_version=None): \"\"\" Return a dictionary of available Bolt", "because it can be modified existing_routers = list(self._routing_table.routers) has_tried_initial_routers =", "# return self._routing_table # finally: # self._routers.release(cx) # async def", "used by current implementation async def readexactly(self, n): try: return", "assert hasattr(obj, \"__ainit__\") await obj.__ainit__(auth) return obj except BoltError: writer.write_eof()", "protocol version\", remote_address, request_data, response_data) else: return subclass def __new__(cls,", "\"\"\" async def run_tx(self, f, args=None, kwargs=None, readonly=False, bookmarks=None, timeout=None,", "The code polls waiting for the stream # reader inside", "strerror(err.errno)) raise BoltSecurityError(\"Failed to establish a secure connection\", address) from", "the default user agent string for a connection. \"\"\" template", "address.port, \"family\": address.family, # TODO: other args } ssl_context =", "return \"<{} addr'{}' [{}{}{}]>\".format( self.__class__.__name__, self.address, \"|\" * len(self._in_use_list), \".\"", "routing_context self._max_size_per_host = config.max_size self._initial_routers = addresses self._routing_table = RoutingTable(addresses)", "# invalidate the routing table. 
from neo4j.errors import ( NotALeader,", "(e.g. (3, 5)) or None :return: dictionary of version tuple", "not be established \"\"\" assert isinstance(address, Address) assert loop is", "only be sent if required, i.e. if the connection is", "None: return handlers if not isinstance(protocol_version, tuple): raise TypeError(\"Protocol version", "to this pool \"\"\" log.debug(\"Releasing connection %r\", cx) if cx", "self._config = config self._pools = {} self._missing_writer = False self._refresh_lock", "existing_routers = list(self._routing_table.routers) has_tried_initial_routers = False if self._missing_writer: has_tried_initial_routers =", "disable the connection pool, it merely shuts down all open", "be owned by this pool, both in-use and free :param", "is full, simply close the connection. await cx.close() elif cx", "in this pool \"\"\" @classmethod async def open(cls, address, *,", "force_reset=force_reset) self._in_use_list.append(cx) return cx async def release(self, cx, *, force_reset=False):", "connection to the address provided. :param address: :param loop: :param", "before being released back into the pool; if false, this", "<REJECT> %s (%d %s)\", 0, address, err.errno, strerror(err.errno)) raise BoltConnectionError(\"Failed", "\"\"\" Create and return an opener function for a given", "we are reading in absence of writer self._missing_writer = not", "on whether that version is supported. If no protocol version", "currently in use, or if it does not belong to", "loop, config): \"\"\" Attempt to establish a TCP connection to", "\"\"\" def set_transport(self, transport): Addressable.set_transport(self, transport) StreamReader.set_transport(self, transport) async def", "protocol_version is None: return handlers if not isinstance(protocol_version, tuple): raise", "not handlers: raise ValueError(\"No protocol handlers available (requested Bolt %r)\",", "protocol version. :param reader: :param writer: :param protocol_version: :return: :raise", "%r\", local_address.port_number, request_data) writer.write(request_data) await writer.drain() response_data = await reader.readexactly(4)", "Handle to the StreamReader object. __reader = None # Handle", "self._waiting_list.join() else: cx = await self._sanitize(cx, force_reset=force_reset) self._in_use_list.append(cx) return cx", ":17602 :17603\", auth=(\"neo4j\", \"password\")) # await neo4j.update_routing_table() # print(neo4j.routing_table) #", "by current implementation async def read(self, n=-1): # pragma: no", "# available, then try again log.debug(\"Joining waiting list\") await self._waiting_list.join()", "can satisfy a set of parameters. :param readonly: true if", "self._max_size_per_host = config.max_size self._initial_routers = addresses self._routing_table = RoutingTable(addresses) self._activate_new_pools_in(self._routing_table)", "isinstance(config, Config) connection_args = { \"host\": address.host, \"port\": address.port, \"family\":", "that aren't represented in the given routing table. \"\"\" for", "this pool \"\"\" @classmethod async def open(cls, address, *, auth=None,", "not self._routing_table.is_fresh(readonly=False) else: rt = await self._get_routing_table() self._activate_new_pools_in(rt) self._routing_table.update(rt) await", "for the stream # reader inside the protocol to go", "connections, including those in use. 
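
# Sketch of the 20-byte handshake request that Bolt._handshake assembles
# above: the 4 magic bytes followed by four 4-byte version slots, zero
# padded. The slot layout shown here assumes the Bolt 3 era encoding (a
# big-endian integer, e.g. b"\x00\x00\x00\x03"); Version.to_bytes() in
# neo4j.api is authoritative and may encode differently.
def _example_handshake_request():
    magic = b"\x60\x60\xB0\x17"
    offered = [b"\x00\x00\x00\x03"]  # propose Bolt 3 only
    request = magic + b"".join(offered).ljust(16, b"\x00")
    assert len(request) == 20  # 4 magic bytes + four 4-byte slots
    return request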
class BoltStreamReader(Addressable, Breakable, StreamReader):
    """ Wrapper for asyncio.streams.StreamReader
    """

    def set_transport(self, transport):
        Addressable.set_transport(self, transport)
        StreamReader.set_transport(self, transport)

    async def read(self, n=-1):  # pragma: no cover
        assert False  # not used by current implementation

    async def readuntil(self, separator=b'\n'):  # pragma: no cover
        assert False  # not used by current implementation

    async def readexactly(self, n):
        try:
            return await super().readexactly(n)
        except IncompleteReadError as err:
            message = ("Network read incomplete (received {} of {} "
                       "bytes)".format(len(err.partial), err.expected))
            log.debug("[#%04X] S: <CLOSE>", self.local_address.port_number)
            Breakable.set_broken(self)
            raise BoltConnectionBroken(message, self.remote_address) from err
        except OSError as err:
            log.debug("[#%04X] S: <CLOSE> %d %s",
                      self.local_address.port_number,
                      err.errno, strerror(err.errno))
            Breakable.set_broken(self)
            raise BoltConnectionBroken("Network read failed",
                                       self.remote_address) from err


class BoltStreamWriter(Addressable, Breakable, StreamWriter):
    """ Wrapper for asyncio.streams.StreamWriter
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        Addressable.set_transport(self, self.transport)

    async def drain(self):
        try:
            await super().drain()
        except OSError as err:
            log.debug("[#%04X] S: <CLOSE> (%s)",
                      self.local_address.port_number, err)
            Breakable.set_broken(self)
            raise BoltConnectionBroken("Network write failed",
                                       self.remote_address) from err

    async def wait_closed(self):
        try:
            await super().wait_closed()
        except AttributeError:  # pragma: no cover
            # This is a dirty hack for Python 3.6, which didn't
            # include 'wait_closed'. The code polls waiting for the
            # stream reader inside the protocol to go away which, by
            # the implementation of 3.6, occurs on 'connection_lost'.
            # This hack is likely safe unless the implementation of
            # 3.6 changes in a subsequent patch, and can be removed
            # when Python 3.6 support is no longer required.
            from asyncio import sleep
            try:
                while self._protocol._stream_reader is not None:
                    await sleep(0.1)
            except AttributeError:
                pass


class Pool:

    def acquire(self, *, force_reset=False, timeout=None):
        raise NotImplementedError

    def release(self, *connections, force_reset=False):
        raise NotImplementedError

    def close(self, *, force=False):
        raise NotImplementedError
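
# Illustrative analogue (not from the original module): BoltPool below
# blocks acquirers on WaitingList.join() until a release or a max_size
# increase calls notify(). The real WaitingList lives in
# neo4j.aio._collections and may differ; this Event-based sketch only
# captures the join/notify contract.
from asyncio import Event


class _SimpleWaitingList:

    def __init__(self):
        self._event = Event()

    async def join(self):
        # Block until notify() signals that capacity may have appeared.
        self._event.clear()
        await self._event.wait()

    def notify(self):
        # Wake every coroutine currently blocked in join().
        self._event.set()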
class BoltPool:
    """ A pool of connections to a single address.

    :param opener: a function to which an address can be passed that
        returns an open and ready Bolt connection
    :param address: the remote address for which this pool operates
    :param max_size: the maximum permitted number of simultaneous
        connections that may be owned by this pool, both in-use and
        free
    :param max_age: the maximum permitted age, in seconds, for
        connections to be retained in this pool
    """

    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Create a new connection pool, with an option to seed one
        or more initial connections.
        """
        pool_config = PoolConfig.consume(config)

        def opener(addr):
            return Bolt.open(addr, auth=auth, loop=loop, **pool_config)

        pool = cls(loop, opener, pool_config, address)
        seeds = [await pool.acquire() for _ in range(pool_config.init_size)]
        for seed in seeds:
            await pool.release(seed)
        return pool

    def __init__(self, loop, opener, config, address):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop
        self._opener = opener
        self._address = Address(address)
        self._max_size = config.max_size
        self._max_age = config.max_age
        self._in_use_list = deque()
        self._free_list = deque()
        self._waiting_list = WaitingList(loop=self._loop)

    def __repr__(self):
        return "<{} addr'{}' [{}{}{}]>".format(
            self.__class__.__name__,
            self.address,
            "|" * len(self._in_use_list),
            "." * len(self._free_list),
            " " * (self.max_size - self.size),
        )

    def __contains__(self, cx):
        return cx in self._in_use_list or cx in self._free_list

    def __len__(self):
        return self.size

    @property
    def address(self):
        """ The remote address for which this pool operates.
        """
        return self._address

    @property
    def max_size(self):
        """ The maximum permitted number of simultaneous connections
        that may be owned by this pool, both in-use and free.
        """
        return self._max_size

    @max_size.setter
    def max_size(self, value):
        old_value = self._max_size
        self._max_size = value
        if value > old_value:
            # The maximum size has grown, so new slots have become
            # available. Notify any waiting acquirers of this extra
            # capacity.
            self._waiting_list.notify()

    @property
    def max_age(self):
        """ The maximum permitted age, in seconds, for connections to
        be retained in this pool.
        """
        return self._max_age

    @property
    def in_use(self):
        """ The number of connections in this pool that are currently
        in use.
        """
        return len(self._in_use_list)

    @property
    def size(self):
        """ The total number of connections (both in-use and free)
        currently owned by this connection pool.
        """
        return len(self._in_use_list) + len(self._free_list)

    async def _sanitize(self, cx, *, force_reset):
        """ If the connection is broken or closed, it can be
        discarded. Otherwise, the age of the connection is checked
        against the maximum age permitted by this pool, consequently
        closing it on expiry.

        Should the connection be neither broken, closed nor expired,
        it will be reset (optionally forcibly so) and the connection
        object will be returned, indicating success.
        """
        if cx.broken or cx.closed:
            return None
        expired = self.max_age is not None and cx.age > self.max_age
        if expired:
            await cx.close()
            return None
        await cx.reset(force=force_reset)
        return cx

    async def acquire(self, *, force_reset=False):
        """ Acquire a connection from the pool.

        In the simplest case, this will return an existing open
        connection, if one is free. If not, and the pool is not full,
        a new connection will be created. If the pool is full and no
        free connections are available, this will block until a
        connection is released, or until the acquire call is
        cancelled.

        :param force_reset: if true, the connection will be forcibly
            reset before being returned; if false, this will only
            occur if the connection is not already in a clean state
        :return: a Bolt connection object
        """
        log.debug("Acquiring connection from pool %r", self)
        cx = None
        while cx is None or cx.broken or cx.closed:
            try:
                # Plan A: select a free connection from the pool
                cx = self._free_list.popleft()
            except IndexError:
                if self.size < self.max_size:
                    # Plan B: if the pool isn't full, open
                    # a new connection
                    cx = await self._opener(self.address)
                else:
                    # Plan C: wait for more capacity to become
                    # available, then try again
                    log.debug("Joining waiting list")
                    await self._waiting_list.join()
            else:
                cx = await self._sanitize(cx, force_reset=force_reset)
        self._in_use_list.append(cx)
        return cx

    async def release(self, cx, *, force_reset=False):
        """ Release a Bolt connection, putting it back into the pool
        if the connection is healthy and the pool is not already at
        capacity.

        :param cx: the connection to release
        :param force_reset: if true, the connection will be forcibly
            reset before being released back into the pool; if false,
            this will only occur if the connection is not already in a
            clean state
        :raise ValueError: if the connection is not currently in use,
            or if it does not belong to this pool
        """
        log.debug("Releasing connection %r", cx)
        if cx in self._in_use_list:
            self._in_use_list.remove(cx)
            if self.size < self.max_size:
                # If there is spare capacity in the pool, attempt to
                # sanitize the connection and return it to the pool.
                cx = await self._sanitize(cx, force_reset=force_reset)
                if cx:
                    # Carry on only if sanitation succeeded.
                    if self.size < self.max_size:
                        # Check again if there is still capacity.
                        self._free_list.append(cx)
                        self._waiting_list.notify()
                    else:
                        # Otherwise, close the connection.
                        await cx.close()
            else:
                # If the pool is full, simply close the connection.
                await cx.close()
        elif cx in self._free_list:
            raise ValueError("Connection is not in use")
        else:
            raise ValueError("Connection does not belong to this pool")

    async def prune(self):
        """ Close all free connections.
        """
        await self.__close(self._free_list)

    async def close(self):
        """ Close all connections immediately.

        This does not permanently disable the connection pool, it
        merely shuts down all open connections, including those in
        use.

        Depending on the applications, it may be perfectly acceptable
        to re-acquire connections after pool closure, which will have
        the implicit effect of reopening the pool.

        To close gracefully, allowing work in progress to continue
        until connections are released, use the following sequence
        instead:

            pool.max_size = 0
            pool.prune()

        This will force all future connection acquisitions onto the
        waiting list, and released connections will be closed instead
        of being returned to the pool.
        """
        await self.prune()
        await self.__close(self._in_use_list)

    async def __close(self, connections):
        """ Close all connections in the given list.
        """
        closers = deque()
        while True:
            try:
                cx = connections.popleft()
            except IndexError:
                break
            else:
                closers.append(cx.close())
        if closers:
            await wait(closers, loop=self._loop)
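
# Usage sketch for BoltPool (the address and credentials are
# illustrative, not taken from the original file): acquire a
# connection, use it, then release it back into the pool.
async def _example_bolt_pool():
    pool = await BoltPool.open(("localhost", 7687),
                               auth=("neo4j", "password"))
    cx = await pool.acquire()
    try:
        pass  # ... run queries on cx here ...
    finally:
        await pool.release(cx)
    await pool.close()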
\"\"\" # copied because it", "except BoltConnectionBroken: pass self.__closed = True async def reset(self, force=False):", "in offered_versions).ljust(16, b\"\\x00\") log.debug(\"[#%04X] C: <HANDSHAKE> %r\", local_address.port_number, request_data) writer.write(request_data)", "routers if num_routers == 0: continue # No readers if", "as (\"127.0.0.1\", 7687) :param auth: :param loop: :param config: :return:", "return None await cx.reset(force=force_reset) return cx async def acquire(self, *,", "else: cx = await self._sanitize(cx, force_reset=force_reset) self._in_use_list.append(cx) return cx async", "cx = await pool.acquire() try: new_routing_table = await cx.get_routing_table(self._routing_context) except", "by the # implementation of 3.6, occurs on 'connection_lost'. This", "such as leader switching, so we should not signal an", "existing_routers: rt = await self._get_routing_table_from(self._initial_routers) if rt: return rt #", "force=False): \"\"\" Reset the connection to a clean state. By", "table. \"\"\" for address in self._pools: if address not in", "file is part of Neo4j. # # Licensed under the", "= (neo4j_version,) + tuple(version_info) + (platform,) return template.format(*fields) @classmethod def", "\"\"\" async def run(self, cypher, parameters=None, discard=False, readonly=False, bookmarks=None, timeout=None,", "bolt in [ # This list can be updated as", "is not passed in a tuple \"\"\" # Carry out", "AttributeError: # pragma: no cover # This is a dirty", "a specific protocol version (e.g. (3, 5)) or None :return:", "AttributeError: pass class Pool: def acquire(self, *, force_reset=False, timeout=None): raise", "async with self._refresh_lock: if self._routing_table.is_fresh(readonly=readonly): if readonly: # if reader", "try: await super().drain() except OSError as err: log.debug(\"[#%04X] S: <CLOSE>", "self.closed: return if not self.broken: log.debug(\"[#%04X] S: <HANGUP>\", self.local_address.port_number) self.__writer.write_eof()", "Neo4j Sweden AB [http://neo4j.com] # # This file is part", "not be established :raise BoltConnectionLost: if an I/O error occurs", "# @classmethod # async def open(cls, *addresses, auth=None, security=False, protocol_version=None,", "be created. If the pool is full and no free", "await cls._connect(address, loop, config) try: # Handshake subclass = await", "connection be neither broken, closed nor expired, it will be", "\"\"\" await self.prune() await self.__close(self._in_use_list) async def __close(self, connections): \"\"\"", "self.__writer.broken @property def closed(self): \"\"\" Flag to indicate whether this", "opener, config, addresses, routing_context) # TODO: get initial routing table", "metadata: :raise BoltTransactionError: if a transaction cannot be carried out", "# Unhook any custom error handling and exit. from neo4j.errors", "is currently unable to provide routing information :raise ServiceUnavailable: if", "acquiring a connection as # readonly, then intercept NotALeader and", "the maximum permitted age, in seconds, for connections to be", "reset (optionally forcibly so) and the connection object will be", "including those in use. Depending on the applications, it may", "is part of Neo4j. 
# # Licensed under the Apache", "close(self, force=False): \"\"\" Close all connections and empty the pool.", "await obj.__ainit__(auth) return obj except BoltError: writer.write_eof() writer.close() raise @classmethod", "Unless required by applicable law or agreed to in writing,", "method performs two freshness checks, before and after acquiring the", "is provided, the dictionary will contain either zero or one", "arguments provided are passed with unsupported values \"\"\" # Args", "the specific language governing permissions and # limitations under the", "ValueError: if any of the arguments provided are passed with", "cx.closed: return None expired = self.max_age is not None and", "cls(opener, router_addresses, loop=loop) # # def __init__(self, opener, router_addresses, loop=None):", "neo4j.aio._collections import WaitingList from neo4j.aio._mixins import Addressable, Breakable from neo4j.errors", "await pool.acquire(force_reset=force_reset) except BoltError: await self._deactivate(pool.address) else: if not readonly:", "# router_addresses = Address.parse_list(\" \".join(addresses), default_port=7687) # return cls(opener, router_addresses,", "\"\"\" local_address = Address(writer.transport.get_extra_info(\"sockname\")) remote_address = Address(writer.transport.get_extra_info(\"peername\")) handlers = cls.protocol_handlers(protocol_version)", "we flag we are reading in absence of writer self._missing_writer", "_ = await loop.create_connection(lambda: protocol, **connection_args) writer = BoltStreamWriter(transport, protocol,", "return f @classmethod async def open(cls, address, *, auth=None, loop=None,", "routing information\") async def _ensure_routing_table_is_fresh(self, readonly=False): \"\"\" Update the routing", "passed that returns an open and ready Bolt connection :param", "self._protocol._stream_reader is not None: await sleep(0.1) except AttributeError: pass class", "in self._pools.items() if address in addresses] pools_by_usage = {} for", "await Neo4j.open(\":17601 :17602 :17603\", auth=(\"neo4j\", \"password\")) # await neo4j.update_routing_table() #", "obj.__reader = reader obj.__writer = writer Addressable.set_transport(obj, writer.transport) return obj", "class Neo4jPool: \"\"\" Connection pool with routing table. \"\"\" @classmethod", "into the pool if the connection is healthy and the", "# # def __init__(self, opener, router_addresses, loop=None): # self._routers =", "writer Addressable.set_transport(obj, writer.transport) return obj def __repr__(self): return \"<Bolt address=%r", "provided are passed as incompatible types :raise ValueError: if any", ":param discard: :param readonly: :param bookmarks: :param timeout: :param metadata:", "maximum size has grown, so new slots have become #", "if it does not belong to this pool \"\"\" log.debug(\"Releasing", "< self.max_size: # Check again if there is still capacity.", "(c) 2002-2019 \"Neo4j,\" # Neo4j Sweden AB [http://neo4j.com] # #", "self._initial_routers = addresses self._routing_table = RoutingTable(addresses) self._activate_new_pools_in(self._routing_table) def _activate_new_pools_in(self, routing_table):", "age of this connection in seconds. \"\"\" return perf_counter() -", "C: <DIAL> %s\", address) try: reader = BoltStreamReader(loop=loop) protocol =", "service currently \" \"available\".format(\"read\" if readonly else \"write\")) return choice(pools_by_usage[min(pools_by_usage)])", "(num_writers == 0) # No routers if num_routers == 0:", "prune(self): \"\"\" Close all free connections. 
\"\"\" await self.__close(self._free_list) async", "currently \" \"available\".format(\"read\" if readonly else \"write\")) return choice(pools_by_usage[min(pools_by_usage)]) async", "flag we are reading in absence of writer self._missing_writer =", "fail if the address has already been removed. self._routing_table.routers.discard(address) self._routing_table.readers.discard(address)", "import Version from neo4j.conf import Config, PoolConfig from neo4j.meta import", "# assume the address has already been removed else: pool.max_size", "is not already in a clean state. If forced, this", "default_port=7687) # return cls(opener, router_addresses, loop=loop) # # def __init__(self,", "dictionary of available Bolt protocol handlers, keyed by version tuple.", "connection. await cx.close() elif cx in self._free_list: raise ValueError(\"Connection is", "return async with self._refresh_lock: if self._routing_table.is_fresh(readonly=readonly): if readonly: # if", "getLogger from os import strerror from random import choice from", "self._in_use_list.remove(cx) if self.size < self.max_size: # If there is spare", "elif cx in self._free_list: raise ValueError(\"Connection is not in use\")", ":param reader: :param writer: :param protocol_version: :return: :raise BoltConnectionLost: if", "= await self._opener(self.address) else: # Plan C: wait for more", "from err except OSError as err: log.debug(\"[#%04X] S: <CLOSE> %d", "# Neo4j Sweden AB [http://neo4j.com] # # This file is", "if reader is fresh but writers are not, then #", "return reader, writer @classmethod async def _handshake(cls, reader, writer, protocol_version):", "= BoltStreamReader(loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) transport, _ = await", "clean state :return: a Bolt connection object \"\"\" log.debug(\"Acquiring connection", "pool\") async def prune(self): \"\"\" Close all free connections. \"\"\"", "of parameters. :param readonly: true if a readonly connection is", "readonly: :param bookmarks: :param timeout: :param metadata: :return: \"\"\" async", "await reader.readexactly(4) log.debug(\"[#%04X] S: <HANDSHAKE> %r\", local_address.port_number, response_data) try: agreed_version", "async def _get_routing_table(self): \"\"\" Update the routing table from the", "checks, before and after acquiring the refresh lock. If the", "given routing table. \"\"\" for address in self._pools: if address", "if one is free. If not, and the pool is", "occurs on the underlying socket connection :raise BoltHandshakeError: if handshake", "table and also closing all idle connections to that address.", "Lock(loop=self._loop) self._routing_context = routing_context self._max_size_per_host = config.max_size self._initial_routers = addresses", "max_size(self, value): old_value = self._max_size self._max_size = value if value", "handling and exit. from neo4j.errors import ( NotALeader, ForbiddenOnReadOnlyDatabase, )", "permanently disable the connection pool, it merely shuts down all", "be closed instead of being returned to the pool. \"\"\"", "async def open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None): # opener", "loop=None): # self._routers = Neo4jPool(opener, router_addresses or self.default_router_addresses) # self._writers", "so new slots have become # available. 
Notify any waiting", "address) try: reader = BoltStreamReader(loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) transport,", "BoltConnectionBroken(\"Network read failed\", self.remote_address) from err class BoltStreamWriter(Addressable, Breakable, StreamWriter):", "a RESET will be sent regardless. \"\"\" async def run(self,", "self._ensure_routing_table_is_fresh(readonly=readonly) if readonly: addresses = self._routing_table.readers else: addresses = self._routing_table.writers", "await cx.run(\"CALL dbms.cluster.routing.getRoutingTable($context)\", {\"context\": {}}) # record = await result.single()", "True: try: cx = connections.popleft() except IndexError: break else: closers.append(cx.close())", "the connection is not already in a clean state :return:", "# versions are added and removed. Bolt3, ]} if protocol_version", "def close(self, force=False): \"\"\" Close all connections and empty the", "= config.max_age self._in_use_list = deque() self._free_list = deque() self._waiting_list =", "As an instance attribute, this represents the #: version of", "the method exits immediately; otherwise, the refresh lock is acquired", "hack for Python 3.6, which didn't include # 'wait_closed'. The", "in absence of writer self._missing_writer = (num_writers == 0) #", "\"\"\" log.debug(\"Acquiring connection from pool %r\", self) cx = None", "and the pool is not already at capacity. :param cx:", "one items, depending on whether that version is supported. If", "await self._sanitize(cx, force_reset=force_reset) if cx: # Carry on only if", "main(): # from neo4j.debug import watch; watch(\"neo4j\") # neo4j =", "connections and empty the pool. If forced, in-use connections will", "await loop.create_connection(lambda: protocol, **connection_args) writer = BoltStreamWriter(transport, protocol, reader, loop)", "available :raise ProtocolError: if the routing information received is unusable", "readonly=False): \"\"\" Selects the pool with the fewest in-use connections.", "writer and security settings for the new connection :raise BoltConnectionError:", "# Handle to the StreamReader object. __reader = None #", "to the StreamReader object. __reader = None # Handle to", "async def wait_closed(self): try: await super().wait_closed() except AttributeError: # pragma:", "return None async def _get_routing_table(self): \"\"\" Update the routing table", "handlers.items() if version == protocol_version} @classmethod def opener(cls, auth=None, **config):", "metadata=None): \"\"\" Run an auto-commit transaction. :param cypher: :param parameters:", "pool, both in-use and free. \"\"\" return self._max_size @max_size.setter def", "protocol version is provided, all available versions will be returned.", "await Bolt.open(address, auth=auth, loop=loop, **config) return f @classmethod async def", "that version is supported. If no protocol version is provided,", "out a Bolt handshake, optionally requesting a specific protocol version.", "routing_table): \"\"\" Deactivate any pools that aren't represented in the", "writer self._missing_writer = not self._routing_table.is_fresh(readonly=False) else: rt = await self._get_routing_table()", "or remote peer. 
\"\"\" return self.__reader.broken or self.__writer.broken @property def", "(platform,) return template.format(*fields) @classmethod def protocol_handlers(cls, protocol_version=None): \"\"\" Return a", "explicit protocol version is provided, the dictionary will contain either", "and supported protocol versions :raise TypeError: if protocol version is", "as err: log.debug(\"[#%04X] S: <REJECT> %s (%d %s)\", 0, address,", "pass self.__closed = True async def reset(self, force=False): \"\"\" Reset", "f, args=None, kwargs=None, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Run a", "this pool that are currently in use. \"\"\" return len(self._in_use_list)", "if present, remove from the routing table and also closing", "def _select_pool(self, readonly=False): \"\"\" Selects the pool with the fewest", "a socket connection and perform protocol version negotiation, in order", "Address(transport.get_extra_info(\"peername\")) log.debug(\"[#%04X] S: <ACCEPT> %s -> %s\", local_address.port_number, local_address, remote_address)", "__repr__(self): return \"<Bolt address=%r protocol_version=%r>\" % (self.remote_address, self.protocol_version) async def", "released, or until the acquire call is cancelled. :param force_reset:", "= await cx.run(\"CALL dbms.cluster.routing.getRoutingTable($context)\", {\"context\": {}}) # record = await", "handlers = {bolt.protocol_version: bolt for bolt in [ # This", "= BoltStreamWriter(transport, protocol, reader, loop) except SSLError as err: log.debug(\"[#%04X]", "address) seeds = [await pool.acquire() for _ in range(pool_config.init_size)] for", "RESET message will only be sent if required, i.e. if", "free. If not, and the pool is not full, a", "except OSError as err: log.debug(\"[#%04X] S: <CLOSE> %d %s\", err.errno,", "try: reader = BoltStreamReader(loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) transport, _", "@property def broken(self): \"\"\" Flag to indicate whether this connection", "\"\"\" Fetch a new routing table. :param context: the routing", "async def main(): # from neo4j.debug import watch; watch(\"neo4j\") #", "limitations under the License. from asyncio import ( IncompleteReadError, Lock,", "*, force_reset): \"\"\" Attempt to clean up a connection, such", "pool. This method is thread safe. \"\"\" for pool in", "and perform protocol version negotiation, in order to construct and", "raise ValueError(\"Connection is not in use\") else: raise ValueError(\"Connection does", "# self._routing_table = None # # @property # def routing_table(self):", "avoid circular # dependency issues. from neo4j.aio.bolt3 import Bolt3 handlers", "seconds, for connections to be retained in this pool \"\"\"", "pool is not full, a new connection will be created.", "= PoolConfig.consume(config) def opener(addr): return Bolt.open(addr, auth=auth, **pool_config) obj =", "v in offered_versions).ljust(16, b\"\\x00\") log.debug(\"[#%04X] C: <HANDSHAKE> %r\", local_address.port_number, request_data)", "imports locally to avoid circular # dependency issues. from neo4j.aio.bolt3", "transaction cannot be carried out at this time \"\"\" async", "optionally requesting a specific protocol version. :param reader: :param writer:", "loop self._opener = opener self._config = config self._pools = {}", "self.protocol_version) async def __ainit__(self, auth): \"\"\" Asynchronous initializer for implementation", "can be reused. 
If the connection is broken or closed,", "sys import platform, version_info from time import perf_counter from neo4j.addressing", "all connections immediately. This does not permanently disable the connection", "this represents the #: version of the protocol in use.", "StreamReader, StreamReaderProtocol, StreamWriter, get_event_loop, wait, ) from collections import deque", "tuple(version_info) + (platform,) return template.format(*fields) @classmethod def protocol_handlers(cls, protocol_version=None): \"\"\"", "keyed by version tuple. If an explicit protocol version is", "from neo4j.errors import ( NotALeader, ForbiddenOnReadOnlyDatabase, ) def handler(failure): \"\"\"", "_ensure_routing_table_is_fresh(self, readonly=False): \"\"\" Update the routing table if stale. This", "to this pool\") async def prune(self): \"\"\" Close all free", "{} for pool in pools: pools_by_usage.setdefault(pool.in_use, []).append(pool) if not pools_by_usage:", "neo4j.errors import ( BoltError, BoltConnectionError, BoltSecurityError, BoltConnectionBroken, BoltHandshakeError, Neo4jAvailabilityError, )", "NotALeader and # ForbiddenOnReadOnlyDatabase errors to # invalidate the routing", "raise BoltSecurityError(\"Failed to establish a secure connection\", address) from err", "You may obtain a copy of the License at #", "not used by current implementation async def readexactly(self, n): try:", "two freshness checks, before and after acquiring the refresh lock.", "reader, writer and security settings for the new connection :raise", "from \" \"{!r} ({!r})\".format(router, self._routing_table)) return new_routing_table finally: await pool.release(cx)", "object): #: True if this instance uses secure communication, false", "close(self): \"\"\" Close the connection. \"\"\" if self.closed: return if", "is None: return handlers if not isinstance(protocol_version, tuple): raise TypeError(\"Protocol", "implementation by subclasses. :param auth: \"\"\" @property def age(self): \"\"\"", "except OSError as err: log.debug(\"[#%04X] S: <REJECT> %s (%d %s)\",", "ValueError(\"No protocol handlers available (requested Bolt %r)\", protocol_version) offered_versions =", "neo4j.aio._mixins import Addressable, Breakable from neo4j.errors import ( BoltError, BoltConnectionError,", ":return: a 3-tuple of reader, writer and security settings for", "else: closers.append(cx.close()) if closers: await wait(closers, loop=self._loop) class Neo4jPool: \"\"\"", "config, addresses, routing_context): if loop is None: self._loop = get_event_loop()", "the simplest case, this will return an existing open connection,", "the pool cx = self._free_list.popleft() except IndexError: if self.size <", "force_reset): \"\"\" Attempt to clean up a connection, such that", "<REJECT> %s (%d %s)\", 0, address, err.errno, strerror(err.errno)) raise BoltSecurityError(\"Failed", ":param opener: a function to which an address can be", "parameters: :param discard: :param readonly: :param bookmarks: :param timeout: :param", "neo4j = await Neo4j.open(\":17601 :17602 :17603\", auth=(\"neo4j\", \"password\")) # await", "%r\", local_address.port_number, response_data) try: agreed_version = Version.from_bytes(response_data) except ValueError as", "or self.__writer.broken @property def closed(self): \"\"\" Flag to indicate whether", "deque() self._waiting_list = WaitingList(loop=self._loop) def __repr__(self): return \"<{} addr'{}' [{}{}{}]>\".format(", "in use. 
\"\"\" return len(self._in_use_list) @property def size(self): \"\"\" The", "has grown, so new slots have become # available. Notify", "in use, or if it does not belong to this", "fewest in-use connections. \"\"\" await self._ensure_routing_table_is_fresh(readonly=readonly) if readonly: addresses =", "opener(addr): return Bolt.open(addr, auth=auth, **pool_config) obj = cls(loop, opener, config,", "readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Run a transaction function and", "protocol, reader, loop) except SSLError as err: log.debug(\"[#%04X] S: <REJECT>", ":param context: the routing context to use for this call", "entry, the method exits immediately; otherwise, the refresh lock is", "pool.release(cx) return None async def _get_routing_table(self): \"\"\" Update the routing", "writer, config.protocol_version) # Instantiation obj = subclass(reader, writer) obj.secure =", "same configuration details, such as within a connection pool. \"\"\"", "remote_address = Address(transport.get_extra_info(\"peername\")) log.debug(\"[#%04X] S: <ACCEPT> %s -> %s\", local_address.port_number,", "metadata=None): \"\"\" Begin an explicit transaction. :param readonly: :param bookmarks:", "given set of configuration parameters. This is useful when multiple", "await wait(closers, loop=self._loop) class Neo4jPool: \"\"\" Connection pool with routing", "old_value = self._max_size self._max_size = value if value > old_value:", "if value > old_value: # The maximum size has grown,", "ValueError(\"Connection does not belong to this pool\") async def prune(self):", "return self._routing_table # finally: # self._routers.release(cx) # async def main():", "Addressable.set_transport(obj, writer.transport) return obj def __repr__(self): return \"<Bolt address=%r protocol_version=%r>\"", "pool of connections to a single address. :param opener: a", "BoltConnectionBroken, BoltHandshakeError, Neo4jAvailabilityError, ) from neo4j.api import Version from neo4j.conf", ":param config: :return: instance of a Bolt subclass :raise BoltConnectionError:", "IndexError: if self.size < self.max_size: # Plan B: if the", "pool, it merely shuts down all open connections, including those", "the routing table if stale. This method performs two freshness", "already been removed else: pool.max_size = 0 await pool.prune() async", "or cx.broken or cx.closed: try: # Plan A: select a", "the License. from asyncio import ( IncompleteReadError, Lock, StreamReader, StreamReaderProtocol,", "Close the connection. \"\"\" if self.closed: return if not self.broken:", "False if self._missing_writer: has_tried_initial_routers = True rt = await self._get_routing_table_from(self._initial_routers)", "Config, PoolConfig from neo4j.meta import version as neo4j_version from neo4j.routing", "if the connection is healthy and the pool is not", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "\"\"\" template = \"neo4j-python/{} Python/{}.{}.{}-{}-{} ({})\" fields = (neo4j_version,) +", "update is still required. \"\"\" if self._routing_table.is_fresh(readonly=readonly): return async with", "has been broken by the network or remote peer. 
\"\"\"", "second freshness check that follows determines whether an update is", "is fresh but writers are not, then # we are", "work in progress to continue until connections are released, use", "we're not acquiring a connection as # readonly, then intercept", "log.debug(\"[#%04X] S: <CLOSE> (%s)\", self.local_address.port_number, err) Breakable.set_broken(self) raise BoltConnectionBroken(\"Network write", "cx = self._free_list.popleft() except IndexError: if self.size < self.max_size: #", "force_reset=False): \"\"\" Release a connection back into the pool. This", "in the pool, attempt to # sanitize the connection and", "= self._free_list.popleft() except IndexError: if self.size < self.max_size: # Plan", "are specified. default_router_addresses = Address.parse_list(\":7687 :17601 :17687\") # TODO #", "except IndexError: if self.size < self.max_size: # Plan B: if", "= deque() self._free_list = deque() self._waiting_list = WaitingList(loop=self._loop) def __repr__(self):", "import strerror from random import choice from ssl import SSLError", "with an option to seed one or more initial connections.", "if readonly: addresses = self._routing_table.readers else: addresses = self._routing_table.writers pools", "tuple\") return {version: handler for version, handler in handlers.items() if", "language governing permissions and # limitations under the License. from", "random import choice from ssl import SSLError from sys import", "Breakable, StreamWriter): \"\"\" Wrapper for asyncio.streams.StreamWriter \"\"\" def __init__(self, *args,", "handle ValueError? # return self._routing_table # finally: # self._routers.release(cx) #", "%s (%d %s)\", 0, address, err.errno, strerror(err.errno)) raise BoltSecurityError(\"Failed to", "if rt: return rt rt = await self._get_routing_table_from(*existing_routers) if rt:", "reader, writer = await cls._connect(address, loop, config) try: # Handshake", "self._routing_table # finally: # self._routers.release(cx) # async def main(): #", "config: :return: a 3-tuple of reader, writer and security settings", "Plan C: wait for more capacity to become # available,", "if closers: await wait(closers, loop=self._loop) class Neo4jPool: \"\"\" Connection pool", "# None of the routers have been successful, so just", "owned by this pool, both in-use and free. \"\"\" return", "TODO: handle ValueError? # return self._routing_table # finally: # self._routers.release(cx)", "config.max_age self._in_use_list = deque() self._free_list = deque() self._waiting_list = WaitingList(loop=self._loop)", "timeout=None): raise NotImplementedError def release(self, *connections, force_reset=False): raise NotImplementedError def", "\"\"\" Begin an explicit transaction. :param readonly: :param bookmarks: :param", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "the time at which this connection was opened. __t_opened =", "License. # You may obtain a copy of the License", "all relevant and supported protocol versions :raise TypeError: if protocol", "in-use and free) currently owned by this connection pool. \"\"\"", "= Address(writer.transport.get_extra_info(\"peername\")) handlers = cls.protocol_handlers(protocol_version) if not handlers: raise ValueError(\"No", "connection. \"\"\" if self.closed: return if not self.broken: log.debug(\"[#%04X] S:", "such that it can be reused. If the connection is", "raising the failure. 
\"\"\" log.debug(\"[#0000] C: <ROUTING> Invalidating routing table\")", "= {} for pool in pools: pools_by_usage.setdefault(pool.in_use, []).append(pool) if not", "obj._ensure_routing_table_is_fresh() return obj def __init__(self, loop, opener, config, addresses, routing_context):", "of this extra # capacity. self._waiting_list.notify() @property def max_age(self): \"\"\"", "self._pools.values(): try: await pool.release(connection, force_reset=force_reset) except ValueError: pass else: #", "if a readonly connection is required, otherwise false :param force_reset:", "self.size @property def address(self): \"\"\" The remote address for which", "`discard` instead of `remove` here since the former # will", "a free connection from the pool cx = self._free_list.popleft() except", "log.debug(\"[#0000] C: <ROUTING> Invalidating routing table\") self._routing_table.ttl = 0 raise", "on expiry. Should the connection be neither broken, closed nor", "# result = await cx.run(\"CALL dbms.cluster.routing.getRoutingTable($context)\", {\"context\": {}}) # record", "cx.close() else: # If the pool is full, simply close", "after acquiring the refresh lock. If the routing table is", "if address not in routing_table: await self._deactivate(address) async def _get_routing_table_from(self,", "provide routing information :raise ServiceUnavailable: if no writers are available", "then # we are reading in absence of writer self._missing_writer", "= not self._routing_table.is_fresh(readonly=False) else: rt = await self._get_routing_table() self._activate_new_pools_in(rt) self._routing_table.update(rt)", "reverse=True)[:4] request_data = MAGIC + b\"\".join( v.to_bytes() for v in", "and after acquiring the refresh lock. If the routing table", "Breakable, StreamReader): \"\"\" Wrapper for asyncio.streams.StreamReader \"\"\" def set_transport(self, transport):", "self._routing_table.is_fresh(readonly=False) else: rt = await self._get_routing_table() self._activate_new_pools_in(rt) self._routing_table.update(rt) await self._deactivate_pools_not_in(rt)", "have been successful, so just fail log.error(\"Unable to retrieve routing", "log = getLogger(__name__) MAGIC = b\"\\x60\\x60\\xB0\\x17\" class Bolt(Addressable, object): #:", "for bolt in [ # This list can be updated", ":param config: :return: a 3-tuple of reader, writer and security", "self.size < self.max_size: # Plan B: if the pool isn't", "table before raising the failure. \"\"\" log.debug(\"[#0000] C: <ROUTING> Invalidating", "available, then try again log.debug(\"Joining waiting list\") await self._waiting_list.join() else:", "be established \"\"\" assert isinstance(address, Address) assert loop is not", ":param protocol_version: :return: :raise BoltConnectionLost: if an I/O error occurs", "true if a readonly connection is required, otherwise false :param", "required, otherwise false :param force_reset: \"\"\" while True: pool =", "def readexactly(self, n): try: return await super().readexactly(n) except IncompleteReadError as", "\"\"\" Run a transaction function and return the return value", "b\"\".join( v.to_bytes() for v in offered_versions).ljust(16, b\"\\x00\") log.debug(\"[#%04X] C: <HANDSHAKE>", "handlers: raise ValueError(\"No protocol handlers available (requested Bolt %r)\", protocol_version)", "def default_user_agent(cls): \"\"\" Return the default user agent string for", "connection pool. 
\"\"\" return len(self._in_use_list) + len(self._free_list) async def _sanitize(self,", "pool_config = PoolConfig.consume(config) def opener(addr): return Bolt.open(addr, auth=auth, **pool_config) obj", "= PoolConfig.consume(config) def opener(addr): return Bolt.open(addr, auth=auth, loop=loop, **pool_config) pool", "no cover assert False # not used by current implementation", "dict(self._pools) self._pools.clear() for address, pool in pools.items(): if force: await", "BoltError: await self._deactivate(pool.address) else: if not readonly: # If we're", "= 0 pool.prune() This will force all future connection acquisitions", "determines whether an update is still required. \"\"\" if self._routing_table.is_fresh(readonly=readonly):", "neo4j.conf import Config, PoolConfig from neo4j.meta import version as neo4j_version", "of the arguments provided are passed with unsupported values \"\"\"", "broken(self): \"\"\" Flag to indicate whether this connection has been", "# finally: # self._routers.release(cx) # async def main(): # from", "the former # will not fail if the address has", "\"family\": address.family, # TODO: other args } ssl_context = config.get_ssl_context()", "state. If forced, this check will be overridden and a", "SSLError from sys import platform, version_info from time import perf_counter", "to clean up a connection, such that it can be", "the pool with the fewest in-use connections. \"\"\" await self._ensure_routing_table_is_fresh(readonly=readonly)", "version_info from time import perf_counter from neo4j.addressing import Address from", "connection is required, otherwise false :param force_reset: \"\"\" while True:", "tuple. If an explicit protocol version is provided, the dictionary", "# limitations under the License. from asyncio import ( IncompleteReadError,", "BoltConnectionBroken: pass self.__closed = True async def reset(self, force=False): \"\"\"", "router_addresses or self.default_router_addresses) # self._writers = Neo4jPool(opener) # self._readers =", "self._loop = get_event_loop() else: self._loop = loop self._opener = opener", "logging import getLogger from os import strerror from random import", "release :param force_reset: if true, the connection will be forcibly", "await sleep(0.1) except AttributeError: pass class Pool: def acquire(self, *,", "an auto-commit transaction. 
:param cypher: :param parameters: :param discard: :param", ":param force_reset: \"\"\" while True: pool = await self._select_pool(readonly=readonly) try:", "the arguments provided are passed as incompatible types :raise ValueError:", "bookmarks: :param timeout: :param metadata: :return: \"\"\" async def run_tx(self,", "async def drain(self): try: await super().drain() except OSError as err:", "__close(self, connections): \"\"\" Close all connections in the given list.", "addresses = self._routing_table.writers pools = [pool for address, pool in", "readonly else \"write\")) return choice(pools_by_usage[min(pools_by_usage)]) async def acquire(self, *, readonly=False,", "it will be reset (optionally forcibly so) and the connection", "if the routing information received is unusable \"\"\" class BoltStreamReader(Addressable,", "StreamReader.set_transport(self, transport) async def readuntil(self, separator=b'\\n'): # pragma: no cover", "applications, it may be perfectly acceptable to re-acquire connections after", "after pool closure, which will have the implicit affect of", "an address from the connection pool, if present, remove from", "or cx.closed: try: # Plan A: select a free connection", ":param max_size: the maximum permitted number of simultaneous connections that", "except BoltError: await self._deactivate(router) else: num_routers = len(new_routing_table.routers) num_readers =", "tuple to handler class for all relevant and supported protocol", "log.debug(\"Unsupported Bolt protocol version %s\", agreed_version) raise BoltHandshakeError(\"Unsupported Bolt protocol", "@classmethod # async def open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None):", "License. from asyncio import ( IncompleteReadError, Lock, StreamReader, StreamReaderProtocol, StreamWriter,", "<ROUTING> table=%r\", self._routing_table) try: pool = self._pools.pop(address) except KeyError: pass", "\"\"\" Update the routing table if stale. This method performs", "handlers, keyed by version tuple. If an explicit protocol version", "# async def open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None): #", "request_data = MAGIC + b\"\".join( v.to_bytes() for v in offered_versions).ljust(16,", "to the pool. cx = await self._sanitize(cx, force_reset=force_reset) if cx:", "switching, so we should not signal an error. # When", "self._routing_table.readers.discard(address) self._routing_table.writers.discard(address) log.debug(\"[#0000] C: <ROUTING> table=%r\", self._routing_table) try: pool =", "and return it to the pool. cx = await self._sanitize(cx,", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "not signal an error. # When no writers available, then", "get_event_loop() else: self._loop = loop self._opener = opener self._config =", "-> %s\", local_address.port_number, local_address, remote_address) return reader, writer @classmethod async", "received is unusable \"\"\" class BoltStreamReader(Addressable, Breakable, StreamReader): \"\"\" Wrapper", "# TODO: handle ValueError? 
# return self._routing_table # finally: #", "if the routing table is successfully updated, otherwise False \"\"\"", "a readonly connection is required, otherwise false :param force_reset: \"\"\"", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "be established :raise BoltConnectionLost: if an I/O error occurs on", "assert isinstance(address, Address) assert loop is not None assert isinstance(config,", "BoltSecurityError(\"Failed to establish a secure connection\", address) from err except", "\"\"\" # Args address = Address(address) if loop is None:", "unusable \"\"\" class BoltStreamReader(Addressable, Breakable, StreamReader): \"\"\" Wrapper for asyncio.streams.StreamReader", "pool\") async def _deactivate(self, address): \"\"\" Deactivate an address from", "will be forcibly reset before being returned; if false, this", "def wait_closed(self): try: await super().wait_closed() except AttributeError: # pragma: no", "= config self._pools = {} self._missing_writer = False self._refresh_lock =", "bolt for bolt in [ # This list can be", "# -*- encoding: utf-8 -*- # Copyright (c) 2002-2019 \"Neo4j,\"", "neither broken, closed nor expired, it will be reset (optionally", "self.size < self.max_size: # Check again if there is still", "broken, closed nor expired, it will be reset (optionally forcibly", "for address in routing_table.servers(): if address not in self._pools: self._pools[address]", "except BoltError: await self._deactivate(pool.address) else: if not readonly: # If", "required by applicable law or agreed to in writing, software", "import version as neo4j_version from neo4j.routing import RoutingTable log =", "BoltStreamWriter(Addressable, Breakable, StreamWriter): \"\"\" Wrapper for asyncio.streams.StreamWriter \"\"\" def __init__(self,", "len(new_routing_table.readers) num_writers = len(new_routing_table.writers) # No writers are available. This", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "assume the address has already been removed else: pool.max_size =", "freshness checks, before and after acquiring the refresh lock. If", "is closed __closed = False @classmethod def default_user_agent(cls): \"\"\" Return", "specified as a tuple\") return {version: handler for version, handler", "immediately; otherwise, the refresh lock is acquired and the second", "inside the protocol to go away which, by the #", "import watch; watch(\"neo4j\") # neo4j = await Neo4j.open(\":17601 :17602 :17603\",", "update routing tables with the given routers. :return: True if", "to seed one or more initial connections. \"\"\" pool_config =", "request_data, response_data) from err try: subclass = handlers[agreed_version] except KeyError:", "or more initial connections. \"\"\" pool_config = PoolConfig.consume(config) def opener(addr):", "connection will be forcibly reset before being released back into", "seeds = [await pool.acquire() for _ in range(pool_config.init_size)] for seed", "def close(self): \"\"\" Close the connection. 
\"\"\" if self.closed: return", "close gracefully, allowing work in progress to continue until connections", "underlying socket connection :raise BoltHandshakeError: if handshake completes without a", "agreed to in writing, software # distributed under the License", "pool.prune() This will force all future connection acquisitions onto the", "merely shuts down all open connections, including those in use.", "\"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) Addressable.set_transport(self, self.transport) async", "The maximum size has grown, so new slots have become", "async def reset(self, force=False): \"\"\" Reset the connection to a", "from asyncio import sleep try: while self._protocol._stream_reader is not None:", "pool \"\"\" @classmethod async def open(cls, address, *, auth=None, loop=None,", "distributed under the License is distributed on an \"AS IS\"", "def acquire(self, *, readonly=False, force_reset=False): \"\"\" Acquire a connection to", "connection to release :param force_reset: if true, the connection will", "loop=None, **config): \"\"\" Open a socket connection and perform protocol", "from pool %r\", self) cx = None while cx is", "from logging import getLogger from os import strerror from random", "= Neo4jPool(opener) # self._routing_table = None # # @property #", "Bolt handshake, optionally requesting a specific protocol version. :param reader:", "other args } ssl_context = config.get_ssl_context() if ssl_context: connection_args[\"ssl\"] =", "<CLOSE> %d %s\", err.errno, strerror(err.errno)) Breakable.set_broken(self) raise BoltConnectionBroken(\"Network read failed\",", "\"\"\" Update the routing table from the first router able", "initializer for implementation by subclasses. :param auth: \"\"\" @property def", "given list. \"\"\" closers = deque() while True: try: cx", "@property def size(self): \"\"\" The total number of connections (both", "in a clean state :return: a Bolt connection object \"\"\"", "\"\"\" # copied because it can be modified existing_routers =", "<HANDSHAKE> %r\", local_address.port_number, response_data) try: agreed_version = Version.from_bytes(response_data) except ValueError", "routers. :return: True if the routing table is successfully updated,", "log.debug(\"[#0000] C: <DIAL> %s\", address) try: reader = BoltStreamReader(loop=loop) protocol", "pool.acquire() for _ in range(pool_config.init_size)] for seed in seeds: await", "\"\"\" @property def age(self): \"\"\" The age of this connection", "watch; watch(\"neo4j\") # neo4j = await Neo4j.open(\":17601 :17602 :17603\", auth=(\"neo4j\",", "fresh but writers are not, then # we are reading", "`remove` here since the former # will not fail if", "an open and ready Bolt connection :param address: the remote", "pool.release(connection, force_reset=force_reset) except ValueError: pass else: # Unhook any custom", "force_reset=False): \"\"\" Acquire a connection to a server that can", "self._pools[router] cx = await pool.acquire() try: new_routing_table = await cx.get_routing_table(self._routing_context)", "In the simplest case, this will return an existing open", "\"available\".format(\"read\" if readonly else \"write\")) return choice(pools_by_usage[min(pools_by_usage)]) async def acquire(self,", "be retained in this pool. \"\"\" return self._max_age @property def", "configuration parameters. 
This is useful when multiple servers share the", "putting it back into the pool if the connection is", "await super().readexactly(n) except IncompleteReadError as err: message = (\"Network read", "expiry. Should the connection be neither broken, closed nor expired,", "#: otherwise. secure = None #: As a class attribute,", "waiting list, and released connections will be closed instead of", "expired: await cx.close() return None await cx.reset(force=force_reset) return cx async", "in routers: pool = self._pools[router] cx = await pool.acquire() try:", "be returned. :param protocol_version: tuple identifying a specific protocol version", "else: # Otherwise, close the connection. await cx.close() else: #", "response_data) try: agreed_version = Version.from_bytes(response_data) except ValueError as err: writer.close()", "self.local_address.port_number, err) Breakable.set_broken(self) raise BoltConnectionBroken(\"Network write failed\", self.remote_address) from err", "self._get_routing_table_from(*existing_routers) if rt: return rt if not has_tried_initial_routers and self._initial_routers", "subclass def __new__(cls, reader, writer): obj = super().__new__(cls) obj.__t_opened =", "<HANGUP>\", self.local_address.port_number) self.__writer.write_eof() self.__writer.close() try: await self.__writer.wait_closed() except BoltConnectionBroken: pass", "( NotALeader, ForbiddenOnReadOnlyDatabase, ) def handler(failure): \"\"\" Invalidate the routing", ") from collections import deque from logging import getLogger from", "communication, false #: otherwise. secure = None #: As a", "is no longer required. # from asyncio import sleep try:", "TypeError(\"Protocol version must be specified as a tuple\") return {version:", "table is successfully updated, otherwise False \"\"\" log.debug(\"Attempting to update", "in a clean state :raise ValueError: if the connection is", "useful when multiple servers share the same configuration details, such", "get initial routing table and construct await obj._ensure_routing_table_is_fresh() return obj", "protocol_version): \"\"\" Carry out a Bolt handshake, optionally requesting a", "addresses, routing_context) # TODO: get initial routing table and construct", "by this connection pool. \"\"\" return len(self._in_use_list) + len(self._free_list) async", "that address. \"\"\" log.debug(\"[#0000] C: <ROUTING> Deactivating address %r\", address)", ":return: True if the routing table is successfully updated, otherwise", "cx.get_routing_table(self._routing_context) except BoltError: await self._deactivate(router) else: num_routers = len(new_routing_table.routers) num_readers", "that subclass. As an instance attribute, this represents the #:", "BoltConnectionBroken(message, self.remote_address) from err except OSError as err: log.debug(\"[#%04X] S:", "governing permissions and # limitations under the License. from asyncio", "TCP connection to the address provided. :param address: :param loop:", "default router address list to use if no addresses are", "connection could not be established \"\"\" assert isinstance(address, Address) assert", "if not handlers: raise ValueError(\"No protocol handlers available (requested Bolt", "a new routing table. :param context: the routing context to", "force_reset: \"\"\" while True: pool = await self._select_pool(readonly=readonly) try: cx", "all idle connections to that address. 
\"\"\" log.debug(\"[#0000] C: <ROUTING>", "it back into the pool if the connection is healthy", "to which an address can be passed that returns an", "from neo4j.api import Version from neo4j.conf import Config, PoolConfig from", "writers are not, then # we are reading in absence", "only if sanitation succeeded. if self.size < self.max_size: # Check", "async def _select_pool(self, readonly=False): \"\"\" Selects the pool with the", "if self.closed: return if not self.broken: log.debug(\"[#%04X] S: <HANGUP>\", self.local_address.port_number)", "have pools. \"\"\" for address in routing_table.servers(): if address not", "return the return value from that function. \"\"\" async def", "try: # result = await cx.run(\"CALL dbms.cluster.routing.getRoutingTable($context)\", {\"context\": {}}) #", "which this pool operates :param max_size: the maximum permitted number", "ready Bolt connection :param address: the remote address for which", "the connection will be forcibly reset before being released back", "existing open connection, if one is free. If not, and", "in absence of writer self._missing_writer = not self._routing_table.is_fresh(readonly=False) else: rt", "the waiting list, and released connections will be closed instead", "import Bolt3 handlers = {bolt.protocol_version: bolt for bolt in [", "def get_routing_table(self, context=None): \"\"\" Fetch a new routing table. :param", "connections to that address. \"\"\" log.debug(\"[#0000] C: <ROUTING> Deactivating address", "class BoltPool: \"\"\" A pool of connections to a single", "reading in absence of writer self._missing_writer = (num_writers == 0)", "routing information\") raise Neo4jAvailabilityError(\"Unable to retrieve routing information\") async def", "relevant and supported protocol versions :raise TypeError: if protocol version", "default_router_addresses = Address.parse_list(\":7687 :17601 :17687\") # TODO # @classmethod #", "so just fail log.error(\"Unable to retrieve routing information\") raise Neo4jAvailabilityError(\"Unable", "from neo4j.addressing import Address from neo4j.aio._collections import WaitingList from neo4j.aio._mixins", "routers have been successful, so just fail log.error(\"Unable to retrieve", "transaction. :param readonly: :param bookmarks: :param timeout: :param metadata: :return:", "permitted number of simultaneous connections that may be owned by", "address, pool in self._pools.items() if address in addresses] pools_by_usage =", "without a successful negotiation :raise TypeError: if any of the", "isinstance(address, Address) assert loop is not None assert isinstance(config, Config)", "await cx.close() else: # If the pool is full, simply", "Address(writer.transport.get_extra_info(\"peername\")) handlers = cls.protocol_handlers(protocol_version) if not handlers: raise ValueError(\"No protocol", "OR CONDITIONS OF ANY KIND, either express or implied. #", "force_reset=False): \"\"\" Acquire a connection from the pool. In the", "the fewest in-use connections. \"\"\" await self._ensure_routing_table_is_fresh(readonly=readonly) if readonly: addresses", "def run_tx(self, f, args=None, kwargs=None, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\"", "dirty hack for Python 3.6, which didn't include # 'wait_closed'.", "the License is distributed on an \"AS IS\" BASIS, #", ":raise BoltConnectionError: if a connection could not be established :raise", "Flag to indicate whether this connection has been broken by", "if not, they will remain open until released. 
\"\"\" pools", "the routing information received is unusable \"\"\" class BoltStreamReader(Addressable, Breakable,", "open until released. \"\"\" pools = dict(self._pools) self._pools.clear() for address,", "b\"\\x60\\x60\\xB0\\x17\" class Bolt(Addressable, object): #: True if this instance uses", "for _ in range(pool_config.init_size)] for seed in seeds: await pool.release(seed)", "already in a clean state :return: a Bolt connection object", "def open(cls, *addresses, auth=None, routing_context=None, loop=None, **config): pool_config = PoolConfig.consume(config)", "strerror(err.errno)) Breakable.set_broken(self) raise BoltConnectionBroken(\"Network read failed\", self.remote_address) from err class", "run(self, cypher, parameters=None, discard=False, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Run", "# No routers if num_routers == 0: continue # No", "in seconds. \"\"\" return perf_counter() - self.__t_opened @property def broken(self):", "{} service currently \" \"available\".format(\"read\" if readonly else \"write\")) return", "the pool is full and no free connections are available,", "import ( NotALeader, ForbiddenOnReadOnlyDatabase, ) connection.del_failure_handler(NotALeader) connection.del_failure_handler(ForbiddenOnReadOnlyDatabase) break else: raise", "been broken by the network or remote peer. \"\"\" return", "returned; if false, this will only occur if the connection", "False # not used by current implementation async def readexactly(self,", "\"\"\" @classmethod async def open(cls, *addresses, auth=None, routing_context=None, loop=None, **config):", "the #: version of the protocol in use. protocol_version =", "If no protocol version is provided, all available versions will", "has been closed locally.\"\"\" return self.__closed async def close(self): \"\"\"", ":param loop: :param config: :return: a 3-tuple of reader, writer", "None async def _get_routing_table(self): \"\"\" Update the routing table from", "def close(self, *, force=False): raise NotImplementedError class BoltPool: \"\"\" A", "are currently in use. \"\"\" return len(self._in_use_list) @property def size(self):", "return len(self._in_use_list) @property def size(self): \"\"\" The total number of", "= await self._get_routing_table_from(self._initial_routers) if rt: return rt # None of", "def open(cls, address, *, auth=None, loop=None, **config): \"\"\" Create a", ":param readonly: :param bookmarks: :param timeout: :param metadata: :raise BoltTransactionError:", "= await cls._connect(address, loop, config) try: # Handshake subclass =", "address for which this pool operates :param max_size: the maximum", "not fail if the address has already been removed. self._routing_table.routers.discard(address)", "acquirers of this extra # capacity. self._waiting_list.notify() @property def max_age(self):", "perf_counter from neo4j.addressing import Address from neo4j.aio._collections import WaitingList from", "handlers = cls.protocol_handlers(protocol_version) if not handlers: raise ValueError(\"No protocol handlers", "\"\"\" Selects the pool with the fewest in-use connections. 
\"\"\"", "is required, otherwise false :param force_reset: \"\"\" while True: pool", "law or agreed to in writing, software # distributed under", "a successful negotiation \"\"\" local_address = Address(writer.transport.get_extra_info(\"sockname\")) remote_address = Address(writer.transport.get_extra_info(\"peername\"))", "self._max_size = config.max_size self._max_age = config.max_age self._in_use_list = deque() self._free_list", "not has_tried_initial_routers and self._initial_routers not in existing_routers: rt = await", "self.max_size: # If there is spare capacity in the pool,", "establish a secure connection\", address) from err except OSError as", "true, the connection will be forcibly reset before being returned;", "= (num_writers == 0) # No routers if num_routers ==", "def _deactivate_pools_not_in(self, routing_table): \"\"\" Deactivate any pools that aren't represented", "# If the pool is full, simply close the connection.", "# changes in a subsequent patch, and can be removed", "The maximum permitted number of simultaneous connections that may be", "re-acquire connections after pool closure, which will have the implicit", "is not None: await sleep(0.1) except AttributeError: pass class Pool:", "protocol version %s\", agreed_version) raise BoltHandshakeError(\"Unsupported Bolt protocol version\", remote_address,", "may be owned by this pool, both in-use and free.", ":param auth: :param loop: :param config: :return: instance of a", "dictionary of version tuple to handler class for all relevant", "for Python 3.6, which didn't include # 'wait_closed'. The code", "self._routing_table # # async def update_routing_table(self): # cx = await", "within a connection pool. \"\"\" async def f(address, *, loop=None):", "The maximum permitted age, in seconds, for connections to be", "of reader, writer and security settings for the new connection", "pool with routing table. \"\"\" @classmethod async def open(cls, *addresses,", "connection. \"\"\" template = \"neo4j-python/{} Python/{}.{}.{}-{}-{} ({})\" fields = (neo4j_version,)", ":param cx: the connection to release :param force_reset: if true,", "#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c)", "pool cx = self._free_list.popleft() except IndexError: if self.size < self.max_size:", "not None and cx.age > self.max_age if expired: await cx.close()", "may obtain a copy of the License at # #", "table\") self._routing_table.ttl = 0 raise failure cx.set_failure_handler(NotALeader, handler) cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler)", "failed\", self.remote_address) from err async def wait_closed(self): try: await super().wait_closed()", "denotes the version of Bolt handled #: by that subclass.", "= config.get_ssl_context() if ssl_context: connection_args[\"ssl\"] = ssl_context connection_args[\"server_hostname\"] = address.host", "if any of the arguments provided are passed as incompatible", "as neo4j_version from neo4j.routing import RoutingTable log = getLogger(__name__) MAGIC", "\"\"\" Attempt to establish a TCP connection to the address", "completes without a successful negotiation :raise TypeError: if any of", "been removed. 
self._routing_table.routers.discard(address) self._routing_table.readers.discard(address) self._routing_table.writers.discard(address) log.debug(\"[#0000] C: <ROUTING> table=%r\", self._routing_table)", "pool = cls(loop, opener, pool_config, address) seeds = [await pool.acquire()", "# record = await result.single() # self._routing_table = RoutingTable.parse_routing_info([record]) #", "opener function for a given set of configuration parameters. This", "Notify any waiting acquirers of this extra # capacity. self._waiting_list.notify()", "retrieve routing information\") async def _ensure_routing_table_is_fresh(self, readonly=False): \"\"\" Update the", "supported Bolt protocol version. :param address: tuples of host and", "connection_args[\"ssl\"] = ssl_context connection_args[\"server_hostname\"] = address.host log.debug(\"[#0000] C: <DIAL> %s\",", "address: tuples of host and port, such as (\"127.0.0.1\", 7687)", "to a clean state. By default, a RESET message will", "else: # If the pool is full, simply close the", "\"\"\" await self.__close(self._free_list) async def close(self): \"\"\" Close all connections", "pool in self._pools.values(): try: await pool.release(connection, force_reset=force_reset) except ValueError: pass", "cls.protocol_handlers(protocol_version) if not handlers: raise ValueError(\"No protocol handlers available (requested", "of 3.6, occurs on 'connection_lost'. This # hack is likely", "continue until connections are released, use the following sequence instead:", "in the given routing table. \"\"\" for address in self._pools:", "the address provided. :param address: :param loop: :param config: :return:", "a dirty hack for Python 3.6, which didn't include #", "full and no free connections are available, this will block", "PoolConfig from neo4j.meta import version as neo4j_version from neo4j.routing import", "address): if loop is None: self._loop = get_event_loop() else: self._loop", "Run a transaction function and return the return value from", "may not use this file except in compliance with the", "this extra # capacity. self._waiting_list.notify() @property def max_age(self): \"\"\" The", "parameters=None, discard=False, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Run an auto-commit", "If the pool is full, simply close the connection. await", "polls waiting for the stream # reader inside the protocol", "table but which don't already have pools. \"\"\" for address", "connection pool, if present, remove from the routing table and", "neo4j_version from neo4j.routing import RoutingTable log = getLogger(__name__) MAGIC =", "of the time at which this connection was opened. __t_opened", "this file except in compliance with the License. # You", "Flag to indicate that the connection is closed __closed =", "already have pools. \"\"\" for address in routing_table.servers(): if address", "Breakable from neo4j.errors import ( BoltError, BoltConnectionError, BoltSecurityError, BoltConnectionBroken, BoltHandshakeError,", "_get_routing_table_from(self, *routers): \"\"\" Try to update routing tables with the", "version is provided, all available versions will be returned. :param", "all open connections, including those in use. 


class Bolt(Addressable, object):

    #: True if this instance uses secure communication, false
    #: otherwise.
    secure = None

    #: As a class attribute, this denotes the version of Bolt handled
    #: by that subclass. As an instance attribute, this represents the
    #: version of the protocol in use.
    protocol_version = ()

    # Record of the time at which this connection was opened.
    __t_opened = None

    # Handle to the StreamReader object.
    __reader = None

    # Handle to the StreamWriter object, which can be used on close.
    __writer = None

    # Flag to indicate that the connection is closed.
    __closed = False

    @classmethod
    def default_user_agent(cls):
        """ Return the default user agent string for a connection.
        """
        template = "neo4j-python/{} Python/{}.{}.{}-{}-{} ({})"
        fields = (neo4j_version,) + tuple(version_info) + (platform,)
        return template.format(*fields)

    @classmethod
    def protocol_handlers(cls, protocol_version=None):
        """ Return a dictionary of available Bolt protocol handlers,
        keyed by version tuple. If an explicit protocol version is
        provided, the dictionary will contain either zero or one items,
        depending on whether that version is supported. If no protocol
        version is provided, all available versions will be returned.

        :param protocol_version: tuple identifying a specific protocol
            version (e.g. (3, 5)) or None
        :return: dictionary of version tuple to handler class for all
            relevant and supported protocol versions
        :raise TypeError: if protocol version is not passed in a tuple
        """

        # Carry out subclass imports locally to avoid circular
        # dependency issues.
        from neo4j.aio.bolt3 import Bolt3

        handlers = {bolt.protocol_version: bolt for bolt in [
            # This list can be updated as protocol
            # versions are added and removed.
            Bolt3,
        ]}

        if protocol_version is None:
            return handlers
        if not isinstance(protocol_version, tuple):
            raise TypeError("Protocol version must be specified as a tuple")
        return {version: handler
                for version, handler in handlers.items()
                if version == protocol_version}
\"\"\" async def run(self, cypher, parameters=None, discard=False,", "IncompleteReadError, Lock, StreamReader, StreamReaderProtocol, StreamWriter, get_event_loop, wait, ) from collections", "__init__(self, loop, opener, config, addresses, routing_context): if loop is None:", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) Addressable.set_transport(self, self.transport) async def drain(self):", "log.debug(\"Successfully updated routing table from \" \"{!r} ({!r})\".format(router, self._routing_table)) return", "except AttributeError: # pragma: no cover # This is a", "protocol_version=None): \"\"\" Return a dictionary of available Bolt protocol handlers,", "on the underlying socket connection :raise BoltHandshakeError: if handshake completes", "Try to update routing tables with the given routers. :return:", "for connections to be retained in this pool. \"\"\" return", "handler class for all relevant and supported protocol versions :raise", ":param cypher: :param parameters: :param discard: :param readonly: :param bookmarks:", "connection from the pool cx = self._free_list.popleft() except IndexError: if", "If the routing table is already fresh on entry, the", "indicates a temporary state, # such as leader switching, so", "# we are reading in absence of writer self._missing_writer =", "'wait_closed'. The code polls waiting for the stream # reader", "the given routing table. \"\"\" for address in self._pools: if", "default_user_agent(cls): \"\"\" Return the default user agent string for a", "will be overridden and a RESET will be sent regardless.", "except AttributeError: pass class Pool: def acquire(self, *, force_reset=False, timeout=None):", "not, and the pool is not full, a new connection", "self._deactivate(router) else: num_routers = len(new_routing_table.routers) num_readers = len(new_routing_table.readers) num_writers =", "has_tried_initial_routers = False if self._missing_writer: has_tried_initial_routers = True rt =", "if self._missing_writer: has_tried_initial_routers = True rt = await self._get_routing_table_from(self._initial_routers) if", "passed with unsupported values \"\"\" # Args address = Address(address)", "connection and perform protocol version negotiation, in order to construct", "else: # Plan C: wait for more capacity to become", ":return: :raise BoltConnectionLost: if an I/O error occurs on the", "auth=None, loop=None, **config): \"\"\" Open a socket connection and perform", "return cx async def acquire(self, *, force_reset=False): \"\"\" Acquire a", "await self._sanitize(cx, force_reset=force_reset) self._in_use_list.append(cx) return cx async def release(self, cx,", "attribute, this represents the #: version of the protocol in", "this pool \"\"\" log.debug(\"Releasing connection %r\", cx) if cx in", "raise NotImplementedError def release(self, *connections, force_reset=False): raise NotImplementedError def close(self,", "If the pool is full and no free connections are", "writer.write_eof() writer.close() raise @classmethod async def _connect(cls, address, loop, config):", "method exits immediately; otherwise, the refresh lock is acquired and", ":param auth: \"\"\" @property def age(self): \"\"\" The age of", "Fetch a new routing table. :param context: the routing context", "on close. 
__writer = None # Flag to indicate that", "= perf_counter() obj.__reader = reader obj.__writer = writer Addressable.set_transport(obj, writer.transport)", "handshake response %r\" % response_data, remote_address, request_data, response_data) from err", "connection pool, with an option to seed one or more", "self._select_pool(readonly=readonly) try: cx = await pool.acquire(force_reset=force_reset) except BoltError: await self._deactivate(pool.address)", "\"\"\" return self._max_size @max_size.setter def max_size(self, value): old_value = self._max_size", "ServiceUnavailable: if no writers are available :raise ProtocolError: if the", "= b\"\\x60\\x60\\xB0\\x17\" class Bolt(Addressable, object): #: True if this instance", "def _sanitize(self, cx, *, force_reset): \"\"\" Attempt to clean up", "of reopening the pool. To close gracefully, allowing work in", "writer.close() raise @classmethod async def _connect(cls, address, loop, config): \"\"\"", "(neo4j_version,) + tuple(version_info) + (platform,) return template.format(*fields) @classmethod def protocol_handlers(cls,", "S: <HANDSHAKE> %r\", local_address.port_number, response_data) try: agreed_version = Version.from_bytes(response_data) except", "is None or cx.broken or cx.closed: try: # Plan A:", "write failed\", self.remote_address) from err async def wait_closed(self): try: await", "a supported Bolt protocol version. :param address: tuples of host", "reopening the pool. To close gracefully, allowing work in progress", "address, *, auth=None, loop=None, **config): \"\"\" Open a socket connection", "and free) currently owned by this connection pool. \"\"\" return", "cx async def release(self, connection, *, force_reset=False): \"\"\" Release a", "for a supported Bolt protocol version. :param address: tuples of", "of Bolt handled #: by that subclass. As an instance", "raise BoltConnectionBroken(\"Network write failed\", self.remote_address) from err async def wait_closed(self):", "try: while self._protocol._stream_reader is not None: await sleep(0.1) except AttributeError:", "PoolConfig.consume(config) def opener(addr): return Bolt.open(addr, auth=auth, **pool_config) obj = cls(loop,", "tuples of host and port, such as (\"127.0.0.1\", 7687) :param", "% response_data, remote_address, request_data, response_data) from err try: subclass =", "await pool.acquire() try: new_routing_table = await cx.get_routing_table(self._routing_context) except BoltError: await", "# async def update_routing_table(self): # cx = await self._routers.acquire() #", "( IncompleteReadError, Lock, StreamReader, StreamReaderProtocol, StreamWriter, get_event_loop, wait, ) from", "Bolt connection, putting it back into the pool if the", "is already fresh on entry, the method exits immediately; otherwise,", "return cx async def release(self, connection, *, force_reset=False): \"\"\" Release", "from asyncio import ( IncompleteReadError, Lock, StreamReader, StreamReaderProtocol, StreamWriter, get_event_loop,", "NotALeader, ForbiddenOnReadOnlyDatabase, ) def handler(failure): \"\"\" Invalidate the routing table", "import SSLError from sys import platform, version_info from time import", "cx async def release(self, cx, *, force_reset=False): \"\"\" Release a", "to # invalidate the routing table. from neo4j.errors import (", ":param readonly: true if a readonly connection is required, otherwise", "self._routing_table.writers pools = [pool for address, pool in self._pools.items() if", "network or remote peer. 
\"\"\" return self.__reader.broken or self.__writer.broken @property", "does not permanently disable the connection pool, it merely shuts", "no writers available, then we flag we are reading in", "[pool for address, pool in self._pools.items() if address in addresses]", "\"host\": address.host, \"port\": address.port, \"family\": address.family, # TODO: other args", "\"\"\" return len(self._in_use_list) + len(self._free_list) async def _sanitize(self, cx, *,", "pool.close() else: pool.max_size = 0 await pool.prune() class Neo4j: #", "under the License. from asyncio import ( IncompleteReadError, Lock, StreamReader,", "\"bytes)\".format(len(err.partial), err.expected)) log.debug(\"[#%04X] S: <CLOSE>\", self.local_address.port_number) Breakable.set_broken(self) raise BoltConnectionBroken(message, self.remote_address)", "the pool isn't full, open # a new connection cx", "= await cls._handshake(reader, writer, config.protocol_version) # Instantiation obj = subclass(reader,", "in-use and free. \"\"\" return self._max_size @max_size.setter def max_size(self, value):", "await neo4j.update_routing_table() # print(neo4j.routing_table) # # # if __name__ ==", "self._routing_table.writers.discard(address) log.debug(\"[#0000] C: <ROUTING> table=%r\", self._routing_table) try: pool = self._pools.pop(address)", "discard=False, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Run an auto-commit transaction.", "writer) obj.secure = bool(config.secure) assert hasattr(obj, \"__ainit__\") await obj.__ainit__(auth) return", ":param readonly: :param bookmarks: :param timeout: :param metadata: :return: \"\"\"", "pragma: no cover # This is a dirty hack for", "be retained in this pool \"\"\" @classmethod async def open(cls,", "protocol version is provided, the dictionary will contain either zero", "available (requested Bolt %r)\", protocol_version) offered_versions = sorted(handlers.keys(), reverse=True)[:4] request_data", "from err async def wait_closed(self): try: await super().wait_closed() except AttributeError:", "\"\"\" The number of connections in this pool that are", "be specified as a tuple\") return {version: handler for version,", "<CLOSE>\", self.local_address.port_number) Breakable.set_broken(self) raise BoltConnectionBroken(message, self.remote_address) from err except OSError", "cx = await self._routers.acquire() # try: # result = await", "tuple identifying a specific protocol version (e.g. (3, 5)) or", "= await pool.acquire() try: new_routing_table = await cx.get_routing_table(self._routing_context) except BoltError:", "protocol = StreamReaderProtocol(reader, loop=loop) transport, _ = await loop.create_connection(lambda: protocol,", "{}}) # record = await result.single() # self._routing_table = RoutingTable.parse_routing_info([record])", ":param loop: :param config: :return: instance of a Bolt subclass", "I/O error occurs on the underlying socket connection :raise BoltHandshakeError:", "connection is broken or closed, it can be discarded. Otherwise,", "in writing, software # distributed under the License is distributed", "reset before being returned; if false, this will only occur", "routing context to use for this call :return: a new", "await result.single() # self._routing_table = RoutingTable.parse_routing_info([record]) # TODO: handle ValueError?", "pool that are currently in use. 
\"\"\" return len(self._in_use_list) @property", "ValueError: pass else: # Unhook any custom error handling and", "removed else: pool.max_size = 0 await pool.prune() async def close(self,", "connection is checked against the maximum age permitted by this", "if loop is None: self._loop = get_event_loop() else: self._loop =", "No routers if num_routers == 0: continue # No readers", "and security settings for the new connection :raise BoltConnectionError: if", "False self._refresh_lock = Lock(loop=self._loop) self._routing_context = routing_context self._max_size_per_host = config.max_size", "opener, router_addresses, loop=None): # self._routers = Neo4jPool(opener, router_addresses or self.default_router_addresses)", "metadata=None): \"\"\" Run a transaction function and return the return", "should not signal an error. # When no writers available,", "+ tuple(version_info) + (platform,) return template.format(*fields) @classmethod def protocol_handlers(cls, protocol_version=None):", "self.max_age if expired: await cx.close() return None await cx.reset(force=force_reset) return", "connections are released, use the following sequence instead: pool.max_size =", "routing table from the first router able to provide valid", "We use `discard` instead of `remove` here since the former", "force_reset=force_reset) if cx: # Carry on only if sanitation succeeded.", "object \"\"\" log.debug(\"Acquiring connection from pool %r\", self) cx =", "\"\"\" Close the connection. \"\"\" if self.closed: return if not", "capacity to become # available, then try again log.debug(\"Joining waiting", "free :param max_age: the maximum permitted age, in seconds, for", "with unsupported values \"\"\" # Args address = Address(address) if", "will block until a connection is released, or until the", "information\") async def _ensure_routing_table_is_fresh(self, readonly=False): \"\"\" Update the routing table", "leader switching, so we should not signal an error. #", "protocol_version = () # Record of the time at which", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "# a new connection cx = await self._opener(self.address) else: #", "# self._writers = Neo4jPool(opener) # self._readers = Neo4jPool(opener) # self._routing_table", "License, Version 2.0 (the \"License\"); # you may not use", "in a clean state. If forced, this check will be", "None of the routers have been successful, so just fail", "to use if no addresses are specified. default_router_addresses = Address.parse_list(\":7687", "\" * (self.max_size - self.size), ) def __contains__(self, cx): return", "__contains__(self, cx): return cx in self._in_use_list or cx in self._free_list", "5)) or None :return: dictionary of version tuple to handler", "separator=b'\\n'): # pragma: no cover assert False # not used", "= None # Handle to the StreamWriter object, which can", "Carry out subclass imports locally to avoid circular # dependency", "cx.reset(force=force_reset) return cx async def acquire(self, *, force_reset=False): \"\"\" Acquire", "sanitize the connection and return it to the pool. cx", "{ \"host\": address.host, \"port\": address.port, \"family\": address.family, # TODO: other", "raise Neo4jAvailabilityError(\"No {} service currently \" \"available\".format(\"read\" if readonly else", "belong to this pool\") async def _deactivate(self, address): \"\"\" Deactivate", "subsequent patch, and can be removed when # Python 3.6", "user agent string for a connection. 
\"\"\" template = \"neo4j-python/{}", "Version from neo4j.conf import Config, PoolConfig from neo4j.meta import version", "await self._deactivate(address) async def _get_routing_table_from(self, *routers): \"\"\" Try to update", "True async def reset(self, force=False): \"\"\" Reset the connection to", "self._max_size @max_size.setter def max_size(self, value): old_value = self._max_size self._max_size =", "a transaction cannot be carried out at this time \"\"\"", "will be returned, indicating success. \"\"\" if cx.broken or cx.closed:", "protocol # versions are added and removed. Bolt3, ]} if", "version tuple. If an explicit protocol version is provided, the", "of available Bolt protocol handlers, keyed by version tuple. If", ":raise ValueError: if any of the arguments provided are passed", "timeout: :param metadata: :raise BoltTransactionError: if a transaction cannot be", "be owned by this pool, both in-use and free. \"\"\"", "self.__close(self._in_use_list) async def __close(self, connections): \"\"\" Close all connections in", "and free :param max_age: the maximum permitted age, in seconds,", "the License for the specific language governing permissions and #", "server that can satisfy a set of parameters. :param readonly:", "\"\"\" Add pools for addresses that exist in the given", "\"<{} addr'{}' [{}{}{}]>\".format( self.__class__.__name__, self.address, \"|\" * len(self._in_use_list), \".\" *", "remote_address = Address(writer.transport.get_extra_info(\"peername\")) handlers = cls.protocol_handlers(protocol_version) if not handlers: raise", "err.errno, strerror(err.errno)) raise BoltConnectionError(\"Failed to establish a connection\", address) from", "self._free_list def __len__(self): return self.size @property def address(self): \"\"\" The", "__len__(self): return self.size @property def address(self): \"\"\" The remote address", "in use. Depending on the applications, it may be perfectly", "errors to # invalidate the routing table. from neo4j.errors import", "remote address for which this pool operates :param max_size: the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= await loop.create_connection(lambda: protocol, **connection_args) writer = BoltStreamWriter(transport, protocol, reader,", "PoolConfig.consume(config) def opener(addr): return Bolt.open(addr, auth=auth, loop=loop, **pool_config) pool =", "become # available, then try again log.debug(\"Joining waiting list\") await", "the connection. \"\"\" if self.closed: return if not self.broken: log.debug(\"[#%04X]", "3.6, occurs on 'connection_lost'. This # hack is likely safe", "self.remote_address) from err async def wait_closed(self): try: await super().wait_closed() except", "not full, a new connection will be created. If the", "and removed. Bolt3, ]} if protocol_version is None: return handlers", "address, *, auth=None, loop=None, **config): \"\"\" Create a new connection", "log.debug(\"[#%04X] S: <HANGUP>\", self.local_address.port_number) self.__writer.write_eof() self.__writer.close() try: await self.__writer.wait_closed() except", "from err class BoltStreamWriter(Addressable, Breakable, StreamWriter): \"\"\" Wrapper for asyncio.streams.StreamWriter", "all free connections. \"\"\" await self.__close(self._free_list) async def close(self): \"\"\"", "self._in_use_list.append(cx) return cx async def release(self, cx, *, force_reset=False): \"\"\"", "set of parameters. 


class BoltStreamReader(Addressable, Breakable, StreamReader):
    """ Wrapper for asyncio.streams.StreamReader
    """

    def set_transport(self, transport):
        Addressable.set_transport(self, transport)
        StreamReader.set_transport(self, transport)

    async def readuntil(self, separator=b'\n'):  # pragma: no cover
        assert False  # not used by current implementation

    async def readexactly(self, n):
        try:
            return await super().readexactly(n)
        except IncompleteReadError as err:
            message = ("Network read incomplete (received {} of {} "
                       "bytes)".format(len(err.partial), err.expected))
            log.debug("[#%04X] S: <CLOSE>", self.local_address.port_number)
            Breakable.set_broken(self)
            raise BoltConnectionBroken(message, self.remote_address) from err
        except OSError as err:
            log.debug("[#%04X] S: <CLOSE> %d %s", self.local_address.port_number,
                      err.errno, strerror(err.errno))
            Breakable.set_broken(self)
            raise BoltConnectionBroken("Network read failed", self.remote_address) from err


class BoltStreamWriter(Addressable, Breakable, StreamWriter):
    """ Wrapper for asyncio.streams.StreamWriter
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        Addressable.set_transport(self, self.transport)

    async def drain(self):
        try:
            await super().drain()
        except OSError as err:
            log.debug("[#%04X] S: <CLOSE> %d %s", self.local_address.port_number,
                      err.errno, strerror(err.errno))
            Breakable.set_broken(self)
            raise BoltConnectionBroken("Network write failed", self.remote_address) from err

    async def wait_closed(self):
        try:
            await super().wait_closed()
        except AttributeError:  # pragma: no cover
            # This is a dirty hack for Python 3.6, which didn't include
            # 'wait_closed'. The code polls waiting for the stream
            # reader inside the protocol to go away which, by the
            # implementation of 3.6, occurs on 'connection_lost'. This
            # hack is likely safe unless the implementation of 3.6
            # changes in a subsequent patch, and can be removed when
            # Python 3.6 support is no longer required.
            from asyncio import sleep
            try:
                while self._protocol._stream_reader is not None:
                    await sleep(0.1)
            except AttributeError:
                pass
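

# Both stream wrappers above exist for the same reason: they translate
# low-level asyncio failures (OSError, IncompleteReadError) into
# BoltConnectionBroken and mark the stream as broken via the Breakable
# mixin, so that the pools below can recognise dead connections and
# discard them rather than hand them back out.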
\"\"\" log.debug(\"[#0000]", "else: pool.max_size = 0 await pool.prune() class Neo4j: # The", "table from the first router able to provide valid routing", "rt = await self._get_routing_table_from(self._initial_routers) if rt: return rt rt =", "the Apache License, Version 2.0 (the \"License\"); # you may", "bookmarks=None, timeout=None, metadata=None): \"\"\" Run a transaction function and return", "has already been removed else: pool.max_size = 0 await pool.prune()", "self._get_routing_table_from(self._initial_routers) if rt: return rt # None of the routers", "try: await self.__writer.wait_closed() except BoltConnectionBroken: pass self.__closed = True async", "_connect(cls, address, loop, config): \"\"\" Attempt to establish a TCP", "+ (platform,) return template.format(*fields) @classmethod def protocol_handlers(cls, protocol_version=None): \"\"\" Return", "with the fewest in-use connections. \"\"\" await self._ensure_routing_table_is_fresh(readonly=readonly) if readonly:", "exits immediately; otherwise, the refresh lock is acquired and the", "loop=loop, **pool_config) pool = cls(loop, opener, pool_config, address) seeds =", "be passed that returns an open and ready Bolt connection", "if the address has already been removed. self._routing_table.routers.discard(address) self._routing_table.readers.discard(address) self._routing_table.writers.discard(address)", "fields = (neo4j_version,) + tuple(version_info) + (platform,) return template.format(*fields) @classmethod", "default user agent string for a connection. \"\"\" template =", "Neo4jPool(opener) # self._readers = Neo4jPool(opener) # self._routing_table = None #", "log.debug(\"[#%04X] S: <HANDSHAKE> %r\", local_address.port_number, response_data) try: agreed_version = Version.from_bytes(response_data)", "free connections. \"\"\" await self.__close(self._free_list) async def close(self): \"\"\" Close", "_deactivate(self, address): \"\"\" Deactivate an address from the connection pool,", "super().drain() except OSError as err: log.debug(\"[#%04X] S: <CLOSE> (%s)\", self.local_address.port_number,", "list, and released connections will be closed instead of being", "\"\"\" Connection pool with routing table. \"\"\" @classmethod async def", "maximum age permitted by this pool, consequently closing it on", "represents the #: version of the protocol in use. protocol_version", "the implementation of 3.6 # changes in a subsequent patch,", "= reader obj.__writer = writer Addressable.set_transport(obj, writer.transport) return obj def", "await self.prune() await self.__close(self._in_use_list) async def __close(self, connections): \"\"\" Close", "pool. To close gracefully, allowing work in progress to continue", "BoltError, BoltConnectionError, BoltSecurityError, BoltConnectionBroken, BoltHandshakeError, Neo4jAvailabilityError, ) from neo4j.api import", "Deactivate any pools that aren't represented in the given routing", "# return self._routing_table # # async def update_routing_table(self): # cx", "= len(new_routing_table.routers) num_readers = len(new_routing_table.readers) num_writers = len(new_routing_table.writers) # No", "Address(address) if loop is None: loop = get_event_loop() config =", "use. protocol_version = () # Record of the time at", "connection pool, it merely shuts down all open connections, including", "pass else: # Unhook any custom error handling and exit.", "if a connection could not be established \"\"\" assert isinstance(address,", "to a single address. 


class BoltPool:
    """ A pool of connections to a single address.

    :param opener: a function to which an address can be passed that
        returns an open and ready Bolt connection
    :param address: the remote address for which this pool operates
    :param max_size: the maximum permitted number of simultaneous
        connections that may be owned by this pool, both in-use and
        free
    :param max_age: the maximum permitted age, in seconds, for
        connections to be retained in this pool
    """

    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Create a new connection pool, with an option to seed one
        or more initial connections.
        """
        pool_config = PoolConfig.consume(config)

        def opener(addr):
            return Bolt.open(addr, auth=auth, loop=loop, **pool_config)

        pool = cls(loop, opener, pool_config, address)
        seeds = [await pool.acquire() for _ in range(pool_config.init_size)]
        for seed in seeds:
            await pool.release(seed)
        return pool

    def __init__(self, loop, opener, config, address):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop
        self._opener = opener
        self._address = Address(address)
        self._max_size = config.max_size
        self._max_age = config.max_age
        self._in_use_list = deque()
        self._free_list = deque()
        self._waiting_list = WaitingList(loop=self._loop)

    def __repr__(self):
        return "<{} addr'{}' [{}{}{}]>".format(
            self.__class__.__name__,
            self.address,
            "|" * len(self._in_use_list),
            "." * len(self._free_list),
            " " * (self.max_size - self.size),
        )

    def __contains__(self, cx):
        return cx in self._in_use_list or cx in self._free_list

    def __len__(self):
        return self.size

    @property
    def address(self):
        """ The remote address for which this pool operates.
        """
        return self._address

    @property
    def max_size(self):
        """ The maximum permitted number of simultaneous connections
        that may be owned by this pool, both in-use and free.
        """
        return self._max_size

    @max_size.setter
    def max_size(self, value):
        old_value = self._max_size
        self._max_size = value
        if value > old_value:
            # The maximum size has grown, so new slots have become
            # available. Notify any waiting acquirers of this extra
            # capacity.
            self._waiting_list.notify()

    @property
    def max_age(self):
        """ The maximum permitted age, in seconds, for connections to
        be retained in this pool.
        """
        return self._max_age

    @property
    def in_use(self):
        """ The number of connections in this pool that are currently
        in use.
        """
        return len(self._in_use_list)

    @property
    def size(self):
        """ The total number of connections (both in-use and free)
        currently owned by this connection pool.
        """
        return len(self._in_use_list) + len(self._free_list)

    async def _sanitize(self, cx, *, force_reset):
        """ Attempt to clean up a connection, such that it can be
        reused.

        If the connection is broken or closed, it can be discarded.
        Otherwise, the age of the connection is checked against the
        maximum age permitted by this pool, consequently closing it
        on expiry.

        Should the connection be neither broken, closed nor expired,
        it will be reset (optionally forcibly so) and the connection
        object will be returned, indicating success.
        """
        if cx.broken or cx.closed:
            return None
        expired = self.max_age is not None and cx.age > self.max_age
        if expired:
            await cx.close()
            return None
        await cx.reset(force=force_reset)
        return cx

    async def acquire(self, *, force_reset=False):
        """ Acquire a connection from the pool.

        In the simplest case, this will return an existing open
        connection, if one is free. If not, and the pool is not full,
        a new connection will be created. If the pool is full and no
        free connections are available, this will block until a
        connection is released, or until the acquire call is cancelled.

        :param force_reset: if true, the connection will be forcibly
            reset before being returned; if false, this will only occur
            if the connection is not already in a clean state
        :return: a Bolt connection object
        """
        log.debug("Acquiring connection from pool %r", self)
        cx = None
        while cx is None or cx.broken or cx.closed:
            try:
                # Plan A: select a free connection from the pool
                cx = self._free_list.popleft()
            except IndexError:
                if self.size < self.max_size:
                    # Plan B: if the pool isn't full, open
                    # a new connection
                    cx = await self._opener(self.address)
                else:
                    # Plan C: wait for more capacity to become
                    # available, then try again
                    log.debug("Joining waiting list")
                    await self._waiting_list.join()
            else:
                cx = await self._sanitize(cx, force_reset=force_reset)
        self._in_use_list.append(cx)
        return cx

    async def release(self, cx, *, force_reset=False):
        """ Release a Bolt connection, putting it back into the pool
        if the connection is healthy and the pool is not already at
        capacity.

        :param cx: the connection to release
        :param force_reset: if true, the connection will be forcibly
            reset before being released back into the pool; if false,
            this will only occur if the connection is not already in a
            clean state
        :raise ValueError: if the connection is not currently in use,
            or if it does not belong to this pool
        """
        log.debug("Releasing connection %r", cx)
        if cx in self._in_use_list:
            self._in_use_list.remove(cx)
            if self.size < self.max_size:
                # If there is spare capacity in the pool, attempt to
                # sanitize the connection and return it to the pool.
                cx = await self._sanitize(cx, force_reset=force_reset)
                if cx:
                    # Carry on only if sanitation succeeded.
                    if self.size < self.max_size:
                        # Check again if there is still capacity.
                        self._free_list.append(cx)
                        self._waiting_list.notify()
                    else:
                        # Otherwise, close the connection.
                        await cx.close()
            else:
                # If the pool is full, simply close the connection.
                await cx.close()
        elif cx in self._free_list:
            raise ValueError("Connection is not in use")
        else:
            raise ValueError("Connection does not belong to this pool")

    async def prune(self):
        """ Close all free connections.
        """
        await self.__close(self._free_list)

    async def close(self):
        """ Close all connections immediately.

        This does not permanently disable the connection pool; it
        merely shuts down all open connections, including those in
        use. Depending on the application, it may be perfectly
        acceptable to re-acquire connections after pool closure,
        which will have the implicit effect of reopening the pool.

        To close gracefully, allowing work in progress to continue
        until connections are released, use the following sequence
        instead:

            pool.max_size = 0
            pool.prune()

        This will force all future connection acquisitions onto the
        waiting list, and released connections will be closed instead
        of being returned to the pool.
        """
        await self.prune()
        await self.__close(self._in_use_list)

    async def __close(self, connections):
        """ Close all connections in the given list.
        """
        closers = deque()
        while True:
            try:
                cx = connections.popleft()
            except IndexError:
                break
            else:
                closers.append(cx.close())
        if closers:
            await wait(closers, loop=self._loop)
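

# A minimal sketch of the BoltPool life cycle; the address and
# credentials are placeholders and a reachable single server is assumed.
# The shutdown sequence mirrors the graceful close described in the
# BoltPool.close docstring.

async def _example_pool_usage():
    pool = await BoltPool.open(("localhost", 7687), auth=("neo4j", "password"))
    cx = await pool.acquire()
    try:
        log.info("Acquired %r from %r", cx, pool)
    finally:
        await pool.release(cx)
    # Graceful shutdown: stop handing out connections, then close the
    # idle ones that accumulate in the free list.
    pool.max_size = 0
    await pool.prune()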
\"\"\" log.debug(\"[#0000] C:", "in the given routing table but which don't already have", "self.max_size: # Plan B: if the pool isn't full, open", "@classmethod async def _handshake(cls, reader, writer, protocol_version): \"\"\" Carry out", "connections.popleft() except IndexError: break else: closers.append(cx.close()) if closers: await wait(closers,", "release(self, connection, *, force_reset=False): \"\"\" Release a connection back into", "# See the License for the specific language governing permissions", "in routing_table.servers(): if address not in self._pools: self._pools[address] = BoltPool(self._loop,", "async def readuntil(self, separator=b'\\n'): # pragma: no cover assert False", "subclass = handlers[agreed_version] except KeyError: log.debug(\"Unsupported Bolt protocol version %s\",", "self._routers.acquire() # try: # result = await cx.run(\"CALL dbms.cluster.routing.getRoutingTable($context)\", {\"context\":", "otherwise. secure = None #: As a class attribute, this", "first router able to provide valid routing information. \"\"\" #", "rt = await self._get_routing_table_from(self._initial_routers) if rt: return rt # None", "= None while cx is None or cx.broken or cx.closed:", "will be closed instead of being returned to the pool.", "items, depending on whether that version is supported. If no", "available, this will block until a connection is released, or", "reader, loop) except SSLError as err: log.debug(\"[#%04X] S: <REJECT> %s", "except KeyError: log.debug(\"Unsupported Bolt protocol version %s\", agreed_version) raise BoltHandshakeError(\"Unsupported", "instance or None if the given router is currently unable", "\"\"\" Deactivate an address from the connection pool, if present,", "%s\", agreed_version) raise BoltHandshakeError(\"Unsupported Bolt protocol version\", remote_address, request_data, response_data)", "this pool operates. \"\"\" return self._address @property def max_size(self): \"\"\"", ":17603\", auth=(\"neo4j\", \"password\")) # await neo4j.update_routing_table() # print(neo4j.routing_table) # #", "up a connection, such that it can be reused. If", "new slots have become # available. Notify any waiting acquirers", "the acquire call is cancelled. :param force_reset: if true, the", "pool.prune() async def close(self, force=False): \"\"\" Close all connections and", "request_data, response_data) else: return subclass def __new__(cls, reader, writer): obj", "sorted(handlers.keys(), reverse=True)[:4] request_data = MAGIC + b\"\".join( v.to_bytes() for v", "SSLError as err: log.debug(\"[#%04X] S: <REJECT> %s (%d %s)\", 0,", "while self._protocol._stream_reader is not None: await sleep(0.1) except AttributeError: pass", "from time import perf_counter from neo4j.addressing import Address from neo4j.aio._collections", "a Bolt connection, putting it back into the pool if", "value > old_value: # The maximum size has grown, so", "pools_by_usage = {} for pool in pools: pools_by_usage.setdefault(pool.in_use, []).append(pool) if", "7687) :param auth: :param loop: :param config: :return: instance of", "None: self._loop = get_event_loop() else: self._loop = loop self._opener =", "which don't already have pools. \"\"\" for address in routing_table.servers():", "stream # reader inside the protocol to go away which,", "open connections, including those in use. 
Depending on the applications,", "# When no writers available, then we flag we are", "begin(self, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Begin an explicit transaction.", "option to seed one or more initial connections. \"\"\" pool_config", "a clean state. If forced, this check will be overridden", "\"\"\" The remote address for which this pool operates. \"\"\"", "of the connection is checked against the maximum age permitted", "\"\"\" if cx.broken or cx.closed: return None expired = self.max_age", "ssl_context = config.get_ssl_context() if ssl_context: connection_args[\"ssl\"] = ssl_context connection_args[\"server_hostname\"] =", "available, then we flag we are reading in absence of", "refresh lock. If the routing table is already fresh on", "both in-use and free. \"\"\" return self._max_size @max_size.setter def max_size(self,", "as leader switching, so we should not signal an error.", "# Carry out subclass imports locally to avoid circular #", "a connection, such that it can be reused. If the", "3-tuple of reader, writer and security settings for the new", "more initial connections. \"\"\" pool_config = PoolConfig.consume(config) def opener(addr): return", "} ssl_context = config.get_ssl_context() if ssl_context: connection_args[\"ssl\"] = ssl_context connection_args[\"server_hostname\"]", "pool. \"\"\" return self._max_age @property def in_use(self): \"\"\" The number", "(received {} of {} \" \"bytes)\".format(len(err.partial), err.expected)) log.debug(\"[#%04X] S: <CLOSE>\",", "of `remove` here since the former # will not fail", "to use for this call :return: a new RoutingTable instance", "ValueError: if the connection is not currently in use, or", "been closed locally.\"\"\" return self.__closed async def close(self): \"\"\" Close", "connection will be forcibly reset before being returned; if false,", "to the address provided. :param address: :param loop: :param config:", "in self._pools: if address not in routing_table: await self._deactivate(address) async", "# @property # def routing_table(self): # return self._routing_table # #", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "initial connections. \"\"\" pool_config = PoolConfig.consume(config) def opener(addr): return Bolt.open(addr,", "S: <CLOSE>\", self.local_address.port_number) Breakable.set_broken(self) raise BoltConnectionBroken(message, self.remote_address) from err except", "This does not permanently disable the connection pool, it merely", "(optionally forcibly so) and the connection object will be returned,", "from neo4j.conf import Config, PoolConfig from neo4j.meta import version as", "while True: try: cx = connections.popleft() except IndexError: break else:", "age of the connection is checked against the maximum age", "to continue until connections are released, use the following sequence", "otherwise false :param force_reset: \"\"\" while True: pool = await", "writing, software # distributed under the License is distributed on", "a clean state :raise ValueError: if the connection is not", "err class BoltStreamWriter(Addressable, Breakable, StreamWriter): \"\"\" Wrapper for asyncio.streams.StreamWriter \"\"\"", "Unhook any custom error handling and exit. from neo4j.errors import", "handler for version, handler in handlers.items() if version == protocol_version}", "for which this pool operates. \"\"\" return self._address @property def", "neo4j.api import Version from neo4j.conf import Config, PoolConfig from neo4j.meta", "the network or remote peer. 
\"\"\" return self.__reader.broken or self.__writer.broken", "in this pool that are currently in use. \"\"\" return", "_select_pool(self, readonly=False): \"\"\" Selects the pool with the fewest in-use", "= Neo4jPool(opener, router_addresses or self.default_router_addresses) # self._writers = Neo4jPool(opener) #", "in order to construct and return a Bolt client instance", "protocol to go away which, by the # implementation of", "= await self._select_pool(readonly=readonly) try: cx = await pool.acquire(force_reset=force_reset) except BoltError:", "connection will be created. If the pool is full and", "# The default router address list to use if no", "router_addresses, loop=None): # self._routers = Neo4jPool(opener, router_addresses or self.default_router_addresses) #", "the refresh lock. If the routing table is already fresh", "def in_use(self): \"\"\" The number of connections in this pool", "Addressable.set_transport(self, transport) StreamReader.set_transport(self, transport) async def readuntil(self, separator=b'\\n'): # pragma:", "self._free_list = deque() self._waiting_list = WaitingList(loop=self._loop) def __repr__(self): return \"<{}", "version. :param reader: :param writer: :param protocol_version: :return: :raise BoltConnectionLost:", "it can be reused. If the connection is broken or", "a tuple\") return {version: handler for version, handler in handlers.items()", "routing_table): \"\"\" Add pools for addresses that exist in the", "transport): Addressable.set_transport(self, transport) StreamReader.set_transport(self, transport) async def readuntil(self, separator=b'\\n'): #", "routing_context): if loop is None: self._loop = get_event_loop() else: self._loop", "created. If the pool is full and no free connections", "wait(closers, loop=self._loop) class Neo4jPool: \"\"\" Connection pool with routing table.", "to update routing tables with the given routers. :return: True", "local_address.port_number, local_address, remote_address) return reader, writer @classmethod async def _handshake(cls,", "belong to this pool \"\"\" log.debug(\"Releasing connection %r\", cx) if", "instead of `remove` here since the former # will not", "then try again log.debug(\"Joining waiting list\") await self._waiting_list.join() else: cx", "await self._get_routing_table_from(self._initial_routers) if rt: return rt # None of the", "cls._handshake(reader, writer, config.protocol_version) # Instantiation obj = subclass(reader, writer) obj.secure", "if self._routing_table.is_fresh(readonly=readonly): if readonly: # if reader is fresh but", "2002-2019 \"Neo4j,\" # Neo4j Sweden AB [http://neo4j.com] # # This", "cover assert False # not used by current implementation async", "return obj def __init__(self, loop, opener, config, addresses, routing_context): if", "reset before being released back into the pool; if false,", "the connection is checked against the maximum age permitted by", "progress to continue until connections are released, use the following", "we are reading in absence of writer self._missing_writer = (num_writers", "if not self.broken: log.debug(\"[#%04X] S: <HANGUP>\", self.local_address.port_number) self.__writer.write_eof() self.__writer.close() try:", "address) # We use `discard` instead of `remove` here since", "at which this connection was opened. __t_opened = None #", "return {version: handler for version, handler in handlers.items() if version", "config self._pools = {} self._missing_writer = False self._refresh_lock = Lock(loop=self._loop)", "has already been removed. 
self._routing_table.routers.discard(address) self._routing_table.readers.discard(address) self._routing_table.writers.discard(address) log.debug(\"[#0000] C: <ROUTING>", "force=False): raise NotImplementedError class BoltPool: \"\"\" A pool of connections", "maximum permitted number of simultaneous connections that may be owned", "\"\"\" A pool of connections to a single address. :param", "self.__writer.close() try: await self.__writer.wait_closed() except BoltConnectionBroken: pass self.__closed = True", "been removed else: pool.max_size = 0 await pool.prune() async def", "closed locally.\"\"\" return self.__closed async def close(self): \"\"\" Close the", "self._pools = {} self._missing_writer = False self._refresh_lock = Lock(loop=self._loop) self._routing_context", "get_event_loop() else: self._loop = loop self._opener = opener self._address =", "connection to a clean state. By default, a RESET message", "(both in-use and free) currently owned by this connection pool.", "self._free_list.append(cx) self._waiting_list.notify() else: # Otherwise, close the connection. await cx.close()", "import ( BoltError, BoltConnectionError, BoltSecurityError, BoltConnectionBroken, BoltHandshakeError, Neo4jAvailabilityError, ) from", "= config.max_size self._initial_routers = addresses self._routing_table = RoutingTable(addresses) self._activate_new_pools_in(self._routing_table) def", "present, remove from the routing table and also closing all", "in self._in_use_list or cx in self._free_list def __len__(self): return self.size", "break else: closers.append(cx.close()) if closers: await wait(closers, loop=self._loop) class Neo4jPool:", "supported protocol versions :raise TypeError: if protocol version is not", "in self._in_use_list: self._in_use_list.remove(cx) if self.size < self.max_size: # If there", "NotALeader, ForbiddenOnReadOnlyDatabase, ) connection.del_failure_handler(NotALeader) connection.del_failure_handler(ForbiddenOnReadOnlyDatabase) break else: raise ValueError(\"Connection does", "this pool, both in-use and free :param max_age: the maximum", "the connection be neither broken, closed nor expired, it will", "def update_routing_table(self): # cx = await self._routers.acquire() # try: #", "\"\"\" pools = dict(self._pools) self._pools.clear() for address, pool in pools.items():", "updated routing table from \" \"{!r} ({!r})\".format(router, self._routing_table)) return new_routing_table", "open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None): # opener = Bolt.opener(auth=auth,", "loop, config) try: # Handshake subclass = await cls._handshake(reader, writer,", "strerror(err.errno)) raise BoltConnectionError(\"Failed to establish a connection\", address) from err", "to establish a connection\", address) from err else: local_address =", "encoding: utf-8 -*- # Copyright (c) 2002-2019 \"Neo4j,\" # Neo4j", "construct and return a Bolt client instance for a supported", "being returned; if false, this will only occur if the", "ValueError as err: writer.close() raise BoltHandshakeError(\"Unexpected handshake response %r\" %", "RoutingTable log = getLogger(__name__) MAGIC = b\"\\x60\\x60\\xB0\\x17\" class Bolt(Addressable, object):", "false #: otherwise. secure = None #: As a class", "new_routing_table finally: await pool.release(cx) return None async def _get_routing_table(self): \"\"\"", "is still required. \"\"\" if self._routing_table.is_fresh(readonly=readonly): return async with self._refresh_lock:", "details, such as within a connection pool. 
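

# A minimal routing sketch for Neo4jPool; the router address and
# credentials are placeholders and a reachable cluster is assumed.

async def _example_routed_read():
    pool = await Neo4jPool.open(("localhost", 7687), auth=("neo4j", "password"))
    cx = await pool.acquire(readonly=True)
    try:
        log.info("Reading via %r", cx)
    finally:
        await pool.release(cx)
    await pool.close()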
\"\"\" async def", "len(self._in_use_list), \".\" * len(self._free_list), \" \" * (self.max_size - self.size),", "\".\" * len(self._free_list), \" \" * (self.max_size - self.size), )", "template.format(*fields) @classmethod def protocol_handlers(cls, protocol_version=None): \"\"\" Return a dictionary of", "* len(self._free_list), \" \" * (self.max_size - self.size), ) def", "closers: await wait(closers, loop=self._loop) class Neo4jPool: \"\"\" Connection pool with", "full, simply close the connection. await cx.close() elif cx in", "if not pools_by_usage: raise Neo4jAvailabilityError(\"No {} service currently \" \"available\".format(\"read\"", "import deque from logging import getLogger from os import strerror", "of connections in this pool that are currently in use.", "not in existing_routers: rt = await self._get_routing_table_from(self._initial_routers) if rt: return", "BoltConnectionLost: if an I/O error occurs on the underlying socket", "BoltHandshakeError: if handshake completes without a successful negotiation :raise TypeError:", "but writers are not, then # we are reading in", "\"\"\" Wrapper for asyncio.streams.StreamWriter \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args,", "self._routing_table = None # # @property # def routing_table(self): #", "- self.__t_opened @property def broken(self): \"\"\" Flag to indicate whether", "not passed in a tuple \"\"\" # Carry out subclass", "connection is closed __closed = False @classmethod def default_user_agent(cls): \"\"\"", "class BoltStreamWriter(Addressable, Breakable, StreamWriter): \"\"\" Wrapper for asyncio.streams.StreamWriter \"\"\" def", "will be forcibly reset before being released back into the", "seed in seeds: await pool.release(seed) return pool def __init__(self, loop,", "handler) cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler) return cx async def release(self, connection, *,", "the routers have been successful, so just fail log.error(\"Unable to", "the address has already been removed. self._routing_table.routers.discard(address) self._routing_table.readers.discard(address) self._routing_table.writers.discard(address) log.debug(\"[#0000]", ":raise BoltConnectionLost: if an I/O error occurs on the underlying", "Close all free connections. \"\"\" await self.__close(self._free_list) async def close(self):", "*, force=False): raise NotImplementedError class BoltPool: \"\"\" A pool of", "await super().wait_closed() except AttributeError: # pragma: no cover # This", "pool. In the simplest case, this will return an existing", "such as within a connection pool. \"\"\" async def f(address,", "Handshake subclass = await cls._handshake(reader, writer, config.protocol_version) # Instantiation obj", "= Address(transport.get_extra_info(\"peername\")) log.debug(\"[#%04X] S: <ACCEPT> %s -> %s\", local_address.port_number, local_address,", "by current implementation async def readexactly(self, n): try: return await", "else: # Unhook any custom error handling and exit. 
from", "unsupported values \"\"\" # Args address = Address(address) if loop", "arguments provided are passed as incompatible types :raise ValueError: if", "loop is not None assert isinstance(config, Config) connection_args = {", "super().__init__(*args, **kwargs) Addressable.set_transport(self, self.transport) async def drain(self): try: await super().drain()", "def release(self, cx, *, force_reset=False): \"\"\" Release a Bolt connection,", "auth=(\"neo4j\", \"password\")) # await neo4j.update_routing_table() # print(neo4j.routing_table) # # #", "return self._max_size @max_size.setter def max_size(self, value): old_value = self._max_size self._max_size", "try: # Handshake subclass = await cls._handshake(reader, writer, config.protocol_version) #", "if cx in self._in_use_list: self._in_use_list.remove(cx) if self.size < self.max_size: #", "def open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None): # opener =", "is still capacity. self._free_list.append(cx) self._waiting_list.notify() else: # Otherwise, close the", "Close all connections immediately. This does not permanently disable the", "@max_size.setter def max_size(self, value): old_value = self._max_size self._max_size = value", "except KeyError: pass # assume the address has already been", "for version, handler in handlers.items() if version == protocol_version} @classmethod", "pools_by_usage: raise Neo4jAvailabilityError(\"No {} service currently \" \"available\".format(\"read\" if readonly", "finally: await pool.release(cx) return None async def _get_routing_table(self): \"\"\" Update", "v.to_bytes() for v in offered_versions).ljust(16, b\"\\x00\") log.debug(\"[#%04X] C: <HANDSHAKE> %r\",", "pool.max_size = 0 pool.prune() This will force all future connection", "writer.drain() response_data = await reader.readexactly(4) log.debug(\"[#%04X] S: <HANDSHAKE> %r\", local_address.port_number,", "log.debug(\"Acquiring connection from pool %r\", self) cx = None while", "< self.max_size: # Plan B: if the pool isn't full,", "the connection object will be returned, indicating success. \"\"\" if", "Copyright (c) 2002-2019 \"Neo4j,\" # Neo4j Sweden AB [http://neo4j.com] #", "the applications, it may be perfectly acceptable to re-acquire connections", "ValueError? # return self._routing_table # finally: # self._routers.release(cx) # async", "Attempt to establish a TCP connection to the address provided.", "def __init__(self, loop, opener, config, addresses, routing_context): if loop is", "= addresses self._routing_table = RoutingTable(addresses) self._activate_new_pools_in(self._routing_table) def _activate_new_pools_in(self, routing_table): \"\"\"", "open connection, if one is free. If not, and the", "acquire(self, *, force_reset=False, timeout=None): raise NotImplementedError def release(self, *connections, force_reset=False):", "router_addresses = Address.parse_list(\" \".join(addresses), default_port=7687) # return cls(opener, router_addresses, loop=loop)", "Bolt handled #: by that subclass. As an instance attribute,", "by that subclass. 
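    # The handshake above is a fixed-size exchange: the 4-byte MAGIC
    # preamble, then up to four proposed versions, each encoded in 4 bytes
    # and padded with zeroes to exactly 16 bytes (20 bytes in total); the
    # server replies with the 4-byte encoding of whichever version it
    # selects. A hedged sketch of the request layout, assuming the
    # big-endian 00 00 <minor> <major> version encoding defined by the Bolt
    # handshake; illustrative only, not part of this module:
    #
    # def example_handshake_request(versions=((3, 0),)):
    #     def to_bytes(version):
    #         major, minor = version
    #         return bytes((0, 0, minor, major))
    #     return MAGIC + b"".join(to_bytes(v) for v in versions).ljust(16, b"\x00")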
    def __new__(cls, reader, writer):
        obj = super().__new__(cls)
        obj.__t_opened = perf_counter()
        obj.__reader = reader
        obj.__writer = writer
        Addressable.set_transport(obj, writer.transport)
        return obj

    def __repr__(self):
        return "<Bolt address=%r protocol_version=%r>" % (self.remote_address,
                                                          self.protocol_version)

    async def __ainit__(self, auth):
        """ Asynchronous initializer for implementation by subclasses.

        :param auth:
        """

    @property
    def age(self):
        """ The age of this connection in seconds.
        """
        return perf_counter() - self.__t_opened

    @property
    def broken(self):
        """ Flag to indicate whether this connection has been broken
        by the network or remote peer.
        """
        return self.__reader.broken or self.__writer.broken

    @property
    def closed(self):
        """ Flag to indicate whether this connection has been closed
        locally.
        """
        return self.__closed

    async def close(self):
        """ Close the connection.
        """
        if self.closed:
            return
        if not self.broken:
            log.debug("[#%04X] S: <HANGUP>", self.local_address.port_number)
            self.__writer.write_eof()
            self.__writer.close()
            try:
                await self.__writer.wait_closed()
            except BoltConnectionBroken:
                pass
        self.__closed = True

    async def reset(self, force=False):
        """ Reset the connection to a clean state.

        By default, a RESET message will only be sent if required,
        i.e. if the connection is not already in a clean state. If
        forced, this check will be overridden and a RESET will be sent
        regardless.
        """

    async def run(self, cypher, parameters=None, discard=False, readonly=False,
                  bookmarks=None, timeout=None, metadata=None):
        """ Run an auto-commit transaction.

        :param cypher:
        :param parameters:
        :param discard:
        :param readonly:
        :param bookmarks:
        :param timeout:
        :param metadata:
        :raise BoltTransactionError: if a transaction cannot be carried
            out at this time
        """

    async def begin(self, readonly=False, bookmarks=None,
                    timeout=None, metadata=None):
        """ Begin an explicit transaction.

        :param readonly:
        :param bookmarks:
        :param timeout:
        :param metadata:
        :return:
        """

    async def run_tx(self, f, args=None, kwargs=None, readonly=False,
                     bookmarks=None, timeout=None, metadata=None):
        """ Run a transaction function and return the return value
        from that function.
        """

    async def get_routing_table(self, context=None):
        """ Fetch a new routing table.

        :param context: the routing context to use for this call
        :return: a new RoutingTable instance or None if the given
            router is currently unable to provide routing information
        :raise ServiceUnavailable: if no writers are available
        :raise ProtocolError: if the routing information received is
            unusable
        """
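# A hedged usage sketch for the Bolt class, following the commented example
# convention at the end of this module. The address, credentials and Cypher
# below are placeholders, and `run` is only implemented by protocol
# subclasses such as Bolt3.
#
# async def example_bolt_usage():
#     cx = await Bolt.open(("localhost", 7687), auth=("neo4j", "password"))
#     try:
#         await cx.run("RETURN 1", discard=True)
#     finally:
#         await cx.close()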
class BoltStreamReader(Addressable, Breakable, StreamReader):
    """ Wrapper for asyncio.streams.StreamReader
    """

    def set_transport(self, transport):
        Addressable.set_transport(self, transport)
        StreamReader.set_transport(self, transport)

    async def readuntil(self, separator=b'\n'):  # pragma: no cover
        assert False  # not used by current implementation

    async def read(self, n=-1):  # pragma: no cover
        assert False  # not used by current implementation

    async def readexactly(self, n):
        try:
            return await super().readexactly(n)
        except IncompleteReadError as err:
            message = ("Network read incomplete (received {} of {} "
                       "bytes)".format(len(err.partial), err.expected))
            log.debug("[#%04X] S: <CLOSE>", self.local_address.port_number)
            Breakable.set_broken(self)
            raise BoltConnectionBroken(message, self.remote_address) from err
        except OSError as err:
            log.debug("[#%04X] S: <CLOSE> %d %s", self.local_address.port_number,
                      err.errno, strerror(err.errno))
            Breakable.set_broken(self)
            raise BoltConnectionBroken("Network read failed",
                                       self.remote_address) from err


class BoltStreamWriter(Addressable, Breakable, StreamWriter):
    """ Wrapper for asyncio.streams.StreamWriter
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        Addressable.set_transport(self, self.transport)

    async def drain(self):
        try:
            await super().drain()
        except OSError as err:
            log.debug("[#%04X] S: <CLOSE> (%s)", self.local_address.port_number, err)
            Breakable.set_broken(self)
            raise BoltConnectionBroken("Network write failed",
                                       self.remote_address) from err

    async def wait_closed(self):
        try:
            await super().wait_closed()
        except AttributeError:  # pragma: no cover
            # This is a dirty hack for Python 3.6, which didn't include
            # 'wait_closed'. The code polls waiting for the stream
            # reader inside the protocol to go away which, by the
            # implementation of 3.6, occurs on 'connection_lost'. This
            # hack is likely safe unless the implementation of 3.6
            # changes in a subsequent patch, and can be removed when
            # Python 3.6 support is no longer required.
            #
            from asyncio import sleep
            try:
                while self._protocol._stream_reader is not None:
                    await sleep(0.1)
            except AttributeError:
                pass
class Pool:

    def acquire(self, *, force_reset=False, timeout=None):
        raise NotImplementedError

    def release(self, *connections, force_reset=False):
        raise NotImplementedError

    def close(self, *, force=False):
        raise NotImplementedError
class BoltPool:
    """ A pool of connections to a single address.

    :param opener: a function to which an address can be passed that
        returns an open and ready Bolt connection
    :param address: the remote address for which this pool operates
    :param max_size: the maximum permitted number of simultaneous
        connections that may be owned by this pool, both in-use and
        free
    :param max_age: the maximum permitted age, in seconds, for
        connections to be retained in this pool
    """

    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Create a new connection pool, with an option to seed one
        or more initial connections.
        """
        pool_config = PoolConfig.consume(config)

        def opener(addr):
            return Bolt.open(addr, auth=auth, loop=loop, **pool_config)

        pool = cls(loop, opener, pool_config, address)
        seeds = [await pool.acquire() for _ in range(pool_config.init_size)]
        for seed in seeds:
            await pool.release(seed)
        return pool

    def __init__(self, loop, opener, config, address):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop
        self._opener = opener
        self._address = Address(address)
        self._max_size = config.max_size
        self._max_age = config.max_age
        self._in_use_list = deque()
        self._free_list = deque()
        self._waiting_list = WaitingList(loop=self._loop)

    def __repr__(self):
        return "<{} addr'{}' [{}{}{}]>".format(
            self.__class__.__name__,
            self.address,
            "|" * len(self._in_use_list),
            "." * len(self._free_list),
            " " * (self.max_size - self.size),
        )

    def __contains__(self, cx):
        return cx in self._in_use_list or cx in self._free_list

    def __len__(self):
        return self.size

    @property
    def address(self):
        """ The remote address for which this pool operates.
        """
        return self._address

    @property
    def max_size(self):
        """ The maximum permitted number of simultaneous connections
        that may be owned by this pool, both in-use and free.
        """
        return self._max_size

    @max_size.setter
    def max_size(self, value):
        old_value = self._max_size
        self._max_size = value
        if value > old_value:
            # The maximum size has grown, so new slots have become
            # available. Notify any waiting acquirers of this extra
            # capacity.
            self._waiting_list.notify()

    @property
    def max_age(self):
        """ The maximum permitted age, in seconds, for connections to
        be retained in this pool.
        """
        return self._max_age

    @property
    def in_use(self):
        """ The number of connections in this pool that are currently
        in use.
        """
        return len(self._in_use_list)

    @property
    def size(self):
        """ The total number of connections (both in-use and free)
        currently owned by this connection pool.
        """
        return len(self._in_use_list) + len(self._free_list)

    async def _sanitize(self, cx, *, force_reset):
        """ Attempt to clean up a connection, such that it can be
        reused.

        If the connection is broken or closed, it can be discarded.
        Otherwise, the connection is checked against the maximum age
        permitted by this pool, consequently closing it on expiry.

        Should the connection be neither broken, closed nor expired,
        it will be reset (optionally forcibly so) and the connection
        object will be returned, indicating success.
        """
        if cx.broken or cx.closed:
            return None
        expired = self.max_age is not None and cx.age > self.max_age
        if expired:
            await cx.close()
            return None
        await cx.reset(force=force_reset)
        return cx
\"\"\" # copied because", "closers = deque() while True: try: cx = connections.popleft() except", "async def open(cls, *addresses, auth=None, routing_context=None, loop=None, **config): pool_config =", "True if this instance uses secure communication, false #: otherwise.", "will be sent regardless. \"\"\" async def run(self, cypher, parameters=None,", "router able to provide valid routing information. \"\"\" # copied", "]} if protocol_version is None: return handlers if not isinstance(protocol_version,", "Bolt protocol version %s\", agreed_version) raise BoltHandshakeError(\"Unsupported Bolt protocol version\",", "\"\"\" Create a new connection pool, with an option to", "else: local_address = Address(transport.get_extra_info(\"sockname\")) remote_address = Address(transport.get_extra_info(\"peername\")) log.debug(\"[#%04X] S: <ACCEPT>", "in self._pools: self._pools[address] = BoltPool(self._loop, self._opener, self._config, address) async def", "None or cx.broken or cx.closed: try: # Plan A: select", "acquiring the refresh lock. If the routing table is already", "broken by the network or remote peer. \"\"\" return self.__reader.broken", "version of Bolt handled #: by that subclass. As an", "use for this call :return: a new RoutingTable instance or", "open # a new connection cx = await self._opener(self.address) else:", "a successful negotiation :raise TypeError: if any of the arguments", "still capacity. self._free_list.append(cx) self._waiting_list.notify() else: # Otherwise, close the connection.", "log.debug(\"[#%04X] C: <HANDSHAKE> %r\", local_address.port_number, request_data) writer.write(request_data) await writer.drain() response_data", "back into the pool; if false, this will only occur", "= address.host log.debug(\"[#0000] C: <DIAL> %s\", address) try: reader =", "await self._select_pool(readonly=readonly) try: cx = await pool.acquire(force_reset=force_reset) except BoltError: await", "pool; if false, this will only occur if the connection", "MAGIC + b\"\".join( v.to_bytes() for v in offered_versions).ljust(16, b\"\\x00\") log.debug(\"[#%04X]", "= cls(loop, opener, config, addresses, routing_context) # TODO: get initial", "%r\" % response_data, remote_address, request_data, response_data) from err try: subclass", "socket connection :raise BoltHandshakeError: if handshake completes without a successful", "connections (both in-use and free) currently owned by this connection", "and a RESET will be sent regardless. \"\"\" async def", "= Version.from_bytes(response_data) except ValueError as err: writer.close() raise BoltHandshakeError(\"Unexpected handshake", "pool.prune() class Neo4j: # The default router address list to", "= list(self._routing_table.routers) has_tried_initial_routers = False if self._missing_writer: has_tried_initial_routers = True", "the pool is not already at capacity. :param cx: the", "else \"write\")) return choice(pools_by_usage[min(pools_by_usage)]) async def acquire(self, *, readonly=False, force_reset=False):", "until the acquire call is cancelled. :param force_reset: if true,", "address, pool in pools.items(): if force: await pool.close() else: pool.max_size", "connections to be retained in this pool. \"\"\" return self._max_age", "may be perfectly acceptable to re-acquire connections after pool closure,", "auth): \"\"\" Asynchronous initializer for implementation by subclasses. 
    async def prune(self):
        """ Close all free connections.
        """
        await self.__close(self._free_list)

    async def close(self):
        """ Close all connections immediately.

        This does not permanently disable the connection pool. It
        merely shuts down all open connections, including those in
        use. Depending on the application, it may be perfectly
        acceptable to re-acquire connections after pool closure,
        which will have the implicit effect of reopening the pool.

        To close gracefully, allowing work in progress to continue
        until connections are released, use the following sequence
        instead:

            pool.max_size = 0
            pool.prune()

        This will force all future connection acquisitions onto the
        waiting list, and released connections will be closed instead
        of being returned to the pool.
        """
        await self.prune()
        await self.__close(self._in_use_list)

    async def __close(self, connections):
        """ Close all connections in the given list.
        """
        closers = deque()
        while True:
            try:
                cx = connections.popleft()
            except IndexError:
                break
            else:
                closers.append(cx.close())
        if closers:
            await wait(closers, loop=self._loop)
class Neo4jPool:
    """ Connection pool with routing table.
    """

    @classmethod
    async def open(cls, *addresses, auth=None, routing_context=None,
                   loop=None, **config):
        pool_config = PoolConfig.consume(config)

        def opener(addr):
            return Bolt.open(addr, auth=auth, loop=loop, **pool_config)

        obj = cls(loop, opener, pool_config, addresses, routing_context)
        # TODO: get initial routing table and construct
        await obj._ensure_routing_table_is_fresh()
        return obj

    def __init__(self, loop, opener, config, addresses, routing_context):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop
        self._opener = opener
        self._config = config
        self._pools = {}
        self._missing_writer = False
        self._refresh_lock = Lock(loop=self._loop)
        self._routing_context = routing_context
        self._max_size_per_host = config.max_size
        self._initial_routers = addresses
        self._routing_table = RoutingTable(addresses)
        self._activate_new_pools_in(self._routing_table)

    def _activate_new_pools_in(self, routing_table):
        """ Add pools for addresses that exist in the given routing
        table but which don't already have pools.
        """
        for address in routing_table.servers():
            if address not in self._pools:
                self._pools[address] = BoltPool(self._loop, self._opener,
                                                self._config, address)

    async def _deactivate_pools_not_in(self, routing_table):
        """ Deactivate any pools that aren't represented in the given
        routing table.
        """
        # Iterate over a copy, since _deactivate mutates self._pools.
        for address in list(self._pools):
            if address not in routing_table:
                await self._deactivate(address)
    async def _get_routing_table_from(self, *routers):
        """ Try to update routing tables with the given routers.

        :return: a new RoutingTable if the update succeeded, otherwise
            None
        """
        log.debug("Attempting to update routing table from "
                  "{}".format(", ".join(map(repr, routers))))
        for router in routers:
            pool = self._pools[router]
            cx = await pool.acquire()
            try:
                new_routing_table = await cx.get_routing_table(self._routing_context)
            except BoltError:
                await self._deactivate(router)
            else:
                num_routers = len(new_routing_table.routers)
                num_readers = len(new_routing_table.readers)
                num_writers = len(new_routing_table.writers)

                # No writers are available. This likely indicates a
                # temporary state, such as leader switching, so we
                # should not signal an error. When no writers are
                # available, we flag that we are reading in the
                # absence of a writer.
                self._missing_writer = (num_writers == 0)

                # No routers
                if num_routers == 0:
                    continue

                # No readers
                if num_readers == 0:
                    continue

                log.debug("Successfully updated routing table from "
                          "{!r} ({!r})".format(router, self._routing_table))
                return new_routing_table
            finally:
                await pool.release(cx)
        return None

    async def _get_routing_table(self):
        """ Update the routing table from the first router able to
        provide valid routing information.
        """
        # copied because it can be modified
        existing_routers = list(self._routing_table.routers)

        has_tried_initial_routers = False
        if self._missing_writer:
            has_tried_initial_routers = True
            rt = await self._get_routing_table_from(*self._initial_routers)
            if rt:
                return rt

        rt = await self._get_routing_table_from(*existing_routers)
        if rt:
            return rt

        if not has_tried_initial_routers and not all(
                router in existing_routers for router in self._initial_routers):
            rt = await self._get_routing_table_from(*self._initial_routers)
            if rt:
                return rt

        # None of the routers have been successful, so just fail
        log.error("Unable to retrieve routing information")
        raise Neo4jAvailabilityError("Unable to retrieve routing information")
    async def _ensure_routing_table_is_fresh(self, readonly=False):
        """ Update the routing table if stale.

        This method performs two freshness checks, before and after
        acquiring the refresh lock. If the routing table is already
        fresh on entry, the method exits immediately; otherwise, the
        refresh lock is acquired and the second freshness check that
        follows determines whether an update is still required.
        """
        if self._routing_table.is_fresh(readonly=readonly):
            return
        async with self._refresh_lock:
            if self._routing_table.is_fresh(readonly=readonly):
                if readonly:
                    # if reader is fresh but writers are not, then
                    # we are reading in absence of writer
                    self._missing_writer = not self._routing_table.is_fresh(readonly=False)
            else:
                rt = await self._get_routing_table()
                self._activate_new_pools_in(rt)
                self._routing_table = rt
                await self._deactivate_pools_not_in(rt)
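    # The method above uses a double-checked locking pattern, which
    # generalizes to any expensive async refresh. A hedged, self-contained
    # sketch of the same shape (the names here are illustrative and not
    # part of this module):
    #
    # class Refreshable:
    #     def __init__(self, loop=None):
    #         self._lock = Lock(loop=loop)
    #
    #     async def ensure_fresh(self):
    #         if self.is_fresh():          # cheap check without the lock
    #             return
    #         async with self._lock:       # serialize concurrent refreshers
    #             if not self.is_fresh():  # re-check; another task may have won
    #                 await self.refresh()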
    async def _select_pool(self, readonly=False):
        """ Selects the pool with the fewest in-use connections.
        """
        await self._ensure_routing_table_is_fresh(readonly=readonly)
        if readonly:
            addresses = self._routing_table.readers
        else:
            addresses = self._routing_table.writers
        pools = [pool for address, pool in self._pools.items()
                 if address in addresses]
        pools_by_usage = {}
        for pool in pools:
            pools_by_usage.setdefault(pool.in_use, []).append(pool)
        if not pools_by_usage:
            raise Neo4jAvailabilityError("No {} service currently "
                                         "available".format("read" if readonly else "write"))
        return choice(pools_by_usage[min(pools_by_usage)])

    async def acquire(self, *, readonly=False, force_reset=False):
        """ Acquire a connection to a server that can satisfy a set of
        parameters.

        :param readonly: true if a readonly connection is required,
            otherwise false
        :param force_reset:
        """
        while True:
            pool = await self._select_pool(readonly=readonly)
            try:
                cx = await pool.acquire(force_reset=force_reset)
            except BoltError:
                await self._deactivate(pool.address)
            else:
                if not readonly:
                    # If we're not acquiring a connection as
                    # readonly, then intercept NotALeader and
                    # ForbiddenOnReadOnlyDatabase errors to
                    # invalidate the routing table.
                    from neo4j.errors import (
                        NotALeader,
                        ForbiddenOnReadOnlyDatabase,
                    )

                    def handler(failure):
                        """ Invalidate the routing table before raising
                        the failure.
                        """
                        log.debug("[#0000] C: <ROUTING> Invalidating routing table")
                        self._routing_table.ttl = 0
                        raise failure

                    cx.set_failure_handler(NotALeader, handler)
                    cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler)
                return cx
\"\"\" return self._max_age @property def in_use(self): \"\"\"", "whether an update is still required. \"\"\" if self._routing_table.is_fresh(readonly=readonly): return", "we should not signal an error. # When no writers", "@property def in_use(self): \"\"\" The number of connections in this", "Address.parse_list(\" \".join(addresses), default_port=7687) # return cls(opener, router_addresses, loop=loop) # #", "version == protocol_version} @classmethod def opener(cls, auth=None, **config): \"\"\" Create", "self._waiting_list.notify() else: # Otherwise, close the connection. await cx.close() else:", "of configuration parameters. This is useful when multiple servers share", "until released. \"\"\" pools = dict(self._pools) self._pools.clear() for address, pool", "readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Begin an explicit transaction. :param", "an option to seed one or more initial connections. \"\"\"", "then intercept NotALeader and # ForbiddenOnReadOnlyDatabase errors to # invalidate", "= 0 await pool.prune() async def close(self, force=False): \"\"\" Close", "routing_context) # TODO: get initial routing table and construct await", "await pool.release(cx) return None async def _get_routing_table(self): \"\"\" Update the", "await self._waiting_list.join() else: cx = await self._sanitize(cx, force_reset=force_reset) self._in_use_list.append(cx) return", "# not used by current implementation async def readexactly(self, n):", "except ValueError: pass else: # Unhook any custom error handling", "handler(failure): \"\"\" Invalidate the routing table before raising the failure.", "template = \"neo4j-python/{} Python/{}.{}.{}-{}-{} ({})\" fields = (neo4j_version,) + tuple(version_info)", "Depending on the applications, it may be perfectly acceptable to", "in compliance with the License. # You may obtain a", "self.broken: log.debug(\"[#%04X] S: <HANGUP>\", self.local_address.port_number) self.__writer.write_eof() self.__writer.close() try: await self.__writer.wait_closed()", "addresses, routing_context): if loop is None: self._loop = get_event_loop() else:", "return self._address @property def max_size(self): \"\"\" The maximum permitted number", "software # distributed under the License is distributed on an", "be used on close. __writer = None # Flag to", "a connection is released, or until the acquire call is", "\"\"\" assert isinstance(address, Address) assert loop is not None assert", "multiple servers share the same configuration details, such as within", "maximum permitted age, in seconds, for connections to be retained", "-*- encoding: utf-8 -*- # Copyright (c) 2002-2019 \"Neo4j,\" #", "Bolt connection object \"\"\" log.debug(\"Acquiring connection from pool %r\", self)", "pool in pools.items(): if force: await pool.close() else: pool.max_size =", "\"\"\" Close all free connections. 
\"\"\" await self.__close(self._free_list) async def", "len(self._free_list) async def _sanitize(self, cx, *, force_reset): \"\"\" Attempt to", "closed nor expired, it will be reset (optionally forcibly so)", "transport, _ = await loop.create_connection(lambda: protocol, **connection_args) writer = BoltStreamWriter(transport,", "self._routing_table.readers else: addresses = self._routing_table.writers pools = [pool for address,", "try again log.debug(\"Joining waiting list\") await self._waiting_list.join() else: cx =", "ForbiddenOnReadOnlyDatabase, ) connection.del_failure_handler(NotALeader) connection.del_failure_handler(ForbiddenOnReadOnlyDatabase) break else: raise ValueError(\"Connection does not", "open(cls, *addresses, auth=None, routing_context=None, loop=None, **config): pool_config = PoolConfig.consume(config) def", "err: log.debug(\"[#%04X] S: <REJECT> %s (%d %s)\", 0, address, err.errno,", "capacity in the pool, attempt to # sanitize the connection", "local_address, remote_address) return reader, writer @classmethod async def _handshake(cls, reader,", "S: <REJECT> %s (%d %s)\", 0, address, err.errno, strerror(err.errno)) raise", "absence of writer self._missing_writer = not self._routing_table.is_fresh(readonly=False) else: rt =", "context to use for this call :return: a new RoutingTable", "use `discard` instead of `remove` here since the former #", "def __repr__(self): return \"<Bolt address=%r protocol_version=%r>\" % (self.remote_address, self.protocol_version) async", "loop self._opener = opener self._address = Address(address) self._max_size = config.max_size", "since the former # will not fail if the address", "connection pool. \"\"\" async def f(address, *, loop=None): return await", "self._missing_writer: has_tried_initial_routers = True rt = await self._get_routing_table_from(self._initial_routers) if rt:", "use if no addresses are specified. default_router_addresses = Address.parse_list(\":7687 :17601", "seconds. \"\"\" return perf_counter() - self.__t_opened @property def broken(self): \"\"\"", "# readonly, then intercept NotALeader and # ForbiddenOnReadOnlyDatabase errors to", "currently owned by this connection pool. \"\"\" return len(self._in_use_list) +", "== protocol_version} @classmethod def opener(cls, auth=None, **config): \"\"\" Create and", "run_tx(self, f, args=None, kwargs=None, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Run", ":param metadata: :return: \"\"\" async def run_tx(self, f, args=None, kwargs=None,", "will return an existing open connection, if one is free.", "string for a connection. \"\"\" template = \"neo4j-python/{} Python/{}.{}.{}-{}-{} ({})\"", "obj def __init__(self, loop, opener, config, addresses, routing_context): if loop", "= 0 raise failure cx.set_failure_handler(NotALeader, handler) cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler) return cx", "to this pool\") async def _deactivate(self, address): \"\"\" Deactivate an", "close(self, *, force=False): raise NotImplementedError class BoltPool: \"\"\" A pool", "and return the return value from that function. \"\"\" async", "self.size < self.max_size: # If there is spare capacity in", "async def acquire(self, *, readonly=False, force_reset=False): \"\"\" Acquire a connection", "__ainit__(self, auth): \"\"\" Asynchronous initializer for implementation by subclasses. :param", "The total number of connections (both in-use and free) currently", "empty the pool. 
If forced, in-use connections will be closed", "not None: await sleep(0.1) except AttributeError: pass class Pool: def", "loop) except SSLError as err: log.debug(\"[#%04X] S: <REJECT> %s (%d", "\"\"\" while True: pool = await self._select_pool(readonly=readonly) try: cx =", "async def update_routing_table(self): # cx = await self._routers.acquire() # try:", "self._pools[address] = BoltPool(self._loop, self._opener, self._config, address) async def _deactivate_pools_not_in(self, routing_table):", "handshake, optionally requesting a specific protocol version. :param reader: :param", "async def __ainit__(self, auth): \"\"\" Asynchronous initializer for implementation by", "# self._readers = Neo4jPool(opener) # self._routing_table = None # #", "will be returned. :param protocol_version: tuple identifying a specific protocol", "writers are available :raise ProtocolError: if the routing information received", ":param max_age: the maximum permitted age, in seconds, for connections", "auto-commit transaction. :param cypher: :param parameters: :param discard: :param readonly:", "a clean state. By default, a RESET message will only", "self._pools: if address not in routing_table: await self._deactivate(address) async def", "cx: the connection to release :param force_reset: if true, the", "depending on whether that version is supported. If no protocol", "already in a clean state :raise ValueError: if the connection", "open and ready Bolt connection :param address: the remote address", "return self._routing_table # # async def update_routing_table(self): # cx =", "None await cx.reset(force=force_reset) return cx async def acquire(self, *, force_reset=False):", "**config): \"\"\" Create and return an opener function for a", "to retrieve routing information\") raise Neo4jAvailabilityError(\"Unable to retrieve routing information\")", "the protocol to go away which, by the # implementation", "of 3.6 # changes in a subsequent patch, and can", "from collections import deque from logging import getLogger from os", "will only be sent if required, i.e. if the connection", "router is currently unable to provide routing information :raise ServiceUnavailable:", "# not used by current implementation async def read(self, n=-1):", "TODO: get initial routing table and construct await obj._ensure_routing_table_is_fresh() return", "if an I/O error occurs on the underlying socket connection", "that may be owned by this pool, both in-use and", ":param bookmarks: :param timeout: :param metadata: :return: \"\"\" async def", "cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler) return cx async def release(self, connection, *, force_reset=False):", "= RoutingTable(addresses) self._activate_new_pools_in(self._routing_table) def _activate_new_pools_in(self, routing_table): \"\"\" Add pools for", "request_data) writer.write(request_data) await writer.drain() response_data = await reader.readexactly(4) log.debug(\"[#%04X] S:", "with the License. # You may obtain a copy of", "perform protocol version negotiation, in order to construct and return", "information :raise ServiceUnavailable: if no writers are available :raise ProtocolError:", "waiting acquirers of this extra # capacity. 
self._waiting_list.notify() @property def", "negotiation, in order to construct and return a Bolt client", ":return: a Bolt connection object \"\"\" log.debug(\"Acquiring connection from pool", "is healthy and the pool is not already at capacity.", "raise Neo4jAvailabilityError(\"Unable to retrieve routing information\") async def _ensure_routing_table_is_fresh(self, readonly=False):", "while True: pool = await self._select_pool(readonly=readonly) try: cx = await", "# Carry on only if sanitation succeeded. if self.size <", "async def _handshake(cls, reader, writer, protocol_version): \"\"\" Carry out a", "\"\"\" log.debug(\"Releasing connection %r\", cx) if cx in self._in_use_list: self._in_use_list.remove(cx)", "TODO: other args } ssl_context = config.get_ssl_context() if ssl_context: connection_args[\"ssl\"]", "before and after acquiring the refresh lock. If the routing", "Release a Bolt connection, putting it back into the pool", "Bolt.open(addr, auth=auth, loop=loop, **pool_config) pool = cls(loop, opener, pool_config, address)", "connection, *, force_reset=False): \"\"\" Release a connection back into the", "if handshake completes without a successful negotiation \"\"\" local_address =", "= None #: As a class attribute, this denotes the", "for a connection. \"\"\" template = \"neo4j-python/{} Python/{}.{}.{}-{}-{} ({})\" fields", "# Plan A: select a free connection from the pool", "remote address for which this pool operates. \"\"\" return self._address", "for address, pool in pools.items(): if force: await pool.close() else:", "auth=auth, loop=loop, **pool_config) pool = cls(loop, opener, pool_config, address) seeds", "async def __close(self, connections): \"\"\" Close all connections in the", "will be reset (optionally forcibly so) and the connection object", "pool def __init__(self, loop, opener, config, address): if loop is", "n=-1): # pragma: no cover assert False # not used", "\"\"\" Open a socket connection and perform protocol version negotiation,", "a specific protocol version. :param reader: :param writer: :param protocol_version:", "cx in self._free_list def __len__(self): return self.size @property def address(self):", "for router in routers: pool = self._pools[router] cx = await", "\"\"\" Flag to indicate whether this connection has been broken", "patch, and can be removed when # Python 3.6 support", "capacity. self._waiting_list.notify() @property def max_age(self): \"\"\" The maximum permitted age,", "freshness check that follows determines whether an update is still", "close the connection. await cx.close() elif cx in self._free_list: raise", "passed as incompatible types :raise ValueError: if any of the", "provided, the dictionary will contain either zero or one items,", "\"\"\" for address in routing_table.servers(): if address not in self._pools:", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "= MAGIC + b\"\".join( v.to_bytes() for v in offered_versions).ljust(16, b\"\\x00\")", "err: message = (\"Network read incomplete (received {} of {}", "address. 
\"\"\" log.debug(\"[#0000] C: <ROUTING> Deactivating address %r\", address) #", "self.__writer.wait_closed() except BoltConnectionBroken: pass self.__closed = True async def reset(self,", "then we flag we are reading in absence of writer", "await self._opener(self.address) else: # Plan C: wait for more capacity", "acquire(self, *, readonly=False, force_reset=False): \"\"\" Acquire a connection to a", "return self.size @property def address(self): \"\"\" The remote address for", "OSError as err: log.debug(\"[#%04X] S: <CLOSE> %d %s\", err.errno, strerror(err.errno))", "writer = BoltStreamWriter(transport, protocol, reader, loop) except SSLError as err:", "return Bolt.open(addr, auth=auth, **pool_config) obj = cls(loop, opener, config, addresses,", "before being returned; if false, this will only occur if", "protocol_version: :return: :raise BoltConnectionLost: if an I/O error occurs on", "order to construct and return a Bolt client instance for", "import choice from ssl import SSLError from sys import platform,", "len(self._free_list), \" \" * (self.max_size - self.size), ) def __contains__(self,", "The number of connections in this pool that are currently", "*, loop=None): return await Bolt.open(address, auth=auth, loop=loop, **config) return f", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "pool in self._pools.items() if address in addresses] pools_by_usage = {}", "[]).append(pool) if not pools_by_usage: raise Neo4jAvailabilityError(\"No {} service currently \"", "they will remain open until released. \"\"\" pools = dict(self._pools)", "in seeds: await pool.release(seed) return pool def __init__(self, loop, opener,", "are available, this will block until a connection is released,", "and return an opener function for a given set of", "connection and return it to the pool. cx = await", "in-use connections will be closed immediately; if not, they will", "*, force_reset=False): \"\"\" Acquire a connection from the pool. In", "= 0 await pool.prune() class Neo4j: # The default router", "* len(self._in_use_list), \".\" * len(self._free_list), \" \" * (self.max_size -", "when multiple servers share the same configuration details, such as", "forcibly reset before being released back into the pool; if", "self._loop = loop self._opener = opener self._address = Address(address) self._max_size", "lock is acquired and the second freshness check that follows", "% (self.remote_address, self.protocol_version) async def __ainit__(self, auth): \"\"\" Asynchronous initializer", "# Connect reader, writer = await cls._connect(address, loop, config) try:", "CONDITIONS OF ANY KIND, either express or implied. # See", "# Plan B: if the pool isn't full, open #", "the routing table is successfully updated, otherwise False \"\"\" log.debug(\"Attempting", "as a tuple\") return {version: handler for version, handler in", "= handlers[agreed_version] except KeyError: log.debug(\"Unsupported Bolt protocol version %s\", agreed_version)", "sequence instead: pool.max_size = 0 pool.prune() This will force all", "RESET will be sent regardless. 
\"\"\" async def run(self, cypher,", "C: <ROUTING> Deactivating address %r\", address) # We use `discard`", "pool, with an option to seed one or more initial", "if the connection is not already in a clean state.", "None if the given router is currently unable to provide", "return choice(pools_by_usage[min(pools_by_usage)]) async def acquire(self, *, readonly=False, force_reset=False): \"\"\" Acquire", "C: <ROUTING> Invalidating routing table\") self._routing_table.ttl = 0 raise failure", "self._pools.clear() for address, pool in pools.items(): if force: await pool.close()", "to provide routing information :raise ServiceUnavailable: if no writers are", "# reader inside the protocol to go away which, by", "aren't represented in the given routing table. \"\"\" for address", "Return the default user agent string for a connection. \"\"\"", "address, err.errno, strerror(err.errno)) raise BoltSecurityError(\"Failed to establish a secure connection\",", "it can be discarded. Otherwise, the age of the connection", "**pool_config) obj = cls(loop, opener, config, addresses, routing_context) # TODO:", "( NotALeader, ForbiddenOnReadOnlyDatabase, ) connection.del_failure_handler(NotALeader) connection.del_failure_handler(ForbiddenOnReadOnlyDatabase) break else: raise ValueError(\"Connection", "self.local_address.port_number) Breakable.set_broken(self) raise BoltConnectionBroken(message, self.remote_address) from err except OSError as", "# # @property # def routing_table(self): # return self._routing_table #", "by the network or remote peer. \"\"\" return self.__reader.broken or", "= None # # @property # def routing_table(self): # return", "any waiting acquirers of this extra # capacity. self._waiting_list.notify() @property", "ForbiddenOnReadOnlyDatabase, ) def handler(failure): \"\"\" Invalidate the routing table before", "reader is fresh but writers are not, then # we", "if handshake completes without a successful negotiation :raise TypeError: if", "BoltHandshakeError(\"Unexpected handshake response %r\" % response_data, remote_address, request_data, response_data) from", "pool \"\"\" log.debug(\"Releasing connection %r\", cx) if cx in self._in_use_list:", "as err: log.debug(\"[#%04X] S: <CLOSE> (%s)\", self.local_address.port_number, err) Breakable.set_broken(self) raise", "await super().drain() except OSError as err: log.debug(\"[#%04X] S: <CLOSE> (%s)\",", "<CLOSE> (%s)\", self.local_address.port_number, err) Breakable.set_broken(self) raise BoltConnectionBroken(\"Network write failed\", self.remote_address)", "readonly connection is required, otherwise false :param force_reset: \"\"\" while", "cancelled. :param force_reset: if true, the connection will be forcibly", "simplest case, this will return an existing open connection, if", "pool operates. \"\"\" return self._address @property def max_size(self): \"\"\" The", "Neo4jAvailabilityError(\"No {} service currently \" \"available\".format(\"read\" if readonly else \"write\"))", "class BoltStreamReader(Addressable, Breakable, StreamReader): \"\"\" Wrapper for asyncio.streams.StreamReader \"\"\" def", ":raise ProtocolError: if the routing information received is unusable \"\"\"", "failure. \"\"\" log.debug(\"[#0000] C: <ROUTING> Invalidating routing table\") self._routing_table.ttl =", "= await reader.readexactly(4) log.debug(\"[#%04X] S: <HANDSHAKE> %r\", local_address.port_number, response_data) try:", "will be closed immediately; if not, they will remain open", "available versions will be returned. 
:param protocol_version: tuple identifying a", "Bolt subclass :raise BoltConnectionError: if a connection could not be", "or None :return: dictionary of version tuple to handler class", "\"neo4j-python/{} Python/{}.{}.{}-{}-{} ({})\" fields = (neo4j_version,) + tuple(version_info) + (platform,)", "thread safe. \"\"\" for pool in self._pools.values(): try: await pool.release(connection,", "# Record of the time at which this connection was", "= super().__new__(cls) obj.__t_opened = perf_counter() obj.__reader = reader obj.__writer =", "to update routing table from \" \"{}\".format(\", \".join(map(repr, routers)))) for", "in self._free_list: raise ValueError(\"Connection is not in use\") else: raise", "exit. from neo4j.errors import ( NotALeader, ForbiddenOnReadOnlyDatabase, ) connection.del_failure_handler(NotALeader) connection.del_failure_handler(ForbiddenOnReadOnlyDatabase)", "[http://neo4j.com] # # This file is part of Neo4j. #", "handlers[agreed_version] except KeyError: log.debug(\"Unsupported Bolt protocol version %s\", agreed_version) raise", "instead: pool.max_size = 0 pool.prune() This will force all future", "raise BoltConnectionBroken(\"Network read failed\", self.remote_address) from err class BoltStreamWriter(Addressable, Breakable,", "the routing context to use for this call :return: a", "update_routing_table(self): # cx = await self._routers.acquire() # try: # result", "= bool(config.secure) assert hasattr(obj, \"__ainit__\") await obj.__ainit__(auth) return obj except", "def drain(self): try: await super().drain() except OSError as err: log.debug(\"[#%04X]", "cypher, parameters=None, discard=False, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Run an", "a temporary state, # such as leader switching, so we", "it can be modified existing_routers = list(self._routing_table.routers) has_tried_initial_routers = False", "except IndexError: break else: closers.append(cx.close()) if closers: await wait(closers, loop=self._loop)", "function and return the return value from that function. \"\"\"", "with self._refresh_lock: if self._routing_table.is_fresh(readonly=readonly): if readonly: # if reader is", "pool.max_size = 0 await pool.prune() async def close(self, force=False): \"\"\"", "utf-8 -*- # Copyright (c) 2002-2019 \"Neo4j,\" # Neo4j Sweden", "try: # Plan A: select a free connection from the", "Version.from_bytes(response_data) except ValueError as err: writer.close() raise BoltHandshakeError(\"Unexpected handshake response", "self.max_size: # Check again if there is still capacity. self._free_list.append(cx)", "released back into the pool; if false, this will only", "port, such as (\"127.0.0.1\", 7687) :param auth: :param loop: :param", "new_routing_table = await cx.get_routing_table(self._routing_context) except BoltError: await self._deactivate(router) else: num_routers", "pass # assume the address has already been removed else:", "simply close the connection. await cx.close() elif cx in self._free_list:", "of the protocol in use. protocol_version = () # Record", ":param address: :param loop: :param config: :return: a 3-tuple of", "StreamWriter object, which can be used on close. 
__writer =", "self.__writer.write_eof() self.__writer.close() try: await self.__writer.wait_closed() except BoltConnectionBroken: pass self.__closed =", "return cls(opener, router_addresses, loop=loop) # # def __init__(self, opener, router_addresses,", "Args address = Address(address) if loop is None: loop =", "\"__ainit__\") await obj.__ainit__(auth) return obj except BoltError: writer.write_eof() writer.close() raise", ":param bookmarks: :param timeout: :param metadata: :raise BoltTransactionError: if a", "a new connection will be created. If the pool is", "addresses are specified. default_router_addresses = Address.parse_list(\":7687 :17601 :17687\") # TODO", "routing_table: await self._deactivate(address) async def _get_routing_table_from(self, *routers): \"\"\" Try to", "= self._routing_table.readers else: addresses = self._routing_table.writers pools = [pool for", "from neo4j.aio._collections import WaitingList from neo4j.aio._mixins import Addressable, Breakable from", "and construct await obj._ensure_routing_table_is_fresh() return obj def __init__(self, loop, opener,", "Bolt3 handlers = {bolt.protocol_version: bolt for bolt in [ #", "# return cls(opener, router_addresses, loop=loop) # # def __init__(self, opener,", "\"{!r} ({!r})\".format(router, self._routing_table)) return new_routing_table finally: await pool.release(cx) return None", "import RoutingTable log = getLogger(__name__) MAGIC = b\"\\x60\\x60\\xB0\\x17\" class Bolt(Addressable,", "readonly: :param bookmarks: :param timeout: :param metadata: :raise BoltTransactionError: if", "cannot be carried out at this time \"\"\" async def", "close the connection. await cx.close() else: # If the pool", "self.__reader.broken or self.__writer.broken @property def closed(self): \"\"\" Flag to indicate", "obj.secure = bool(config.secure) assert hasattr(obj, \"__ainit__\") await obj.__ainit__(auth) return obj", "_get_routing_table(self): \"\"\" Update the routing table from the first router", "client instance for a supported Bolt protocol version. :param address:", "self.max_age is not None and cx.age > self.max_age if expired:", "+ len(self._free_list) async def _sanitize(self, cx, *, force_reset): \"\"\" Attempt", "pool, if present, remove from the routing table and also", "await cx.close() return None await cx.reset(force=force_reset) return cx async def", "def run(self, cypher, parameters=None, discard=False, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\"", "reader obj.__writer = writer Addressable.set_transport(obj, writer.transport) return obj def __repr__(self):", "remote_address, request_data, response_data) else: return subclass def __new__(cls, reader, writer):", "max_age: the maximum permitted age, in seconds, for connections to", "use\") else: raise ValueError(\"Connection does not belong to this pool\")", "of version tuple to handler class for all relevant and", "loop=None): # opener = Bolt.opener(auth=auth, security=security, protocol_version=protocol_version) # router_addresses =", "< self.max_size: # If there is spare capacity in the", "WaitingList(loop=self._loop) def __repr__(self): return \"<{} addr'{}' [{}{}{}]>\".format( self.__class__.__name__, self.address, \"|\"", "free. \"\"\" return self._max_size @max_size.setter def max_size(self, value): old_value =", "\"\"\" Attempt to clean up a connection, such that it", "return pool def __init__(self, loop, opener, config, address): if loop", "is broken or closed, it can be discarded. 
Otherwise, the", "BoltError: await self._deactivate(router) else: num_routers = len(new_routing_table.routers) num_readers = len(new_routing_table.readers)", "= [await pool.acquire() for _ in range(pool_config.init_size)] for seed in", "**config): \"\"\" Open a socket connection and perform protocol version", "[{}{}{}]>\".format( self.__class__.__name__, self.address, \"|\" * len(self._in_use_list), \".\" * len(self._free_list), \"", "@classmethod async def open(cls, address, *, auth=None, loop=None, **config): \"\"\"", "routing table but which don't already have pools. \"\"\" for", ":17601 :17687\") # TODO # @classmethod # async def open(cls,", "class Bolt(Addressable, object): #: True if this instance uses secure", "self._address = Address(address) self._max_size = config.max_size self._max_age = config.max_age self._in_use_list", "None expired = self.max_age is not None and cx.age >", "not self.broken: log.debug(\"[#%04X] S: <HANGUP>\", self.local_address.port_number) self.__writer.write_eof() self.__writer.close() try: await", "is released, or until the acquire call is cancelled. :param", "pool. If forced, in-use connections will be closed immediately; if", "any of the arguments provided are passed with unsupported values", "sleep(0.1) except AttributeError: pass class Pool: def acquire(self, *, force_reset=False,", "if protocol_version is None: return handlers if not isinstance(protocol_version, tuple):", "err except OSError as err: log.debug(\"[#%04X] S: <REJECT> %s (%d", "types :raise ValueError: if any of the arguments provided are", "implementation of 3.6 # changes in a subsequent patch, and", "*args, **kwargs): super().__init__(*args, **kwargs) Addressable.set_transport(self, self.transport) async def drain(self): try:", "break else: raise ValueError(\"Connection does not belong to this pool\")", "perf_counter() - self.__t_opened @property def broken(self): \"\"\" Flag to indicate", "return an existing open connection, if one is free. If", "*addresses, auth=None, routing_context=None, loop=None, **config): pool_config = PoolConfig.consume(config) def opener(addr):", "Open a socket connection and perform protocol version negotiation, in", "*connections, force_reset=False): raise NotImplementedError def close(self, *, force=False): raise NotImplementedError", "in [ # This list can be updated as protocol", "writer @classmethod async def _handshake(cls, reader, writer, protocol_version): \"\"\" Carry", "be reset (optionally forcibly so) and the connection object will", "that are currently in use. \"\"\" return len(self._in_use_list) @property def", "cx.broken or cx.closed: try: # Plan A: select a free", "0: continue log.debug(\"Successfully updated routing table from \" \"{!r} ({!r})\".format(router,", "a tuple \"\"\" # Carry out subclass imports locally to", "assert loop is not None assert isinstance(config, Config) connection_args =", "auth=None, **config): \"\"\" Create and return an opener function for", "a new RoutingTable instance or None if the given router", "cover # This is a dirty hack for Python 3.6,", "# Handshake subclass = await cls._handshake(reader, writer, config.protocol_version) # Instantiation", "check will be overridden and a RESET will be sent", "self._missing_writer = (num_writers == 0) # No routers if num_routers", "a set of parameters. :param readonly: true if a readonly", "requesting a specific protocol version. :param reader: :param writer: :param", "will be created. 
If the pool is full and no", "self._free_list: raise ValueError(\"Connection is not in use\") else: raise ValueError(\"Connection", "= deque() self._waiting_list = WaitingList(loop=self._loop) def __repr__(self): return \"<{} addr'{}'", "\"\"\" The total number of connections (both in-use and free)", "new connection pool, with an option to seed one or", "@property # def routing_table(self): # return self._routing_table # # async", "available Bolt protocol handlers, keyed by version tuple. If an", "%r)\", protocol_version) offered_versions = sorted(handlers.keys(), reverse=True)[:4] request_data = MAGIC +", "retained in this pool \"\"\" @classmethod async def open(cls, address,", "for implementation by subclasses. :param auth: \"\"\" @property def age(self):", "value from that function. \"\"\" async def get_routing_table(self, context=None): \"\"\"", "could not be established \"\"\" assert isinstance(address, Address) assert loop", "indicate whether this connection has been broken by the network", "= cls(loop, opener, pool_config, address) seeds = [await pool.acquire() for", "except BoltError: writer.write_eof() writer.close() raise @classmethod async def _connect(cls, address,", "no longer required. # from asyncio import sleep try: while", "overridden and a RESET will be sent regardless. \"\"\" async", "@property def max_size(self): \"\"\" The maximum permitted number of simultaneous", "of Neo4j. # # Licensed under the Apache License, Version", "MAGIC = b\"\\x60\\x60\\xB0\\x17\" class Bolt(Addressable, object): #: True if this", "copied because it can be modified existing_routers = list(self._routing_table.routers) has_tried_initial_routers", "to release :param force_reset: if true, the connection will be", "n): try: return await super().readexactly(n) except IncompleteReadError as err: message", "await self.__writer.wait_closed() except BoltConnectionBroken: pass self.__closed = True async def", "both in-use and free :param max_age: the maximum permitted age,", "all connections and empty the pool. If forced, in-use connections", "\"\"\" if self._routing_table.is_fresh(readonly=readonly): return async with self._refresh_lock: if self._routing_table.is_fresh(readonly=readonly): if", "import getLogger from os import strerror from random import choice", "is not full, a new connection will be created. If", "Address from neo4j.aio._collections import WaitingList from neo4j.aio._mixins import Addressable, Breakable", "may be owned by this pool, both in-use and free", "version\", remote_address, request_data, response_data) else: return subclass def __new__(cls, reader,", "the underlying socket connection :raise BoltHandshakeError: if handshake completes without", "by version tuple. If an explicit protocol version is provided,", "reader = BoltStreamReader(loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) transport, _ =", "currently unable to provide routing information :raise ServiceUnavailable: if no", "\"\"\" Deactivate any pools that aren't represented in the given", "peer. 
\"\"\" return self.__reader.broken or self.__writer.broken @property def closed(self): \"\"\"", "and the second freshness check that follows determines whether an", "this pool operates :param max_size: the maximum permitted number of", "the connection pool, if present, remove from the routing table", "set_transport(self, transport): Addressable.set_transport(self, transport) StreamReader.set_transport(self, transport) async def readuntil(self, separator=b'\\n'):", "to indicate that the connection is closed __closed = False", "read failed\", self.remote_address) from err class BoltStreamWriter(Addressable, Breakable, StreamWriter): \"\"\"", "for seed in seeds: await pool.release(seed) return pool def __init__(self,", "pool.release(seed) return pool def __init__(self, loop, opener, config, address): if", "loop=loop, **config) return f @classmethod async def open(cls, address, *,", "table=%r\", self._routing_table) try: pool = self._pools.pop(address) except KeyError: pass #", "not, then # we are reading in absence of writer", "self._in_use_list or cx in self._free_list def __len__(self): return self.size @property", "async def _get_routing_table_from(self, *routers): \"\"\" Try to update routing tables", "also closing all idle connections to that address. \"\"\" log.debug(\"[#0000]", "are reading in absence of writer self._missing_writer = (num_writers ==", "RoutingTable.parse_routing_info([record]) # TODO: handle ValueError? # return self._routing_table # finally:", "version must be specified as a tuple\") return {version: handler", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "loop=loop) # # def __init__(self, opener, router_addresses, loop=None): # self._routers", "message will only be sent if required, i.e. if the", "for asyncio.streams.StreamWriter \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) Addressable.set_transport(self,", "bool(config.secure) assert hasattr(obj, \"__ainit__\") await obj.__ainit__(auth) return obj except BoltError:", "\"\"\" Return the default user agent string for a connection.", "response_data) from err try: subclass = handlers[agreed_version] except KeyError: log.debug(\"Unsupported", "None # Flag to indicate that the connection is closed", "\"\"\" Close all connections in the given list. \"\"\" closers", "B: if the pool isn't full, open # a new", "or self.default_router_addresses) # self._writers = Neo4jPool(opener) # self._readers = Neo4jPool(opener)", "C: <ROUTING> table=%r\", self._routing_table) try: pool = self._pools.pop(address) except KeyError:", "C: <HANDSHAKE> %r\", local_address.port_number, request_data) writer.write(request_data) await writer.drain() response_data =", "is a dirty hack for Python 3.6, which didn't include", "case, this will return an existing open connection, if one", "added and removed. Bolt3, ]} if protocol_version is None: return", "completes without a successful negotiation \"\"\" local_address = Address(writer.transport.get_extra_info(\"sockname\")) remote_address", "pool = self._pools[router] cx = await pool.acquire() try: new_routing_table =", "list(self._routing_table.routers) has_tried_initial_routers = False if self._missing_writer: has_tried_initial_routers = True rt", "lock. 
If the routing table is already fresh on entry,", "opener = Bolt.opener(auth=auth, security=security, protocol_version=protocol_version) # router_addresses = Address.parse_list(\" \".join(addresses),", ":raise BoltConnectionError: if a connection could not be established \"\"\"", "The age of this connection in seconds. \"\"\" return perf_counter()", "= config.max_size self._max_age = config.max_age self._in_use_list = deque() self._free_list =", "Reset the connection to a clean state. By default, a", "self._max_size self._max_size = value if value > old_value: # The", "err.expected)) log.debug(\"[#%04X] S: <CLOSE>\", self.local_address.port_number) Breakable.set_broken(self) raise BoltConnectionBroken(message, self.remote_address) from", "isn't full, open # a new connection cx = await", "= ssl_context connection_args[\"server_hostname\"] = address.host log.debug(\"[#0000] C: <DIAL> %s\", address)", "self._waiting_list.notify() @property def max_age(self): \"\"\" The maximum permitted age, in", "into the pool; if false, this will only occur if", "BoltHandshakeError, Neo4jAvailabilityError, ) from neo4j.api import Version from neo4j.conf import", "connection back into the pool. This method is thread safe.", "subclass(reader, writer) obj.secure = bool(config.secure) assert hasattr(obj, \"__ainit__\") await obj.__ainit__(auth)", "def max_age(self): \"\"\" The maximum permitted age, in seconds, for", "full, open # a new connection cx = await self._opener(self.address)", "\".join(addresses), default_port=7687) # return cls(opener, router_addresses, loop=loop) # # def", "self._max_age @property def in_use(self): \"\"\" The number of connections in", "BoltPool(self._loop, self._opener, self._config, address) async def _deactivate_pools_not_in(self, routing_table): \"\"\" Deactivate", "address has already been removed else: pool.max_size = 0 await", "are passed as incompatible types :raise ValueError: if any of", "# async def main(): # from neo4j.debug import watch; watch(\"neo4j\")", "connection to a server that can satisfy a set of", "to a server that can satisfy a set of parameters.", "be modified existing_routers = list(self._routing_table.routers) has_tried_initial_routers = False if self._missing_writer:", "block until a connection is released, or until the acquire", "local_address = Address(transport.get_extra_info(\"sockname\")) remote_address = Address(transport.get_extra_info(\"peername\")) log.debug(\"[#%04X] S: <ACCEPT> %s", "subclass imports locally to avoid circular # dependency issues. from", "routing information :raise ServiceUnavailable: if no writers are available :raise", "log.debug(\"Joining waiting list\") await self._waiting_list.join() else: cx = await self._sanitize(cx,", "removed. Bolt3, ]} if protocol_version is None: return handlers if", "_deactivate_pools_not_in(self, routing_table): \"\"\" Deactivate any pools that aren't represented in", "async def prune(self): \"\"\" Close all free connections. 
\"\"\" await", "() # Record of the time at which this connection", "try: subclass = handlers[agreed_version] except KeyError: log.debug(\"Unsupported Bolt protocol version", "Version 2.0 (the \"License\"); # you may not use this", "self.transport) async def drain(self): try: await super().drain() except OSError as", "def __new__(cls, reader, writer): obj = super().__new__(cls) obj.__t_opened = perf_counter()", "range(pool_config.init_size)] for seed in seeds: await pool.release(seed) return pool def", "deque() self._free_list = deque() self._waiting_list = WaitingList(loop=self._loop) def __repr__(self): return", "given routers. :return: True if the routing table is successfully", "a clean state :return: a Bolt connection object \"\"\" log.debug(\"Acquiring", "handlers if not isinstance(protocol_version, tuple): raise TypeError(\"Protocol version must be", "local_address.port_number, request_data) writer.write(request_data) await writer.drain() response_data = await reader.readexactly(4) log.debug(\"[#%04X]", "IncompleteReadError as err: message = (\"Network read incomplete (received {}", "def prune(self): \"\"\" Close all free connections. \"\"\" await self.__close(self._free_list)", "in-use connections. \"\"\" await self._ensure_routing_table_is_fresh(readonly=readonly) if readonly: addresses = self._routing_table.readers", "in progress to continue until connections are released, use the", "sent regardless. \"\"\" async def run(self, cypher, parameters=None, discard=False, readonly=False,", "transport) async def readuntil(self, separator=b'\\n'): # pragma: no cover assert", "or cx in self._free_list def __len__(self): return self.size @property def", "routing table\") self._routing_table.ttl = 0 raise failure cx.set_failure_handler(NotALeader, handler) cx.set_failure_handler(ForbiddenOnReadOnlyDatabase,", "assert isinstance(config, Config) connection_args = { \"host\": address.host, \"port\": address.port,", "be reused. If the connection is broken or closed, it", "agreed_version) raise BoltHandshakeError(\"Unsupported Bolt protocol version\", remote_address, request_data, response_data) else:", "and no free connections are available, this will block until", "StreamReader object. __reader = None # Handle to the StreamWriter", "routing tables with the given routers. :return: True if the", "connection.del_failure_handler(ForbiddenOnReadOnlyDatabase) break else: raise ValueError(\"Connection does not belong to this", "this time \"\"\" async def begin(self, readonly=False, bookmarks=None, timeout=None, metadata=None):", "async def _sanitize(self, cx, *, force_reset): \"\"\" Attempt to clean", "asyncio.streams.StreamWriter \"\"\" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) Addressable.set_transport(self, self.transport)", "table and construct await obj._ensure_routing_table_is_fresh() return obj def __init__(self, loop,", "%d %s\", err.errno, strerror(err.errno)) Breakable.set_broken(self) raise BoltConnectionBroken(\"Network read failed\", self.remote_address)", "No writers are available. This likely indicates a temporary state,", "timeout=None, metadata=None): \"\"\" Run an auto-commit transaction. :param cypher: :param", ":param force_reset: if true, the connection will be forcibly reset", "in existing_routers: rt = await self._get_routing_table_from(self._initial_routers) if rt: return rt", "(\"127.0.0.1\", 7687) :param auth: :param loop: :param config: :return: instance", "as within a connection pool. 
\"\"\" async def f(address, *,", "cls._connect(address, loop, config) try: # Handshake subclass = await cls._handshake(reader,", "if self.size < self.max_size: # Plan B: if the pool", "0 pool.prune() This will force all future connection acquisitions onto", ") from neo4j.api import Version from neo4j.conf import Config, PoolConfig", "reset(self, force=False): \"\"\" Reset the connection to a clean state.", "by applicable law or agreed to in writing, software #", "readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Run an auto-commit transaction. :param", "num_routers == 0: continue # No readers if num_readers ==", "forcibly reset before being returned; if false, this will only", "connections to be retained in this pool \"\"\" @classmethod async", "protocol version. :param address: tuples of host and port, such", "are added and removed. Bolt3, ]} if protocol_version is None:", "Otherwise, the age of the connection is checked against the", "pool = self._pools.pop(address) except KeyError: pass # assume the address", "protocol version is not passed in a tuple \"\"\" #", "more capacity to become # available, then try again log.debug(\"Joining", "the routing table from the first router able to provide", "neo4j.debug import watch; watch(\"neo4j\") # neo4j = await Neo4j.open(\":17601 :17602", "\"\"\" The maximum permitted age, in seconds, for connections to", "= await self._get_routing_table_from(*existing_routers) if rt: return rt if not has_tried_initial_routers", "readonly: # if reader is fresh but writers are not,", "(3, 5)) or None :return: dictionary of version tuple to", "self._config, address) async def _deactivate_pools_not_in(self, routing_table): \"\"\" Deactivate any pools", "async def read(self, n=-1): # pragma: no cover assert False", "pool, consequently closing it on expiry. Should the connection be", "The default router address list to use if no addresses", "when # Python 3.6 support is no longer required. #", "cx in self._free_list: raise ValueError(\"Connection is not in use\") else:", "address. :param opener: a function to which an address can", "security=False, protocol_version=None, loop=None): # opener = Bolt.opener(auth=auth, security=security, protocol_version=protocol_version) #", "self._opener, self._config, address) async def _deactivate_pools_not_in(self, routing_table): \"\"\" Deactivate any", "can be passed that returns an open and ready Bolt", "not used by current implementation async def read(self, n=-1): #", "force_reset=False): \"\"\" Release a Bolt connection, putting it back into", "pool isn't full, open # a new connection cx =", "didn't include # 'wait_closed'. The code polls waiting for the", "routing table. from neo4j.errors import ( NotALeader, ForbiddenOnReadOnlyDatabase, ) def", "nor expired, it will be reset (optionally forcibly so) and", "await cls._handshake(reader, writer, config.protocol_version) # Instantiation obj = subclass(reader, writer)", "represented in the given routing table. \"\"\" for address in", "expired, it will be reset (optionally forcibly so) and the", "async def close(self): \"\"\" Close all connections immediately. This does", "that can satisfy a set of parameters. :param readonly: true", "satisfy a set of parameters. 
:param readonly: true if a", "address) async def _deactivate_pools_not_in(self, routing_table): \"\"\" Deactivate any pools that", "dbms.cluster.routing.getRoutingTable($context)\", {\"context\": {}}) # record = await result.single() # self._routing_table", "local_address = Address(writer.transport.get_extra_info(\"sockname\")) remote_address = Address(writer.transport.get_extra_info(\"peername\")) handlers = cls.protocol_handlers(protocol_version) if", "of writer self._missing_writer = not self._routing_table.is_fresh(readonly=False) else: rt = await", "pool is full and no free connections are available, this", "= deque() while True: try: cx = connections.popleft() except IndexError:", "immediately; if not, they will remain open until released. \"\"\"", "\"\"\" return self._address @property def max_size(self): \"\"\" The maximum permitted", "self._opener(self.address) else: # Plan C: wait for more capacity to", "if num_routers == 0: continue # No readers if num_readers", "[ # This list can be updated as protocol #", "choice from ssl import SSLError from sys import platform, version_info", "self._initial_routers not in existing_routers: rt = await self._get_routing_table_from(self._initial_routers) if rt:", "isinstance(protocol_version, tuple): raise TypeError(\"Protocol version must be specified as a", "of the arguments provided are passed as incompatible types :raise", "= self._max_size self._max_size = value if value > old_value: #", ":raise TypeError: if protocol version is not passed in a", "neo4j.addressing import Address from neo4j.aio._collections import WaitingList from neo4j.aio._mixins import", "pragma: no cover assert False # not used by current", "by this pool, both in-use and free :param max_age: the", "loop is None: self._loop = get_event_loop() else: self._loop = loop", "the first router able to provide valid routing information. \"\"\"", "if self._routing_table.is_fresh(readonly=readonly): return async with self._refresh_lock: if self._routing_table.is_fresh(readonly=readonly): if readonly:", "KeyError: pass # assume the address has already been removed", "secure = None #: As a class attribute, this denotes", "connection\", address) from err else: local_address = Address(transport.get_extra_info(\"sockname\")) remote_address =", "protocol handlers available (requested Bolt %r)\", protocol_version) offered_versions = sorted(handlers.keys(),", "applicable law or agreed to in writing, software # distributed", "if cx: # Carry on only if sanitation succeeded. if", "Bolt protocol version\", remote_address, request_data, response_data) else: return subclass def", "success. 
\"\"\" if cx.broken or cx.closed: return None expired =", "either zero or one items, depending on whether that version", "if self.size < self.max_size: # Check again if there is", "handlers available (requested Bolt %r)\", protocol_version) offered_versions = sorted(handlers.keys(), reverse=True)[:4]", ":param address: tuples of host and port, such as (\"127.0.0.1\",", "a class attribute, this denotes the version of Bolt handled", "default, a RESET message will only be sent if required,", "self.remote_address) from err except OSError as err: log.debug(\"[#%04X] S: <CLOSE>", "in handlers.items() if version == protocol_version} @classmethod def opener(cls, auth=None,", "def size(self): \"\"\" The total number of connections (both in-use", "use, or if it does not belong to this pool", "\"\"\" return self.__reader.broken or self.__writer.broken @property def closed(self): \"\"\" Flag", "def __init__(self, opener, router_addresses, loop=None): # self._routers = Neo4jPool(opener, router_addresses", "number of simultaneous connections that may be owned by this", "closed(self): \"\"\" Flag to indicate whether this connection has been", "all future connection acquisitions onto the waiting list, and released", "**config): pool_config = PoolConfig.consume(config) def opener(addr): return Bolt.open(addr, auth=auth, **pool_config)", "bookmarks=None, timeout=None, metadata=None): \"\"\" Run an auto-commit transaction. :param cypher:", "the connection is broken or closed, it can be discarded.", "try: return await super().readexactly(n) except IncompleteReadError as err: message =", "self._refresh_lock = Lock(loop=self._loop) self._routing_context = routing_context self._max_size_per_host = config.max_size self._initial_routers", "BoltConnectionError(\"Failed to establish a connection\", address) from err else: local_address", "follows determines whether an update is still required. \"\"\" if", "This list can be updated as protocol # versions are", "Bolt %r)\", protocol_version) offered_versions = sorted(handlers.keys(), reverse=True)[:4] request_data = MAGIC", "in self._pools.values(): try: await pool.release(connection, force_reset=force_reset) except ValueError: pass else:", "can be discarded. Otherwise, the age of the connection is", "\"<Bolt address=%r protocol_version=%r>\" % (self.remote_address, self.protocol_version) async def __ainit__(self, auth):", "return len(self._in_use_list) + len(self._free_list) async def _sanitize(self, cx, *, force_reset):", "Close all connections and empty the pool. If forced, in-use", "the pool. This method is thread safe. \"\"\" for pool", "handler in handlers.items() if version == protocol_version} @classmethod def opener(cls,", "%s -> %s\", local_address.port_number, local_address, remote_address) return reader, writer @classmethod", "parameters. This is useful when multiple servers share the same", "# You may obtain a copy of the License at", "from neo4j.routing import RoutingTable log = getLogger(__name__) MAGIC = b\"\\x60\\x60\\xB0\\x17\"", "await self._ensure_routing_table_is_fresh(readonly=readonly) if readonly: addresses = self._routing_table.readers else: addresses =", "connection is not currently in use, or if it does", "self._activate_new_pools_in(rt) self._routing_table.update(rt) await self._deactivate_pools_not_in(rt) async def _select_pool(self, readonly=False): \"\"\" Selects", "of writer self._missing_writer = (num_writers == 0) # No routers", "\"\"\" The maximum permitted number of simultaneous connections that may", "required. 
\"\"\" if self._routing_table.is_fresh(readonly=readonly): return async with self._refresh_lock: if self._routing_table.is_fresh(readonly=readonly):", "required. # from asyncio import sleep try: while self._protocol._stream_reader is", "onto the waiting list, and released connections will be closed", "= getLogger(__name__) MAGIC = b\"\\x60\\x60\\xB0\\x17\" class Bolt(Addressable, object): #: True", "just fail log.error(\"Unable to retrieve routing information\") raise Neo4jAvailabilityError(\"Unable to", "this pool. \"\"\" return self._max_age @property def in_use(self): \"\"\" The", "has_tried_initial_routers = True rt = await self._get_routing_table_from(self._initial_routers) if rt: return", "import sleep try: while self._protocol._stream_reader is not None: await sleep(0.1)", "negotiation :raise TypeError: if any of the arguments provided are", "\".join(map(repr, routers)))) for router in routers: pool = self._pools[router] cx", "cx.close() return None await cx.reset(force=force_reset) return cx async def acquire(self,", "# self._routers = Neo4jPool(opener, router_addresses or self.default_router_addresses) # self._writers =", "a given set of configuration parameters. This is useful when", "from neo4j.aio.bolt3 import Bolt3 handlers = {bolt.protocol_version: bolt for bolt", "Bolt.opener(auth=auth, security=security, protocol_version=protocol_version) # router_addresses = Address.parse_list(\" \".join(addresses), default_port=7687) #", "connection_args = { \"host\": address.host, \"port\": address.port, \"family\": address.family, #", ":param metadata: :raise BoltTransactionError: if a transaction cannot be carried", "self._deactivate(address) async def _get_routing_table_from(self, *routers): \"\"\" Try to update routing", "Release a connection back into the pool. This method is", "Neo4jPool(opener) # self._routing_table = None # # @property # def", "be carried out at this time \"\"\" async def begin(self,", "remote peer. \"\"\" return self.__reader.broken or self.__writer.broken @property def closed(self):", "remain open until released. \"\"\" pools = dict(self._pools) self._pools.clear() for", "cx.run(\"CALL dbms.cluster.routing.getRoutingTable($context)\", {\"context\": {}}) # record = await result.single() #", "provided, all available versions will be returned. :param protocol_version: tuple", "if loop is None: loop = get_event_loop() config = PoolConfig.consume(config)", "connections. \"\"\" await self._ensure_routing_table_is_fresh(readonly=readonly) if readonly: addresses = self._routing_table.readers else:", "not belong to this pool\") async def _deactivate(self, address): \"\"\"", "error handling and exit. from neo4j.errors import ( NotALeader, ForbiddenOnReadOnlyDatabase,", "the connection to a clean state. By default, a RESET", "version (e.g. (3, 5)) or None :return: dictionary of version", "this will return an existing open connection, if one is", "table from \" \"{!r} ({!r})\".format(router, self._routing_table)) return new_routing_table finally: await", "released. \"\"\" pools = dict(self._pools) self._pools.clear() for address, pool in", "[await pool.acquire() for _ in range(pool_config.init_size)] for seed in seeds:", "def begin(self, readonly=False, bookmarks=None, timeout=None, metadata=None): \"\"\" Begin an explicit", "and the connection object will be returned, indicating success. \"\"\"", "configuration details, such as within a connection pool. 
\"\"\" async", "the second freshness check that follows determines whether an update", "auth=None, loop=None, **config): \"\"\" Create a new connection pool, with", "config, address): if loop is None: self._loop = get_event_loop() else:", "out at this time \"\"\" async def begin(self, readonly=False, bookmarks=None,", "to # sanitize the connection and return it to the", "= True rt = await self._get_routing_table_from(self._initial_routers) if rt: return rt", "if readonly else \"write\")) return choice(pools_by_usage[min(pools_by_usage)]) async def acquire(self, *,", "def acquire(self, *, force_reset=False): \"\"\" Acquire a connection from the", "writer: :param protocol_version: :return: :raise BoltConnectionLost: if an I/O error", "# Otherwise, close the connection. await cx.close() else: # If", "max_size(self): \"\"\" The maximum permitted number of simultaneous connections that", "routing table from \" \"{}\".format(\", \".join(map(repr, routers)))) for router in", "whether this connection has been closed locally.\"\"\" return self.__closed async", "== 0) # No routers if num_routers == 0: continue", "not in use\") else: raise ValueError(\"Connection does not belong to", "class attribute, this denotes the version of Bolt handled #:", "cypher: :param parameters: :param discard: :param readonly: :param bookmarks: :param", "\"write\")) return choice(pools_by_usage[min(pools_by_usage)]) async def acquire(self, *, readonly=False, force_reset=False): \"\"\"", "await self.__close(self._in_use_list) async def __close(self, connections): \"\"\" Close all connections", "This method performs two freshness checks, before and after acquiring", "async def run_tx(self, f, args=None, kwargs=None, readonly=False, bookmarks=None, timeout=None, metadata=None):", "cx: # Carry on only if sanitation succeeded. if self.size", "protocol version negotiation, in order to construct and return a", "immediately. This does not permanently disable the connection pool, it", "subclass = await cls._handshake(reader, writer, config.protocol_version) # Instantiation obj =", "cx = await self._sanitize(cx, force_reset=force_reset) if cx: # Carry on", "*, auth=None, loop=None, **config): \"\"\" Open a socket connection and", "cx = await self._opener(self.address) else: # Plan C: wait for", "are passed with unsupported values \"\"\" # Args address =", "given router is currently unable to provide routing information :raise", "which will have the implicit affect of reopening the pool.", "protocol in use. protocol_version = () # Record of the", "temporary state, # such as leader switching, so we should", "await self._get_routing_table_from(self._initial_routers) if rt: return rt rt = await self._get_routing_table_from(*existing_routers)", "log.debug(\"[#%04X] S: <ACCEPT> %s -> %s\", local_address.port_number, local_address, remote_address) return", "capacity. :param cx: the connection to release :param force_reset: if", "This likely indicates a temporary state, # such as leader", "<ROUTING> Invalidating routing table\") self._routing_table.ttl = 0 raise failure cx.set_failure_handler(NotALeader,", "intercept NotALeader and # ForbiddenOnReadOnlyDatabase errors to # invalidate the", "routing table. \"\"\" for address in self._pools: if address not", "cx) if cx in self._in_use_list: self._in_use_list.remove(cx) if self.size < self.max_size:", "len(self._in_use_list) + len(self._free_list) async def _sanitize(self, cx, *, force_reset): \"\"\"", "be discarded. 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


class Bolt:

    #: True if this instance uses secure communication, false
    #: otherwise.
    secure = None

    #: As a class attribute, this denotes the version of Bolt handled
    #: by that subclass; as an instance attribute, this represents the
    #: version of the protocol in use.
    protocol_version = ()

    #: Record of the StreamWriter object, which can be used on close.
    __writer = None

    # TODO
    # @classmethod
    # async def open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None):
    #     opener = Bolt.opener(auth=auth, security=security, protocol_version=protocol_version)
    #     router_addresses = Address.parse_list(" ".join(addresses), default_port=7687)
    #     return cls(opener, router_addresses, loop=loop)

    async def __ainit__(self, auth):
        """ Asynchronous initializer for implementation by subclasses.

        :param auth:
        """

    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Open a socket connection and perform protocol version
        negotiation, in order to construct and return a Bolt subclass
        instance of the appropriate version. If no protocol version is
        provided, all available versions will be offered.

        :raise ValueError: if any arguments are passed with
            unsupported values
        """
        # Args
        address = Address(address)
        if loop is None:
            loop = get_event_loop()
        config = PoolConfig.consume(config)

        # Connect
        reader, writer = await cls._connect(address, loop, config)

        # Handshake
        subclass = await cls._handshake(reader, writer, config.protocol_version)

        # Instantiation
        obj = subclass(reader, writer)

        # Asynchronous initialization
        await obj.__ainit__(auth)

        return obj

    @classmethod
    async def _connect(cls, address, loop, config):
        """ Open a TCP connection to the address provided.

        :param address:
        :param loop:
        :return: reader and writer for the connection established
        """
        assert isinstance(address, Address)
        assert loop is not None
        connection_args = {
            "host": address.host,
            "port": address.port,
            "family": address.family,
            # TODO: other args
        }
        ssl_context = config.get_ssl_context()
        if ssl_context:
            connection_args["ssl"] = ssl_context
        reader, writer = await open_connection(**connection_args)
        local_address = Address(writer.transport.get_extra_info("sockname"))
        remote_address = Address(writer.transport.get_extra_info("peername"))
        log.debug("[#%04X] S: <ACCEPT> %s -> %s",
                  local_address.port_number, local_address, remote_address)
        return reader, writer

    @classmethod
    async def _handshake(cls, reader, writer, protocol_version):
        """ Carry out the Bolt protocol handshake.

        :param reader:
        :param writer:
        :param protocol_version:
        :return:
        :raise BoltConnectionLost: if an I/O error occurs on the
            underlying socket connection
        """

    @property
    def closed(self):
        """ Indicates whether this connection has been closed locally.
        """
        return self.__closed

    @property
    def broken(self):
        """ Indicates whether this connection has been broken by the
        network or remote peer.
        """
        return self.__broken

    async def run(self, cypher, parameters=None, discard=False, readonly=False,
                  bookmarks=None, timeout=None, metadata=None):
        """ Run an auto-commit transaction.

        :param cypher:
        :param parameters:
        :param discard:
        :param readonly:
        :param bookmarks:
        :param timeout:
        :param metadata:
        :raise BoltTransactionError: if a transaction cannot be carried
            out at this time
        """

    async def begin(self, readonly=False, bookmarks=None,
                    timeout=None, metadata=None):
        """ Begin an explicit transaction.
        """

    async def run_tx(self, f, args=None, kwargs=None, readonly=False,
                     bookmarks=None, timeout=None, metadata=None):
        """ Run a transaction function inside a managed transaction.
        """


class BoltPool:
    """ A pool of connections to a single address.
    """

    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Create a new connection pool, with an option to seed one
        or more initial connections.
        """
        pool_config = PoolConfig.consume(config)

        def opener(addr):
            return Bolt.open(addr, auth=auth, **pool_config)

        return cls(loop, opener, pool_config, address)

    def __init__(self, loop, opener, config, address):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop
        self._opener = opener
        self._address = Address(address)
        self._max_size = config.max_size
        self._max_age = config.max_age
        self._in_use_list = deque()
        self._free_list = deque()
        self._waiting_list = WaitingList(loop=self._loop)

    @property
    def address(self):
        """ The remote address for which this pool operates.
        """
        return self._address

    @property
    def max_size(self):
        """ The maximum permitted number of simultaneous connections
        that may be owned by this pool, both in use and free.
        """
        return self._max_size

    @max_size.setter
    def max_size(self, value):
        old_value = self._max_size
        self._max_size = value
        if value > old_value:
            # The maximum size has grown, so extra capacity has become
            # available; notify any waiters.
            self._waiting_list.notify()

    @property
    def max_age(self):
        """ The maximum permitted age, in seconds, of connections in
        this pool.
        """
        return self._max_age

    @property
    def in_use(self):
        """ The number of connections in this pool that are currently
        in use.
        """
        return len(self._in_use_list)

    @property
    def size(self):
        """ The total number of connections, both in use and free,
        currently owned by this pool.
        """
        return len(self._in_use_list) + len(self._free_list)

    async def _sanitize(self, cx, *, force_reset):
        """ Attempt to clean up a connection, such that it can be
        reused.

        If the connection is broken or closed, it can be discarded.
        Otherwise, the age of the connection is checked against the
        maximum age permitted by this pool, consequently closing it
        on expiry.

        Should the connection be neither broken, closed nor expired,
        it will be reset (optionally with a force) and returned,
        indicating success.
        """
        if cx.broken or cx.closed:
            return None
        expired = self.max_age is not None and cx.age > self.max_age
        if expired:
            await cx.close()
            return None
        await cx.reset(force=force_reset)
        return cx

    async def acquire(self, *, force_reset=False):
        """ Acquire a connection from the pool.

        In the simplest case this returns an existing open connection,
        if one is free. Failing that, if there is spare capacity in
        the pool, a new connection is created; otherwise the caller
        waits for capacity to become available.
        """
        cx = None
        while cx is None:
            try:
                # Plan A: select a free connection from the pool
                cx = self._free_list.popleft()
            except IndexError:
                if self.size < self.max_size:
                    # Plan B: if there is spare capacity in the pool,
                    # attempt to create a new connection
                    cx = await self._opener(self.address)
                else:
                    # Plan C: wait for more capacity to become
                    # available, then try again
                    await self._waiting_list.join()
            else:
                cx = await self._sanitize(cx, force_reset=force_reset)
        self._in_use_list.append(cx)
        return cx

    async def release(self, cx, *, force_reset=False):
        """ Release a connection back into the pool.

        This method is thread safe.

        :param cx: the connection to release
        :param force_reset: if true, the connection will be forcibly
            reset before being released; if false, this will only
            occur if the connection is not already in a clean state
        :raise ValueError: if the connection is not in use, or if it
            does not belong to this pool
        """
        log.debug("Releasing connection %r", cx)
        if cx in self._in_use_list:
            self._in_use_list.remove(cx)
            if self.size < self.max_size:
                # If there is spare capacity in the pool, attempt to
                # sanitize the connection and return it to the pool.
                cx = await self._sanitize(cx, force_reset=force_reset)
                if cx:
                    # Carry on only if sanitation succeeded.
                    if self.size < self.max_size:
                        # Check again that there is still capacity.
                        self._free_list.append(cx)
                        self._waiting_list.notify()
                    else:
                        # Otherwise, close the connection.
                        await cx.close()
            else:
                # If the pool is full, simply close the connection.
                await cx.close()
        elif cx in self._free_list:
            raise ValueError("Connection is not in use")
        else:
            raise ValueError("Connection does not belong to this pool")

    async def prune(self):
        """ Close all free connections.
        """
        await self.__close(self._free_list)

    async def close(self):
        """ Close all connections immediately.

        This does not permanently disable the connection pool, it
        merely shuts down all open connections, including those in
        use. Depending on the application, it may be perfectly
        acceptable to re-acquire connections after calling this
        method, which will have the implicit effect of reopening the
        pool.
        """
        await self.prune()
        await self.__close(self._in_use_list)

    async def __close(self, connections):
        """ Close all connections in the given list.
        """


class Neo4jPool:
    """ Connection pool with routing table.
    """

    #: The set of router addresses to use if none are explicitly
    #: specified.
    default_router_addresses = Address.parse_list(":7687 :17601 :17687")

    @classmethod
    async def open(cls, *addresses, auth=None, routing_context=None,
                   loop=None, **config):
        pool_config = PoolConfig.consume(config)

        def opener(addr):
            return Bolt.open(addr, auth=auth, **pool_config)

        obj = cls(loop, opener, pool_config, addresses, routing_context)
        # TODO: get initial routing table and construct
        await obj._ensure_routing_table_is_fresh()
        return obj

    def __init__(self, loop, opener, config, addresses, routing_context):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop
        self._opener = opener
        self._config = config
        self._pools = {}
        self._missing_writer = False
        self._refresh_lock = Lock(loop=self._loop)
        self._routing_context = routing_context
        self._max_size_per_host = config.max_size
        self._initial_routers = addresses
        self._routing_table = RoutingTable(addresses)
        self._activate_new_pools_in(self._routing_table)

    def _activate_new_pools_in(self, routing_table):
        """ Add pools for addresses that exist in the given routing
        table but not in the set of active pools.
        """
        for address in routing_table.servers():
            if address not in self._pools:
                self._pools[address] = BoltPool(self._loop, self._opener,
                                                self._config, address)

    async def _deactivate_pools_not_in(self, routing_table):
        """ Deactivate any pools that aren't represented in the given
        routing table.
        """
        for address in self._pools:
            if address not in routing_table.servers():
                await self._deactivate(address)

    async def _deactivate(self, address):
        """ Deactivate an address from the connection pool, if
        present, closing all idle connections to that address.
        """
        log.debug("Deactivating address %r", address)
        # We use `discard` instead of `remove` here, since `remove`
        # would raise if the address were already absent.

    async def _get_routing_table_from(self, *routers):
        """ Try to update the routing table from the given routers.

        :return: a new RoutingTable instance or None if the given
            router is currently unable to provide routing information
        """
        log.debug("Attempting to update routing table from "
                  "{}".format(", ".join(map(repr, routers))))
        for router in routers:
            ...
        return None

    async def _get_routing_table(self):
        """ Update the routing table from the first router able to
        provide valid routing information.
        """
        # copied because it can be modified
        existing_routers = list(self._routing_table.routers)

        has_tried_initial_routers = False
        if self._missing_writer:
            has_tried_initial_routers = True
            rt = await self._get_routing_table_from(self._initial_routers)
            if rt:
                return rt

        rt = await self._get_routing_table_from(*existing_routers)
        if rt:
            return rt

        if not has_tried_initial_routers and \
                self._initial_routers not in existing_routers:
            rt = await self._get_routing_table_from(self._initial_routers)
            if rt:
                return rt

        # None of the routers was able to provide routing information.
        log.error("Unable to retrieve routing information")
        raise Neo4jAvailabilityError("Unable to retrieve routing information")

    async def _ensure_routing_table_is_fresh(self, readonly=False):
        """ Update the routing table if stale.

        This method performs two freshness checks, before and after
        acquiring the refresh lock. The first check avoids the cost of
        acquiring the lock when the table is already fresh; the second
        freshness check that follows determines whether an update is
        still required once the lock is held.
        """
        if self._routing_table.is_fresh(readonly=readonly):
            return
        async with self._refresh_lock:
            if self._routing_table.is_fresh(readonly=readonly):
                if readonly:
                    # if reader is fresh but writer is not, then
                    # we are reading in the absence of a writer
                    self._missing_writer = \
                        not self._routing_table.is_fresh(readonly=False)
            else:
                rt = await self._get_routing_table()
                self._routing_table = rt
                self._activate_new_pools_in(self._routing_table)

    async def _select_pool(self, readonly=False):
        """ Select the pool with the fewest connections currently in
        use.
        """
        await self._ensure_routing_table_is_fresh(readonly=readonly)
        if readonly:
            addresses = self._routing_table.readers
        else:
            addresses = self._routing_table.writers
        pools = [pool for address, pool in list(self._pools.items())
                 if address in addresses]
        pools_by_usage = {}
        for pool in pools:
            pools_by_usage.setdefault(pool.in_use, []).append(pool)
        if not pools_by_usage:
            raise Neo4jAvailabilityError(
                "No %s service currently available" %
                ("read" if readonly else "write"))
        return choice(pools_by_usage[min(pools_by_usage)])

    async def acquire(self, *, readonly=False, force_reset=False):
        """ Acquire a connection to a server that can satisfy a set
        of parameters.

        :param readonly: true if a readonly connection is required,
            false otherwise
        :param force_reset:
        :return: a Bolt connection object
        """
        pool = await self._select_pool(readonly=readonly)
        log.debug("Acquiring connection from pool %r", pool)
        cx = await pool.acquire(force_reset=force_reset)
        if not readonly:
            # Wire in handlers to intercept NotALeader and
            # ForbiddenOnReadOnlyDatabase errors to invalidate the
            # routing table. These likely indicate a temporary state,
            # such as leader switching, so we should not deactivate
            # the server outright.

            def handler(failure):
                """ Invalidate the routing table before raising the
                failure.
                """
                log.debug("[#0000]  C: <ROUTING> Invalidating routing table")
                self._routing_table.ttl = 0
                raise failure

            cx.set_failure_handler(NotALeader, handler)
            cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler)
        return cx

    async def release(self, connection, *, force_reset=False):
        """ Release a connection back into the pool it came from.
        """
        for pool in self._pools.values():
            try:
                await pool.release(connection, force_reset=force_reset)
            except ValueError:
                pass
            else:
                # Unhook any failure handlers added on acquire.
                connection.del_failure_handler(NotALeader)
                connection.del_failure_handler(ForbiddenOnReadOnlyDatabase)
                break
        else:
            raise ValueError("Connection does not belong to this pool")
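# The _sanitize routine above is the heart of the pooling logic: a
# connection is discarded if broken or closed, closed if it has
# outlived max_age, and otherwise reset and reused. A minimal,
# self-contained sketch of that pattern follows; FakeConnection and
# the standalone sanitize function are stand-ins invented here for
# illustration, not part of the driver.

import asyncio
import time


class FakeConnection:
    """ Stand-in for a Bolt connection, exposing only the attributes
    the sanitize check needs (broken, closed, age, reset, close).
    """

    def __init__(self):
        self.broken = False
        self.closed = False
        self._opened_at = time.monotonic()

    @property
    def age(self):
        return time.monotonic() - self._opened_at

    async def reset(self, force=False):
        pass  # a real connection would send a RESET message here

    async def close(self):
        self.closed = True


async def sanitize(cx, *, max_age=3600.0, force_reset=False):
    # Broken or closed connections cannot be reused.
    if cx.broken or cx.closed:
        return None
    # Expired connections are closed rather than recycled.
    if max_age is not None and cx.age > max_age:
        await cx.close()
        return None
    # Healthy connections are reset and handed back for reuse.
    await cx.reset(force=force_reset)
    return cx


async def demo_sanitize():
    cx = FakeConnection()
    assert await sanitize(cx) is cx               # healthy: reusable
    assert await sanitize(cx, max_age=0) is None  # expired: closed
    assert cx.closed


asyncio.run(demo_sanitize())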
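# _ensure_routing_table_is_fresh above is an async variant of
# double-checked locking: a cheap freshness test before taking the
# refresh lock, and a second test after it, so that concurrent
# callers do not all refresh the table. A runnable sketch of just
# that pattern, with a toy Table class invented for illustration:

import asyncio


class Table:
    """ Toy routing table: fresh while ttl is positive. """

    def __init__(self):
        self.ttl = 0.0
        self.updates = 0

    def is_fresh(self):
        return self.ttl > 0

    async def update(self):
        await asyncio.sleep(0)  # stands in for the network round trip
        self.ttl = 300.0
        self.updates += 1


async def demo_double_check():
    table = Table()
    lock = asyncio.Lock()

    async def ensure_fresh():
        # First check: cheap, avoids the lock in the common case.
        if table.is_fresh():
            return
        async with lock:
            # Second check: another coroutine may have refreshed the
            # table while we were waiting for the lock.
            if table.is_fresh():
                return
            await table.update()

    await asyncio.gather(*[ensure_fresh() for _ in range(10)])
    assert table.updates == 1  # only one coroutine did the refresh


asyncio.run(demo_double_check())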
import os
import imp
from setuptools import setup, find_packages

__version__ = imp.load_source(
    "hsfs.version", os.path.join("hsfs", "version.py")
).__version__


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name="hsfs",
    version=__version__,
    install_requires=[
        "pyhumps==1.6.1",
        "requests",
        "furl",
        "boto3",
        "pandas",
        "numpy",
        "pyjks",
        "mock",
        "avro==1.10.2",
        "sqlalchemy",
        "PyMySQL",
    ],
    extras_require={
        "dev": ["pytest", "flake8", "black"],
        "docs": [
            "mkdocs==1.1.2",
            "mkdocs-material==6.2.2",
            "mike==0.5.5",
            "sphinx==3.5.4",
            "keras_autodoc @ git+https://git@github.com/moritzmeister/keras-autodoc@split-tags-properties",
            "markdown-include",
        ],
        "hive": ["pyhopshive[thrift]"],
    },
    author="Logical Clocks AB",
    author_email="<EMAIL>",
    description="HSFS: An environment independent client to interact with the Hopsworks Featurestore",
    license="Apache License 2.0",
    keywords="Hopsworks, Feature Store, Spark, Machine Learning, MLOps, DataOps",
    url="https://github.com/logicalclocks/feature-store-api",
    download_url="https://github.com/logicalclocks/feature-store-api/releases/tag/" + __version__,
    packages=find_packages(),
    long_description=read("../README.md"),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Utilities",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Intended Audience :: Developers",
    ],
)
#!/usr/bin/env python3
from . import signup, signin, signout, update, info, detail
# Optional list of dependencies required by the package
dependencies = ['torch']

from focal_loss import FocalLoss, focal_loss
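# hubconf.py files like the one above are what torch.hub consumes:
# `dependencies` lists the packages torch.hub should verify, and each
# top-level callable (here FocalLoss and focal_loss, re-exported from
# the repo's focal_loss module) becomes a loadable entrypoint. A
# sketch of loading that entrypoint; the repo name comes from the tag
# above, and any extra arguments would be forwarded to focal_loss
# itself, whose signature is not shown here.

import torch

criterion = torch.hub.load(
    'jamesmcclain/pytorch-multi-class-focal-loss', 'focal_loss')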
[ ") lyr.ResetReading() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (6", "'success' ############################################################################### # Copy our small gpx file to a", "feat, 'POINT (15 14)', max_error = 0.0001 ) != 0:", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "# Purpose: Test GPX driver functionality. # Author: <NAME> <even", "feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('track_name',", "gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) gpx2_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = co_opts )", ") expect = ['route point name', None, None] tr =", "co_opts ) gpx_lyr = gdaltest.gpx_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint )", "ogrtest.check_feature_geometry( feat, 'LINESTRING (6 5,9 8,12 11)', max_error = 0.0001", "!= 0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature() f_geom =", ") != 0: gdaltest.post_reason('CreateFeature failed.') return 'fail' feat = bna_lyr.GetNextFeature()", "feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (1 0)', max_error", "= None try: os.remove ('tmp/gpx.gpx') except: pass gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',", "ogrtest.check_feature_geometry( feat, 'POINT (4 3)', max_error = 0.0001 ) !=", "ogr_gpx_2, ogr_gpx_3, ogr_gpx_4, ogr_gpx_5, ogr_gpx_6, # Rerun test 1, 2", "USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import", "0: return 'fail' feat.Destroy() return 'success' ############################################################################### # Test route_points", "portions of the Software. 
# # THE SOFTWARE IS PROVIDED", ") if not tr: return 'fail' lyr.ResetReading() expect = ['text2',", "gdaltest.gpx_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint ) bna_lyr.ResetReading() for i in", "'track_points' ) expect = ['track point name', None, None, None]", "layers' ) return 'fail' return 'success' ############################################################################### # Test waypoints", "any person obtaining a # copy of this software and", "= ogr.wkbPoint ) bna_lyr.ResetReading() for i in range(bna_lyr.GetLayerDefn().GetFieldCount()): field_defn =", "try: os.remove ('tmp/gpx.gpx') except: pass return 'success' gdaltest_list = [", "feat.SetField('route_fid', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3", "ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 51)') feat.SetField('route_name', 'ROUTE_NAME2') feat.SetField('route_fid', 1) feat.SetGeometry(geom)", "feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom", "geom = ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('track_name', '--ignored--') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0)", "in range(bna_lyr.GetLayerDefn().GetFieldCount()): field_defn = bna_lyr.GetLayerDefn().GetFieldDefn(i) gpx_lyr.CreateField( field_defn ) dst_feat =", "feat.SetField('route_name', 'ROUTE_NAME') feat.SetField('route_fid', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom", "!= 0: return 'fail' feat.Destroy() return 'success' ############################################################################### # Copy", "destroy is required for old-gen python bindings gpx2_ds.Destroy() gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds", "ogr.wkbPoint ) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(2 49)') feat.SetField('track_name',", "fields as <extensions>. def ogr_gpx_7(): if not gdaltest.have_gpx: return 'skip'", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None try: os.remove ('tmp/gpx.gpx') except: pass gdaltest.gpx_ds", "gpx_lyr.GetLayerDefn() ) feat = bna_lyr.GetNextFeature() while feat is not None:", "geom = ogr.CreateGeometryFromWkt('POINT(3 51)') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 1) feat.SetGeometry(geom) lyr.CreateFeature(feat)", "expect = ['text', None] tr = ogrtest.check_features_against_list( lyr, 'link1_text', expect", "shall be included # in all copies or substantial portions", "dst_feat.Destroy() bna_ds.Destroy() gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None #Now check that the", "tr: return 'fail' lyr.ResetReading() expect = ['waypoint name', None] tr", "if not tr: return 'fail' gpx_lyr.ResetReading() expect = ['TID1', None]", "0: gdaltest.post_reason('CreateFeature failed.') return 'fail' feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() gpx_lyr", "# Duplicate routes gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' ) gpx2_lyr =", "gpx_lyr.ResetReading() expect = ['SID1', 'SID2'] tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Secondary_ID',", "tracks gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' ) gpx2_lyr = gpx2_ds.CreateLayer( 'tracks',", "return 'success' ############################################################################### # Test waypoints gpx layer. 
def ogr_gpx_1():", "TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN", "# Rerun test 1, 2 and 4 with generated tmp/tmp.gpx", "not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None: return 'skip'", "not tr: return 'fail' lyr.ResetReading() expect = ['type2', None] tr", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "'routes', geom_type = ogr.wkbLineString ) gpx_lyr.ResetReading() dst_feat = ogr.Feature( feature_def", "= ['track point name', None, None, None] tr = ogrtest.check_features_against_list(", "None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' ) lyr.ResetReading() feat", "with generated tmp/tmp.gpx ogr_gpx_1, ogr_gpx_2, ogr_gpx_4, ogr_gpx_7, ogr_gpx_8, ogr_gpx_cleanup ]", "feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING ((15 14,18 17),(21", "paris dot org> # # Permission is hereby granted, free", "None #Now check that the extensions fields have been well", "= ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('track_name', '--ignored--') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom)", "None] tr = ogrtest.check_features_against_list( lyr, 'name', expect ) if not", "if ogrtest.check_feature_geometry( feat, 'POINT (1 0)', max_error = 0.0001 )", ") != 0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature() f_geom", "# Copy our small gpx file to a new gpx", "layer. def ogr_gpx_5(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds", "= ['type', None] tr = ogrtest.check_features_against_list( lyr, 'link1_type', expect )", "############################################################################### # Test route_points gpx layer. def ogr_gpx_5(): if not", "# Project: GDAL/OGR Test Suite # Purpose: Test GPX driver", "and to permit persons to whom the # Software is", "not tr: return 'fail' gpx_lyr.ResetReading() expect = ['SID1', 'SID2'] tr", "############################################################################### # Test routes gpx layer. def ogr_gpx_2(): if not", "'data/test.gpx' ) except: gdaltest.gpx_ds = None if gdaltest.gpx_ds is None:", "# Test route_points gpx layer. def ogr_gpx_3(): if not gdaltest.have_gpx:", "return 'success' gdaltest_list = [ ogr_gpx_init, ogr_gpx_1, ogr_gpx_2, ogr_gpx_3, ogr_gpx_4,", "['route point name', None, None] tr = ogrtest.check_features_against_list( lyr, 'name',", "= lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'LINESTRING EMPTY', max_error = 0.0001", "expect = ['waypoint name', None] tr = ogrtest.check_features_against_list( lyr, 'name',", "(the \"Software\"), # to deal in the Software without restriction,", "# in all copies or substantial portions of the Software.", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS", "'name', expect ) if not tr: return 'fail' lyr.ResetReading() expect", "tr = ogrtest.check_features_against_list( lyr, 'name', expect ) if not tr:", "# Test routes gpx layer. 
def ogr_gpx_2(): if not gdaltest.have_gpx:", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED,", "8,12 11)', max_error = 0.0001 ) != 0: return 'fail'", "KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "= ['href', None] tr = ogrtest.check_features_against_list( lyr, 'link1_href', expect )", "= None # Explicit destroy is required for old-gen python", "WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING", "gpx2_lyr = gpx2_ds.CreateLayer( 'routes', geom_type = ogr.wkbLineString ) gpx_lyr.ResetReading() dst_feat", "the rights to use, copy, modify, merge, publish, distribute, sublicense,", "the Software, and to permit persons to whom the #", "<extensions>. def ogr_gpx_7(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds", "ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = co_opts ) gpx_lyr = gdaltest.gpx_ds.CreateLayer( 'waypoints', geom_type", "'skip' if gdaltest.gpx_ds is not None: gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None", "expected result') print(f_content) return 'fail' return 'success' ############################################################################### # def", "person obtaining a # copy of this software and associated", "is hereby granted, free of charge, to any person obtaining", "tr = ogrtest.check_features_against_list( lyr, 'link2_type', expect ) if not tr:", "ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('route_name', '--ignored--') feat.SetField('route_fid', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat =", "'fail' lyr.ResetReading() expect = ['type2', None] tr = ogrtest.check_features_against_list( lyr,", "= None f = open('tmp/gpx.gpx','rb') f_ref = open('data/ogr_gpx_8_ref.txt','rb') f_content =", "f.close() f_ref.close() if f_content.find(f_ref_content) == -1: gdaltest.post_reason('did not get expected", "associated documentation files (the \"Software\"), # to deal in the", "0.0001 ) != 0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature()", "'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' ) lyr.ResetReading() feat = lyr.GetNextFeature()", "'success' gdaltest_list = [ ogr_gpx_init, ogr_gpx_1, ogr_gpx_2, ogr_gpx_3, ogr_gpx_4, ogr_gpx_5,", "= ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('route_name', '--ignored--') feat.SetField('route_fid', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat", "'TRACK_NAME') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn())", "None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'route_points' ) expect =", "return 'skip' if gdaltest.gpx_ds is None: return 'skip' try: gdal.PushErrorHandler(", "driver functionality. 
# Author: <NAME> <even dot rouault at mines", "lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'LINESTRING EMPTY', max_error = 0.0001 )", ") import gdaltest import ogrtest import ogr import osr import", "gdaltest.gpx_ds.CreateLayer( 'track_points', geom_type = ogr.wkbPoint ) feat = ogr.Feature(lyr.GetLayerDefn()) geom", "return 'fail' feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() # Duplicate routes gpx_lyr", "feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() gpx_lyr = None gpx2_lyr = None", "3)', max_error = 0.0001 ) != 0: return 'fail' feat.Destroy()", "# def ogr_gpx_cleanup(): if gdaltest.gpx_ds is not None: gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds", "ogr.CreateGeometryFromWkt('POINT(2 49)') feat.SetField('track_name', 'TRACK_NAME') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat)", "DEALINGS IN THE SOFTWARE. ############################################################################### import os import sys import", "granted, free of charge, to any person obtaining a #", "dst_feat.SetFrom( feat ) if gpx_lyr.CreateFeature( dst_feat ) != 0: gdaltest.post_reason('CreateFeature", ") ogr.GetDriverByName('CSV').DeleteDataSource( 'tmp/gpx.gpx' ) gdal.PopErrorHandler() except: pass co_opts = [", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #", "if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING ((15 14,18 17),(21 20,24 23))', max_error", "gpx2_lyr = gpx2_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint ) gpx_lyr.ResetReading() dst_feat", "['SID1', 'SID2'] tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Secondary_ID', expect ) if", "= lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (15 14)', max_error =", "tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Primary_ID', expect ) if not tr:", "# Permission is hereby granted, free of charge, to any", "try: os.remove ('tmp/gpx.gpx') except: pass gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options =", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "Test GPX driver functionality. 
# Author: <NAME> <even dot rouault", "gdaltest import ogrtest import ogr import osr import gdal def", "= gpx_lyr.GetNextFeature() dst_feat.Destroy() # Duplicate routes gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'routes'", "lyr.ResetReading() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (6 5)',", "else: gdaltest.have_gpx = 1 if not gdaltest.have_gpx: return 'skip' if", "= gpx_lyr.GetNextFeature() while feat is not None: dst_feat.SetFrom( feat )", "is not None: dst_feat.SetFrom( feat ) if gpx2_lyr.CreateFeature( dst_feat )", "gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None: return 'fail' lyr", "<reponame>HongqiangWei/gdal #!/usr/bin/env python ############################################################################### # $Id$ # # Project: GDAL/OGR", "'TRACK_NAME2') feat.SetField('track_fid', 1) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds =", "co_opts ) gpx2_lyr = gpx2_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint )", "feat, 'LINESTRING (6 5,9 8,12 11)', max_error = 0.0001 )", "# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "gdaltest.gpx_ds.GetLayerCount() != 5: gdaltest.post_reason( 'wrong number of layers' ) return", "ogr_gpx_1, ogr_gpx_2, ogr_gpx_4, ogr_gpx_7, ogr_gpx_8, ogr_gpx_cleanup ] if __name__ ==", "required for old-gen python bindings gpx2_ds.Destroy() gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = ogr.Open(", "'LINESTRING (6 5,9 8,12 11)', max_error = 0.0001 ) !=", "dot rouault at mines dash paris dot org> # ###############################################################################", "((15 14,18 17),(21 20,24 23))', max_error = 0.0001 ) !=", "= ['route point name', None, None] tr = ogrtest.check_features_against_list( lyr,", "gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = ['LINEFORMAT=LF']) lyr = gdaltest.gpx_ds.CreateLayer( 'route_points',", "to any person obtaining a # copy of this software", ") gpx2_lyr = gpx2_ds.CreateLayer( 'routes', geom_type = ogr.wkbLineString ) gpx_lyr.ResetReading()", "# copy of this software and associated documentation files (the", "'waypoints' ) expect = ['PID1', 'PID2'] tr = ogrtest.check_features_against_list( gpx_lyr,", "feat.Destroy() return 'success' ############################################################################### # Copy our small gpx file", "not None: dst_feat.SetFrom( feat ) if gpx2_lyr.CreateFeature( dst_feat ) !=", "= ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 49)') feat.SetField('track_name', 'TRACK_NAME2') feat.SetField('track_fid', 1)", ") dst_feat = ogr.Feature( feature_def = gpx_lyr.GetLayerDefn() ) feat =", "4 with generated tmp/tmp.gpx ogr_gpx_1, ogr_gpx_2, ogr_gpx_4, ogr_gpx_7, ogr_gpx_8, ogr_gpx_cleanup", "tracks gpx layer. def ogr_gpx_4(): if not gdaltest.have_gpx: return 'skip'", "gdaltest_list = [ ogr_gpx_init, ogr_gpx_1, ogr_gpx_2, ogr_gpx_3, ogr_gpx_4, ogr_gpx_5, ogr_gpx_6,", "None try: os.remove ('tmp/gpx.gpx') except: pass gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "2 and 4 with generated tmp/tmp.gpx ogr_gpx_1, ogr_gpx_2, ogr_gpx_4, ogr_gpx_7,", "notice shall be included # in all copies or substantial", "import osr import gdal def ogr_gpx_init(): gdaltest.gpx_ds = None try:", "ogrtest.check_feature_geometry( feat, 'POINT (15 14)', max_error = 0.0001 ) !=", "gpx file to a new gpx file. 
def ogr_gpx_6(): if", ") if not tr: return 'fail' return 'success' ############################################################################### #", "if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is not None:", "ogr_gpx_6(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None:", "is None: gdaltest.have_gpx = 0 else: gdaltest.have_gpx = 1 if", "ogr_gpx_1(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None:", "def ogr_gpx_cleanup(): if gdaltest.gpx_ds is not None: gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds =", "['text', None] tr = ogrtest.check_features_against_list( lyr, 'link1_text', expect ) if", "ogr.wkbPoint ) bna_lyr.ResetReading() for i in range(bna_lyr.GetLayerDefn().GetFieldCount()): field_defn = bna_lyr.GetLayerDefn().GetFieldDefn(i)", "lyr = gdaltest.gpx_ds.CreateLayer( 'route_points', geom_type = ogr.wkbPoint ) feat =", "pass co_opts = [ 'GPX_USE_EXTENSIONS=yes' ] # Duplicate waypoints bna_lyr", "options = co_opts ) gpx_lyr = gdaltest.gpx_ds.CreateLayer( 'waypoints', geom_type =", "None if gdaltest.gpx_ds is None: gdaltest.have_gpx = 0 else: gdaltest.have_gpx", "= lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (4 3)', max_error =", "expect = ['text2', None] tr = ogrtest.check_features_against_list( lyr, 'link2_text', expect", "None] tr = ogrtest.check_features_against_list( lyr, 'name', expect ) lyr.ResetReading() feat", "= ['href2', None] tr = ogrtest.check_features_against_list( lyr, 'link2_href', expect )", "feat = bna_lyr.GetNextFeature() while feat is not None: dst_feat.SetFrom( feat", "feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (6 5)', max_error", "f_ref = open('data/ogr_gpx_8_ref.txt','rb') f_content = f.read() f_ref_content = f_ref.read() f.close()", "not get expected result') print(f_content) return 'fail' return 'success' ###############################################################################", "Author: <NAME> <even dot rouault at mines dash paris dot", "return 'success' ############################################################################### # def ogr_gpx_cleanup(): if gdaltest.gpx_ds is not", "'POINT (6 5)', max_error = 0.0001 ) != 0: return", "= ogrtest.check_features_against_list( lyr, 'link1_href', expect ) if not tr: return", "= ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('track_name', '--ignored--') feat.SetField('track_fid', 0)", "feat.SetGeometry(geom) lyr.CreateFeature(feat) lyr = gdaltest.gpx_ds.CreateLayer( 'track_points', geom_type = ogr.wkbPoint )", "'fail' lyr.ResetReading() expect = ['2007/11/25 17:58:00+01', None] tr = ogrtest.check_features_against_list(", "bna_lyr.GetNextFeature() while feat is not None: dst_feat.SetFrom( feat ) if", "if gdaltest.gpx_ds is not None: gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None bna_ds", "feat, 'POINT (4 3)', max_error = 0.0001 ) != 0:", "for i in range(bna_lyr.GetLayerDefn().GetFieldCount()): field_defn = bna_lyr.GetLayerDefn().GetFieldDefn(i) gpx_lyr.CreateField( field_defn )", "use, copy, modify, merge, publish, distribute, sublicense, # and/or sell", ") gpx2_lyr = gpx2_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint ) gpx_lyr.ResetReading()", "Duplicate waypoints bna_lyr = bna_ds.GetLayerByName( 'bna_for_gpx_points' ) gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',", "ogr_gpx_2(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None:", "# and/or sell copies of the Software, and to permit", "IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "osr import gdal def ogr_gpx_init(): gdaltest.gpx_ds = None try: gdaltest.gpx_ds", "gdaltest.gpx_ds = ogr.Open('tmp/gpx.gpx') gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) expect =", "= lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING EMPTY', max_error = 0.0001", "return 'success' ############################################################################### # Test tracks gpx layer. def ogr_gpx_4():", "gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) gpx2_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options =", "'success' ############################################################################### # def ogr_gpx_cleanup(): if gdaltest.gpx_ds is not None:", "gdaltest.post_reason('did not get expected result') print(f_content) return 'fail' return 'success'", "gdaltest.gpx_ds is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' )", "] # Duplicate waypoints bna_lyr = bna_ds.GetLayerByName( 'bna_for_gpx_points' ) gdaltest.gpx_ds", "ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 49)') feat.SetField('route_fid', 1) feat.SetGeometry(geom) lyr.CreateFeature(feat) lyr", "if not tr: return 'fail' lyr.ResetReading() feat = lyr.GetNextFeature() if", ") gdal.PopErrorHandler() except: pass co_opts = [ ] # Duplicate", "feature_def = gpx2_lyr.GetLayerDefn() ) feat = gpx_lyr.GetNextFeature() while feat is", "options = co_opts ) gpx2_lyr = gpx2_ds.CreateLayer( 'waypoints', geom_type =", "'MULTILINESTRING ((15 14,18 17),(21 20,24 23))', max_error = 0.0001 )", "is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) expect", "failed.') return 'fail' feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() # Duplicate routes", "= ['text', None] tr = ogrtest.check_features_against_list( lyr, 'link1_text', expect )", "= ogr.Open( 'data/bna_for_gpx.bna' ) try: os.remove ('tmp/gpx.gpx') except: pass co_opts", "OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import os import", "written gdaltest.gpx_ds = ogr.Open('tmp/gpx.gpx') gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) expect", "ogr_gpx_5(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is None:", "gpx layer. 
def ogr_gpx_2(): if not gdaltest.have_gpx: return 'skip' if", "co_opts = [ ] # Duplicate waypoints gpx_lyr = gdaltest.gpx_ds.GetLayerByName(", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "if ogrtest.check_feature_geometry( feat, 'POINT (6 5)', max_error = 0.0001 )", "of layers' ) return 'fail' return 'success' ############################################################################### # Test", "############################################################################### import os import sys import string sys.path.append( '../pymod' )", "ogr_gpx_8, ogr_gpx_cleanup ] if __name__ == '__main__': gdaltest.setup_run( 'ogr_gpx' )", "= ogr.wkbPoint ) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(2 49)')", "lyr, 'time', expect ) if not tr: return 'fail' lyr.ResetReading()", "lyr.CreateFeature(feat) lyr = gdaltest.gpx_ds.CreateLayer( 'track_points', geom_type = ogr.wkbPoint ) feat", "'skip' if gdaltest.gpx_ds.GetLayerCount() != 5: gdaltest.post_reason( 'wrong number of layers'", "extensions fields have been well written gdaltest.gpx_ds = ogr.Open('tmp/gpx.gpx') gpx_lyr", "is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' ) lyr.ResetReading()", "not tr: return 'fail' lyr.ResetReading() expect = ['2007/11/25 17:58:00+01', None]", "lyr.GetNextFeature() f_geom = feat.GetGeometryRef() if f_geom.ExportToWkt()!= 'MULTILINESTRING EMPTY': return 'fail'", "if gdaltest.gpx_ds is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'route_points'", ") feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(2 49)') feat.SetField('route_name', 'ROUTE_NAME')", "def ogr_gpx_init(): gdaltest.gpx_ds = None try: gdaltest.gpx_ds = ogr.Open( 'data/test.gpx'", "# Output extra fields as <extensions>. def ogr_gpx_7(): if not", "ogrtest.check_features_against_list( lyr, 'link1_type', expect ) if not tr: return 'fail'", "= ogrtest.check_features_against_list( lyr, 'link2_text', expect ) if not tr: return", "feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3", ") if not tr: return 'fail' lyr.ResetReading() expect = ['href',", "= 1 if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds.GetLayerCount() !=", "= feat.GetGeometryRef() if f_geom.ExportToWkt()!= 'MULTILINESTRING EMPTY': return 'fail' feat.Destroy() return", "is furnished to do so, subject to the following conditions:", "None gpx2_lyr = None # Explicit destroy is required for", "'fail' feat.Destroy() return 'success' ############################################################################### # Test tracks gpx layer.", "gdaltest.gpx_ds.GetLayerByName( 'routes' ) gpx2_lyr = gpx2_ds.CreateLayer( 'routes', geom_type = ogr.wkbLineString", "return 'fail' lyr.ResetReading() expect = ['2007/11/25 17:58:00+01', None] tr =", "if gdaltest.gpx_ds is None: return 'skip' try: gdal.PushErrorHandler( 'CPLQuietErrorHandler' )", "<extensions>. 
def ogr_gpx_8(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds", "def ogr_gpx_4(): if not gdaltest.have_gpx: return 'skip' if gdaltest.gpx_ds is", "feat.SetField('track_name', 'TRACK_NAME') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat =", "lyr.ResetReading() expect = ['type', None] tr = ogrtest.check_features_against_list( lyr, 'link1_type',", "ARISING # FROM, OUT OF OR IN CONNECTION WITH THE", "return 'fail' lyr.ResetReading() expect = ['text', None] tr = ogrtest.check_features_against_list(", "Explicit destroy is required for old-gen python bindings gpx2_ds.Destroy() gdaltest.gpx_ds.Destroy()", "file to a new gpx file. def ogr_gpx_6(): if not", "feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat, 'POINT (15 14)', max_error", "not tr: return 'fail' lyr.ResetReading() expect = ['href2', None] tr", "'fail' lyr.ResetReading() expect = ['href', None] tr = ogrtest.check_features_against_list( lyr,", "feat, 'LINESTRING EMPTY', max_error = 0.0001 ) != 0: return", ") feat = gpx_lyr.GetNextFeature() while feat is not None: dst_feat.SetFrom(", "= ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('route_name', '--ignored--') feat.SetField('route_fid', 0)", "dst_feat.Destroy() # Duplicate tracks gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' ) gpx2_lyr", "49)') feat.SetField('track_name', 'TRACK_NAME') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat", "= [ 'GPX_USE_EXTENSIONS=yes' ] # Duplicate waypoints bna_lyr = bna_ds.GetLayerByName(", "if gdaltest.gpx_ds is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'track_points'", "gdaltest.post_reason('CreateFeature failed.') return 'fail' feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() gpx_lyr =", "field_defn ) dst_feat = ogr.Feature( feature_def = gpx_lyr.GetLayerDefn() ) feat", "if not tr: return 'fail' return 'success' ############################################################################### # Output", "'wrong number of layers' ) return 'fail' return 'success' ###############################################################################", "lyr, 'link2_type', expect ) if not tr: return 'fail' lyr.ResetReading()", "feat.Destroy() return 'success' ############################################################################### # Test routes gpx layer. 
def", "ogrtest.check_feature_geometry( feat, 'MULTILINESTRING EMPTY', max_error = 0.0001 ) != 0:", "'fail' return 'success' ############################################################################### # Output extra fields as <extensions>.", "'ogr_Primary_ID', expect ) if not tr: return 'fail' gpx_lyr.ResetReading() expect", "= gdaltest.gpx_ds.CreateLayer( 'track_points', geom_type = ogr.wkbPoint ) feat = ogr.Feature(lyr.GetLayerDefn())", "ogr.CreateGeometryFromWkt('POINT(3 49)') feat.SetField('route_fid', 1) feat.SetGeometry(geom) lyr.CreateFeature(feat) lyr = gdaltest.gpx_ds.CreateLayer( 'track_points',", "gdaltest.gpx_ds = None try: os.remove ('tmp/gpx.gpx') except: pass gdaltest.gpx_ds =", "# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "= ['TID1', None] tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Third_ID', expect )", "ogr.wkbPoint ) gpx_lyr.ResetReading() dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )", "ogr.CreateGeometryFromWkt('POINT(3 50)') feat.SetField('track_name', '--ignored--') feat.SetField('track_fid', 0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat)", "feat.SetField('track_fid', 1) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None", "'fail' feat.Destroy() return 'success' ############################################################################### # Test route_points gpx layer.", "IN THE SOFTWARE. ############################################################################### import os import sys import string", "ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = ['LINEFORMAT=LF']) lyr = gdaltest.gpx_ds.CreateLayer( 'route_points', geom_type =", ") != 0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature() if", "= ogrtest.check_features_against_list( lyr, 'link1_text', expect ) if not tr: return", "lyr.ResetReading() expect = ['type2', None] tr = ogrtest.check_features_against_list( lyr, 'link2_type',", "return 'fail' gpx_lyr.ResetReading() expect = ['SID1', 'SID2'] tr = ogrtest.check_features_against_list(", "1, 2 and 4 with generated tmp/tmp.gpx ogr_gpx_1, ogr_gpx_2, ogr_gpx_4,", "Suite # Purpose: Test GPX driver functionality. # Author: <NAME>", "gdaltest.gpx_ds = None if gdaltest.gpx_ds is None: gdaltest.have_gpx = 0", "ogr.Open( 'data/test.gpx' ) except: gdaltest.gpx_ds = None if gdaltest.gpx_ds is", "'PID2'] tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Primary_ID', expect ) if not", "Software without restriction, including without limitation # the rights to", "feat = lyr.GetNextFeature() f_geom = feat.GetGeometryRef() if f_geom.ExportToWkt()!= 'MULTILINESTRING EMPTY':", "= gpx2_lyr.GetLayerDefn() ) feat = gpx_lyr.GetNextFeature() while feat is not", "expect = ['route point name', None, None] tr = ogrtest.check_features_against_list(", "0: gdaltest.post_reason('CreateFeature failed.') return 'fail' feat = gpx_lyr.GetNextFeature() dst_feat.Destroy() #", "'../pymod' ) import gdaltest import ogrtest import ogr import osr", "0: return 'fail' feat.Destroy() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat,", "expect = ['SID1', 'SID2'] tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Secondary_ID', expect", "feat.Destroy() return 'success' ############################################################################### # Test route_points gpx layer. def", "whom the # Software is furnished to do so, subject", "Test route_points gpx layer. 
def ogr_gpx_5(): if not gdaltest.have_gpx: return", "tr: return 'fail' lyr.ResetReading() feat = lyr.GetNextFeature() if ogrtest.check_feature_geometry( feat,", "if not tr: return 'fail' lyr.ResetReading() expect = ['type', None]", ") bna_lyr.ResetReading() for i in range(bna_lyr.GetLayerDefn().GetFieldCount()): field_defn = bna_lyr.GetLayerDefn().GetFieldDefn(i) gpx_lyr.CreateField(", "is None: return 'fail' lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' ) lyr.ResetReading()", "0) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom =", "dst_feat.Destroy() # Duplicate routes gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' ) gpx2_lyr", "feat, 'MULTILINESTRING EMPTY', max_error = 0.0001 ) != 0: return", "# ############################################################################### # Copyright (c) 2007, <NAME> <even dot rouault", "gpx file. def ogr_gpx_6(): if not gdaltest.have_gpx: return 'skip' if", "= ogr.Feature( feature_def = gpx_lyr.GetLayerDefn() ) feat = bna_lyr.GetNextFeature() while", "expect ) if not tr: return 'fail' lyr.ResetReading() expect =", "been well written gdaltest.gpx_ds = ogr.Open('tmp/gpx.gpx') gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints'", "route_points gpx layer. def ogr_gpx_3(): if not gdaltest.have_gpx: return 'skip'", "'routes' ) gpx2_lyr = gpx2_ds.CreateLayer( 'routes', geom_type = ogr.wkbLineString )", "1) feat.SetField('track_seg_id', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None f", "not None: gdaltest.gpx_ds.Destroy() gdaltest.gpx_ds = None try: os.remove ('tmp/gpx.gpx') except:", ") except: gdaltest.gpx_ds = None if gdaltest.gpx_ds is None: gdaltest.have_gpx", "= ['waypoint name', None] tr = ogrtest.check_features_against_list( lyr, 'name', expect", "name', None, None, None] tr = ogrtest.check_features_against_list( lyr, 'name', expect", "= f.read() f_ref_content = f_ref.read() f.close() f_ref.close() if f_content.find(f_ref_content) ==", "except: pass co_opts = [ 'GPX_USE_EXTENSIONS=yes' ] # Duplicate waypoints", "pass gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = ['LINEFORMAT=LF']) lyr = gdaltest.gpx_ds.CreateLayer(", "= ogr.Feature(lyr.GetLayerDefn()) geom = ogr.CreateGeometryFromWkt('POINT(2 49)') feat.SetField('route_name', 'ROUTE_NAME') feat.SetField('route_fid', 0)", "'bna_for_gpx_points' ) gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = co_opts ) gpx_lyr", "gdaltest.gpx_ds.GetLayerByName( 'waypoints' ) expect = [2, None] tr = ogrtest.check_features_against_list(", "'route_points' ) expect = ['route point name', None, None] tr", "'ROUTE_NAME') feat.SetField('route_fid', 0) feat.SetGeometry(geom) lyr.CreateFeature(feat) feat = ogr.Feature(lyr.GetLayerDefn()) geom =", ") expect = [2, None] tr = ogrtest.check_features_against_list( lyr, 'ele',", "return 'success' ############################################################################### # Copy our small gpx file to", "waypoints gpx layer. 
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project:  GDAL/OGR Test Suite
# Purpose:  Test GPX driver functionality.
# Author:   <NAME> <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2007, <NAME> <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################

import os
import sys
import string

sys.path.append( '../pymod' )

import gdaltest
import ogrtest
import ogr
import osr
import gdal

###############################################################################
# Open the test datasource and check the layer count.

def ogr_gpx_init():
    gdaltest.gpx_ds = None

    try:
        gdaltest.gpx_ds = ogr.Open( 'data/test.gpx' )
    except:
        gdaltest.gpx_ds = None

    if gdaltest.gpx_ds is None:
        gdaltest.have_gpx = 0
    else:
        gdaltest.have_gpx = 1

    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds.GetLayerCount() != 5:
        gdaltest.post_reason( 'wrong number of layers' )
        return 'fail'

    return 'success'
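# Note: ogrtest.check_features_against_list() (helper from ../pymod) walks a
# layer and compares the named field of each feature against the expected
# list, returning a true value on success. The read tests below all repeat
# the same pattern: check one attribute, then ResetReading() before the next.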
###############################################################################
# Test waypoints gpx layer.

def ogr_gpx_1():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'fail'

    lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' )

    expect = [2, None]
    tr = ogrtest.check_features_against_list( lyr, 'ele', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    expect = ['waypoint name', None]
    tr = ogrtest.check_features_against_list( lyr, 'name', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    expect = ['href', None]
    tr = ogrtest.check_features_against_list( lyr, 'link1_href', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    expect = ['text', None]
    tr = ogrtest.check_features_against_list( lyr, 'link1_text', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    expect = ['type', None]
    tr = ogrtest.check_features_against_list( lyr, 'link1_type', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    expect = ['href2', None]
    tr = ogrtest.check_features_against_list( lyr, 'link2_href', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    expect = ['text2', None]
    tr = ogrtest.check_features_against_list( lyr, 'link2_text', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    expect = ['type2', None]
    tr = ogrtest.check_features_against_list( lyr, 'link2_type', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    expect = ['2007/11/25 17:58:00+01', None]
    tr = ogrtest.check_features_against_list( lyr, 'time', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'POINT (1 0)',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'POINT (4 3)',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    return 'success'
###############################################################################
# Test routes gpx layer.

def ogr_gpx_2():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'fail'

    lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' )

    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'LINESTRING (6 5,9 8,12 11)',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'LINESTRING EMPTY',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    return 'success'

###############################################################################
# Test route_points gpx layer.

def ogr_gpx_3():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'fail'

    lyr = gdaltest.gpx_ds.GetLayerByName( 'route_points' )

    expect = ['route point name', None, None]
    tr = ogrtest.check_features_against_list( lyr, 'name', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'POINT (6 5)',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    return 'success'
###############################################################################
# Test tracks gpx layer.

def ogr_gpx_4():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'fail'

    lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' )

    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat,
                                       'MULTILINESTRING ((15 14,18 17),(21 20,24 23))',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    feat = lyr.GetNextFeature()
    f_geom = feat.GetGeometryRef()
    if f_geom.ExportToWkt() != 'MULTILINESTRING EMPTY':
        return 'fail'
    feat.Destroy()

    return 'success'

###############################################################################
# Test track_points gpx layer.

def ogr_gpx_5():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'fail'

    lyr = gdaltest.gpx_ds.GetLayerByName( 'track_points' )

    expect = ['track point name', None, None, None]
    tr = ogrtest.check_features_against_list( lyr, 'name', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'POINT (15 14)',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    return 'success'
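# Note: the copy test below reuses a single destination feature per layer:
# ogr.Feature.SetFrom() overwrites dst_feat with each source feature's fields
# and geometry before CreateFeature() writes it, so one allocation serves the
# whole layer (the usual copy idiom with the old-generation bindings).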
###############################################################################
# Copy our small gpx file to a new gpx file.

def ogr_gpx_6():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'skip'

    # Remove any pre-existing copy of the output file.
    try:
        gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
        ogr.GetDriverByName('CSV').DeleteDataSource( 'tmp/gpx.gpx' )
        gdal.PopErrorHandler()
    except:
        pass

    co_opts = [ ]

    # Duplicate waypoints
    gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' )

    gpx2_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',
                                                          options = co_opts )

    gpx2_lyr = gpx2_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint )

    gpx_lyr.ResetReading()

    dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )

    feat = gpx_lyr.GetNextFeature()
    while feat is not None:
        dst_feat.SetFrom( feat )
        if gpx2_lyr.CreateFeature( dst_feat ) != 0:
            gdaltest.post_reason('CreateFeature failed.')
            return 'fail'
        feat = gpx_lyr.GetNextFeature()

    dst_feat.Destroy()

    # Duplicate routes
    gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' )

    gpx2_lyr = gpx2_ds.CreateLayer( 'routes', geom_type = ogr.wkbLineString )

    gpx_lyr.ResetReading()

    dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )

    feat = gpx_lyr.GetNextFeature()
    while feat is not None:
        dst_feat.SetFrom( feat )
        if gpx2_lyr.CreateFeature( dst_feat ) != 0:
            gdaltest.post_reason('CreateFeature failed.')
            return 'fail'
        feat = gpx_lyr.GetNextFeature()

    dst_feat.Destroy()

    # Duplicate tracks
    gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' )

    gpx2_lyr = gpx2_ds.CreateLayer( 'tracks', geom_type = ogr.wkbMultiLineString )

    gpx_lyr.ResetReading()

    dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )

    feat = gpx_lyr.GetNextFeature()
    while feat is not None:
        dst_feat.SetFrom( feat )
        if gpx2_lyr.CreateFeature( dst_feat ) != 0:
            gdaltest.post_reason('CreateFeature failed.')
            return 'fail'
        feat = gpx_lyr.GetNextFeature()

    dst_feat.Destroy()

    gpx_lyr = None
    gpx2_lyr = None

    # Explicit destroy is required for old-gen python bindings
    gpx2_ds.Destroy()
    gdaltest.gpx_ds.Destroy()

    gdaltest.gpx_ds = ogr.Open( 'tmp/gpx.gpx' )

    return 'success'
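# Note: with the GPX_USE_EXTENSIONS=yes creation option, fields that have no
# standard GPX equivalent are written inside an <extensions> element; when the
# file is read back those fields come out with an 'ogr_' prefix, which is why
# the checks below look for ogr_Primary_ID, ogr_Secondary_ID and ogr_Third_ID.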
###############################################################################
# Output extra fields as <extensions>.

def ogr_gpx_7():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is not None:
        gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None

    bna_ds = ogr.Open( 'data/bna_for_gpx.bna' )

    try:
        os.remove('tmp/gpx.gpx')
    except:
        pass

    co_opts = [ 'GPX_USE_EXTENSIONS=yes' ]

    # Duplicate waypoints
    bna_lyr = bna_ds.GetLayerByName( 'bna_for_gpx_points' )

    gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',
                                                                  options = co_opts )

    gpx_lyr = gdaltest.gpx_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint )

    bna_lyr.ResetReading()

    for i in range(bna_lyr.GetLayerDefn().GetFieldCount()):
        field_defn = bna_lyr.GetLayerDefn().GetFieldDefn(i)
        gpx_lyr.CreateField( field_defn )

    dst_feat = ogr.Feature( feature_def = gpx_lyr.GetLayerDefn() )

    feat = bna_lyr.GetNextFeature()
    while feat is not None:
        dst_feat.SetFrom( feat )
        if gpx_lyr.CreateFeature( dst_feat ) != 0:
            gdaltest.post_reason('CreateFeature failed.')
            return 'fail'
        feat = bna_lyr.GetNextFeature()

    dst_feat.Destroy()

    bna_ds.Destroy()
    gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None

    # Now check that the extensions fields have been written correctly.
    gdaltest.gpx_ds = ogr.Open('tmp/gpx.gpx')

    gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' )

    expect = ['PID1', 'PID2']
    tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Primary_ID', expect )
    if not tr:
        return 'fail'

    gpx_lyr.ResetReading()
    expect = ['SID1', 'SID2']
    tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Secondary_ID', expect )
    if not tr:
        return 'fail'

    gpx_lyr.ResetReading()
    expect = ['TID1', None]
    tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Third_ID', expect )
    if not tr:
        return 'fail'

    return 'success'
###############################################################################
# Write route_points and track_points layers and compare the output against a
# reference file.

def ogr_gpx_8():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is not None:
        gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None

    try:
        os.remove('tmp/gpx.gpx')
    except:
        pass

    gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',
                                                                  options = ['LINEFORMAT=LF'])

    lyr = gdaltest.gpx_ds.CreateLayer( 'route_points', geom_type = ogr.wkbPoint )

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(2 49)')
    feat.SetField('route_name', 'ROUTE_NAME')
    feat.SetField('route_fid', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 50)')
    feat.SetField('route_name', '--ignored--')
    feat.SetField('route_fid', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 51)')
    feat.SetField('route_name', 'ROUTE_NAME2')
    feat.SetField('route_fid', 1)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 49)')
    feat.SetField('route_fid', 1)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    lyr = gdaltest.gpx_ds.CreateLayer( 'track_points', geom_type = ogr.wkbPoint )

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(2 49)')
    feat.SetField('track_name', 'TRACK_NAME')
    feat.SetField('track_fid', 0)
    feat.SetField('track_seg_id', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 50)')
    feat.SetField('track_name', '--ignored--')
    feat.SetField('track_fid', 0)
    feat.SetField('track_seg_id', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 51)')
    feat.SetField('track_fid', 0)
    feat.SetField('track_seg_id', 1)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 49)')
    feat.SetField('track_name', 'TRACK_NAME2')
    feat.SetField('track_fid', 1)
    feat.SetField('track_seg_id', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None

    f = open('tmp/gpx.gpx','rb')
    f_ref = open('data/ogr_gpx_8_ref.txt','rb')
    f_content = f.read()
    f_ref_content = f_ref.read()
    f.close()
    f_ref.close()

    if f_content.find(f_ref_content) == -1:
        gdaltest.post_reason('did not get expected result')
        print(f_content)
        return 'fail'

    return 'success'
###############################################################################
# Test route_points gpx layer.

def ogr_gpx_3():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'fail'

    lyr = gdaltest.gpx_ds.GetLayerByName( 'route_points' )

    expect = ['route point name', None, None]
    tr = ogrtest.check_features_against_list( lyr, 'name', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'POINT (4 3)',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    return 'success'

###############################################################################
# Test tracks gpx layer.

def ogr_gpx_4():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'fail'

    lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' )

    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING ((15 14,18 17),(21 20,24 23))',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    feat = lyr.GetNextFeature()
    f_geom = feat.GetGeometryRef()
    if f_geom.ExportToWkt() != 'MULTILINESTRING EMPTY':
        return 'fail'
    feat.Destroy()

    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING EMPTY',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    return 'success'
###############################################################################
# Test track_points gpx layer.

def ogr_gpx_5():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is None:
        return 'fail'

    lyr = gdaltest.gpx_ds.GetLayerByName( 'track_points' )

    expect = ['track point name', None, None, None]
    tr = ogrtest.check_features_against_list( lyr, 'name', expect )
    if not tr:
        return 'fail'

    lyr.ResetReading()
    feat = lyr.GetNextFeature()
    if ogrtest.check_feature_geometry( feat, 'POINT (15 14)',
                                       max_error = 0.0001 ) != 0:
        return 'fail'
    feat.Destroy()

    return 'success'

###############################################################################
# Copy our small gpx file to a new gpx file.

def ogr_gpx_6():
    if not gdaltest.have_gpx:
        return 'skip'

    try:
        gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
        ogr.GetDriverByName('CSV').DeleteDataSource( 'tmp/gpx.gpx' )
        gdal.PopErrorHandler()
    except:
        pass

    co_opts = [ ]

    # Duplicate waypoints
    gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' )

    gpx2_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',
                                                          options = co_opts )

    gpx2_lyr = gpx2_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint )

    gpx_lyr.ResetReading()

    dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )

    feat = gpx_lyr.GetNextFeature()
    while feat is not None:
        dst_feat.SetFrom( feat )
        if gpx2_lyr.CreateFeature( dst_feat ) != 0:
            gdaltest.post_reason('CreateFeature failed.')
            return 'fail'
        feat = gpx_lyr.GetNextFeature()

    dst_feat.Destroy()

    # Duplicate routes
    gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' )

    gpx2_lyr = gpx2_ds.CreateLayer( 'routes', geom_type = ogr.wkbLineString )

    gpx_lyr.ResetReading()

    dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )

    feat = gpx_lyr.GetNextFeature()
    while feat is not None:
        dst_feat.SetFrom( feat )
        if gpx2_lyr.CreateFeature( dst_feat ) != 0:
            gdaltest.post_reason('CreateFeature failed.')
            return 'fail'
        feat = gpx_lyr.GetNextFeature()

    dst_feat.Destroy()

    # Duplicate tracks
    gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' )

    gpx2_lyr = gpx2_ds.CreateLayer( 'tracks', geom_type = ogr.wkbMultiLineString )

    gpx_lyr.ResetReading()
    dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )

    feat = gpx_lyr.GetNextFeature()
    while feat is not None:
        dst_feat.SetFrom( feat )
        if gpx2_lyr.CreateFeature( dst_feat ) != 0:
            gdaltest.post_reason('CreateFeature failed.')
            return 'fail'
        feat = gpx_lyr.GetNextFeature()

    dst_feat.Destroy()

    gpx_lyr = None
    gpx2_lyr = None

    # Explicit destroy is required for old-gen python bindings
    gpx2_ds.Destroy()
    gdaltest.gpx_ds.Destroy()

    gdaltest.gpx_ds = ogr.Open( 'tmp/gpx.gpx' )

    return 'success'

###############################################################################
# Output extra fields as <extensions>.

def ogr_gpx_7():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is not None:
        gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None

    bna_ds = ogr.Open( 'data/bna_for_gpx.bna' )

    try:
        os.remove ('tmp/gpx.gpx')
    except:
        pass

    co_opts = [ 'GPX_USE_EXTENSIONS=yes' ]

    # Duplicate waypoints
    bna_lyr = bna_ds.GetLayerByName( 'bna_for_gpx_points' )

    gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',
                                                                  options = co_opts )

    gpx_lyr = gdaltest.gpx_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint )

    bna_lyr.ResetReading()

    for i in range(bna_lyr.GetLayerDefn().GetFieldCount()):
        field_defn = bna_lyr.GetLayerDefn().GetFieldDefn(i)
        gpx_lyr.CreateField( field_defn )

    dst_feat = ogr.Feature( feature_def = gpx_lyr.GetLayerDefn() )

    feat = bna_lyr.GetNextFeature()
    while feat is not None:
        dst_feat.SetFrom( feat )
        if gpx_lyr.CreateFeature( dst_feat ) != 0:
            gdaltest.post_reason('CreateFeature failed.')
            return 'fail'
        feat = bna_lyr.GetNextFeature()

    dst_feat.Destroy()

    bna_ds.Destroy()

    gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None

    # Now check that the extensions fields have been well written
    gdaltest.gpx_ds = ogr.Open('tmp/gpx.gpx')

    gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' )

    expect = ['PID1', 'PID2']
    tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Primary_ID', expect )
    if not tr:
        return 'fail'

    gpx_lyr.ResetReading()

    expect = ['SID1', 'SID2']
    tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Secondary_ID', expect )
    if not tr:
        return 'fail'

    gpx_lyr.ResetReading()

    expect = ['TID1', None]
    tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Third_ID', expect )
    if not tr:
        return 'fail'

    return 'success'
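###############################################################################
# A minimal read-back sketch (illustrative only, not one of the registered
# tests): iterates the waypoints layer of the file written above and prints
# each feature. It assumes 'tmp/gpx.gpx' and the 'name' field created by the
# tests; only OGR calls already used in this script appear here.

def ogr_gpx_read_sketch():
    ds = ogr.Open( 'tmp/gpx.gpx' )
    if ds is None:
        return 'skip'
    lyr = ds.GetLayerByName( 'waypoints' )
    feat = lyr.GetNextFeature()
    while feat is not None:
        geom = feat.GetGeometryRef()
        print( feat.GetField('name'), geom.ExportToWkt() )
        feat = lyr.GetNextFeature()
    ds.Destroy()
    return 'success'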
###############################################################################
# Output extra fields as <extensions>.

def ogr_gpx_8():
    if not gdaltest.have_gpx:
        return 'skip'

    if gdaltest.gpx_ds is not None:
        gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None

    try:
        os.remove ('tmp/gpx.gpx')
    except:
        pass

    gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',
                                                                  options = ['LINEFORMAT=LF'])

    lyr = gdaltest.gpx_ds.CreateLayer( 'route_points', geom_type = ogr.wkbPoint )

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(2 49)')
    feat.SetField('route_name', 'ROUTE_NAME')
    feat.SetField('route_fid', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 50)')
    feat.SetField('route_name', '--ignored--')
    feat.SetField('route_fid', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 51)')
    feat.SetField('route_name', 'ROUTE_NAME2')
    feat.SetField('route_fid', 1)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 49)')
    feat.SetField('route_fid', 1)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    lyr = gdaltest.gpx_ds.CreateLayer( 'track_points', geom_type = ogr.wkbPoint )

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(2 49)')
    feat.SetField('track_name', 'TRACK_NAME')
    feat.SetField('track_fid', 0)
    feat.SetField('track_seg_id', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 50)')
    feat.SetField('track_name', '--ignored--')
    feat.SetField('track_fid', 0)
    feat.SetField('track_seg_id', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 51)')
    feat.SetField('track_fid', 0)
    feat.SetField('track_seg_id', 1)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    feat = ogr.Feature(lyr.GetLayerDefn())
    geom = ogr.CreateGeometryFromWkt('POINT(3 49)')
    feat.SetField('track_fid', 1)
    feat.SetField('track_seg_id', 0)
    feat.SetGeometry(geom)
    lyr.CreateFeature(feat)

    gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None

    f = open('tmp/gpx.gpx','rb')
    f_ref = open('data/ogr_gpx_8_ref.txt','rb')
    f_content = f.read()
    f_ref_content = f_ref.read()
    f.close()
    f_ref.close()

    if f_content != f_ref_content:
        print(f_content)
        return 'fail'

    return 'success'

###############################################################################
#

def ogr_gpx_cleanup():
    if gdaltest.gpx_ds is not None:
        gdaltest.gpx_ds.Destroy()
    gdaltest.gpx_ds = None
    try:
        os.remove ('tmp/gpx.gpx')
    except:
        pass
    return 'success'

gdaltest_list = [
    ogr_gpx_init,
    ogr_gpx_1,
    ogr_gpx_2,
    ogr_gpx_3,
    ogr_gpx_4,
    ogr_gpx_5,
    ogr_gpx_6,
    # Rerun test 1, 2 and 4 with generated tmp/tmp.gpx
    ogr_gpx_1,
    ogr_gpx_2,
    ogr_gpx_4,
    ogr_gpx_7,
    ogr_gpx_8,
    ogr_gpx_cleanup ]

if __name__ == '__main__':

    gdaltest.setup_run( 'ogr_gpx' )

    gdaltest.run_tests( gdaltest_list )

    gdaltest.summarize()
[ "train_tree(self,input_batch, input_length, target_batch, target_length, nums_stack_batch, num_size_batch, generate_nums, num_pos, unk, num_start,", "# Run words through encoder #encoder_outputs, problem_output = self.encoder(input_var, input_length)", "self.beam_size = config['beam_size'] self.max_out_len = config['max_output_len'] self.embedding_size = config[\"embedding_size\"] self.dropout_ratio", "self.embedder = BasicEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio) # self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size,", "include keywords 'question', 'ques len', 'equation', 'equ len', 'num stack',", "seq_mask = torch.BoolTensor(1, input_length).fill_(0) # Turn padded arrays into (batch_size", "= self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start, unk) target[t] = target_t if", "elif symbol == SpecialTokens.UNK_TOKEN: try: pos_list = num_stack.pop() c =", "= self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.hidden_size) # B x P", "= self.out_symbol2idx[SpecialTokens.EOS_TOKEN] except: self.out_eos_token = None try: self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN]", "encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask) # all_leafs.append(p_leaf) outputs = torch.cat((op,", "b.left_childs num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack, left_childs, encoder_outputs,", "target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start, unk) target[t] =", "= [self.out_symbol2idx[symbol] for symbol in generate_list] self.mask_list = NumMask.number self.num_start", "-> float: \"\"\"Finish forward-propagating, calculating loss and back-propagation. :param batch_data:", "in generate_list] self.mask_list = NumMask.number self.num_start = dataset.num_start self.operator_nums =", "'num stack', 'num size', 'num pos' \"\"\" seq = torch.tensor(batch_data[\"question\"]).to(self.device)", "mask for attention all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start,", "r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1), node_stacks, target[t].tolist(),", "symbol == SpecialTokens.UNK_TOKEN: try: pos_list = num_stack.pop() c = num_list[pos_list[0]]", ":param batch_data: one batch data. :return: loss value. 
batch_data should", "# left_childs = torch.stack(b.left_childs) left_childs = b.left_childs num_score, op, current_embeddings,", "self.out_symbol2idx[SpecialTokens.EOS_TOKEN] except: self.out_eos_token = None try: self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN] except:", "module.Embedder.basic_embedder import BasicEmbedder from module.Decoder.tree_decoder import SARTreeDecoder from module.Layer.tree_layers import", "SemanticAlignmentModule from module.Strategy.beam_search import TreeBeam from loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy", "and output variables node_stacks = [[TreeNode(_)] for _ in problem_output.split(1,", "self.decoder = Prediction(self.hidden_size,self.operator_nums,self.generate_size,self.dropout_ratio) self.node_generater = GenerateNode(self.hidden_size, self.operator_nums, self.embedding_size, self.dropout_ratio) self.merge", "sa_len # print(total_semanti_alognment_loss) # op_target = target < num_start #", "seq_mask = [] max_len = max(input_length) for i in input_length:", "generate_list] self.mask_list = NumMask.number self.num_start = dataset.num_start self.operator_nums = dataset.operator_nums", "total_semanti_alognment_loss / sa_len # print(total_semanti_alognment_loss) # op_target = target <", "-> tuple: \"\"\"Model test. :param batch_data: one batch data. :return:", "i - num_start].unsqueeze(0) while len(o) > 0 and o[-1].terminal: sub_stree", "config[\"embedding_size\"] self.dropout_ratio = config[\"dropout_ratio\"] self.num_layers = config[\"num_layers\"] self.rnn_cell_type = config[\"rnn_cell_type\"]", "= total_semanti_alognment_loss / sa_len # print(total_semanti_alognment_loss) # op_target = target", "input_batch.transpose(0, 1) num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0) padding_hidden =", "in beams: if len(b.node_stack[0]) != 0: flag = False if", "in range(self.hidden_size)]).unsqueeze(0) batch_size = 1 if self.USE_CUDA: input_var = input_var.cuda()", "indices = torch.LongTensor(indices) masked_index = torch.BoolTensor(masked_index) masked_index = masked_index.view(batch_size, num_size,", "_ in range(len(num_pos[b]), num_size)] masked_index += [temp_1 for _ in", "pade_outputs[0, :, self.hidden_size:] encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:,", "num_start + num] > max_score: target[i] = num + num_start", "idx in [self.out_sos_token, self.out_eos_token, self.out_pad_token]: break symbol = self.out_idx2symbol[idx] if", "* total_semanti_alognment_loss # loss = loss_0 + loss_1 loss.backward() #", "+ num] > max_score: target[i] = num + num_start max_score", "range(batch_size): for i in num_pos[b]: indices.append(i + b * sen_len)", "indices = indices.cuda() masked_index = masked_index.cuda() all_outputs = encoder_outputs.transpose(0, 1).contiguous()", "= [] break res.append(num_list[num_idx]) elif symbol == SpecialTokens.UNK_TOKEN: try: pos_list", "x S if self.USE_CUDA: # all_leafs = all_leafs.cuda() all_node_outputs =", "o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False)) # print(o[-1].embedding.size()) # print(encoder_outputs[idx].size()) else: # 数字 current_num", "current_context) current_node_stack[0].append(TreeNode(right_child)) current_node_stack[0].append(TreeNode(left_child, left_flag=True)) current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False)) else: current_num = current_nums_embeddings[0,", "num] > max_score: target[i] = num + num_start max_score =", "l, r, node_stack, i, o in 
    def train_tree(self, input_batch, input_length, target_batch, target_length, nums_stack_batch,
                   num_size_batch, generate_nums, num_pos, unk, num_start, english=False,
                   var_nums=[], batch_first=False):
        # sequence mask for attention
        seq_mask = []
        max_len = max(input_length)
        for i in input_length:
            seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
        seq_mask = torch.ByteTensor(seq_mask)

        num_mask = []
        # largest number-position count + constant-number count + unknown-variable count
        max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums)
        for i in num_size_batch:
            d = i + len(generate_nums) + len(var_nums)
            num_mask.append([0] * d + [1] * (max_num_size - d))
        num_mask = torch.ByteTensor(num_mask)  # masks irrelevant number slots so a wrong Nx cannot be generated

        #unk = output_lang.word2index["UNK"]

        # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
        input_var = input_batch.transpose(0, 1)
        target = target_batch.transpose(0, 1)

        padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0)
        batch_size = len(input_length)

        if self.USE_CUDA:
            input_var = input_var.cuda()
            seq_mask = seq_mask.cuda()
            padding_hidden = padding_hidden.cuda()
            num_mask = num_mask.cuda()

        # Zero gradients of both optimizers
        # Run words through encoder
        #encoder_outputs, problem_output = self.encoder(input_var, input_length)
        seq_emb = self.embedder(input_var)
        pade_outputs, _ = self.encoder(seq_emb, input_length)
        problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
        encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]

        # Prepare input and output variables
        node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]  # root embedding B x 1

        max_target_length = max(target_length)

        all_node_outputs = []
        all_sa_outputs = []
        # all_leafs = []

        copy_num_len = [len(_) for _ in num_pos]
        num_size = max(copy_num_len)
        # gather the encoder outputs at the positions of the numbers in each problem
        all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size,
                                                                       num_size, self.encoder.hidden_size)

        embeddings_stacks = [[] for _ in range(batch_size)]  # B x 1, current tree state / subtree embedding / output
        left_childs = [None for _ in range(batch_size)]  # B x 1

        for t in range(max_target_length):
            num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
                node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
                seq_mask, num_mask)

            # all_leafs.append(p_leaf)
            outputs = torch.cat((op, num_score), 1)
            all_node_outputs.append(outputs)

            target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs,
                                                                nums_stack_batch, num_start, unk)
            target[t] = target_t
            if self.USE_CUDA:
                generate_input = generate_input.cuda()
            left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input,
                                                                      current_context)
            left_childs = []
            for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1),
                                                   right_child.split(1), node_stacks,
                                                   target[t].tolist(), embeddings_stacks):
                if len(node_stack) != 0:
                    node = node_stack.pop()
                else:
                    left_childs.append(None)
                    continue
                # unknowns are handled as numbers, SEP as an operator
                if i < num_start:  # operator
                    node_stack.append(TreeNode(r))
                    node_stack.append(TreeNode(l, left_flag=True))
                    o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False))
                    # print(o[-1].embedding.size())
                    # print(encoder_outputs[idx].size())
                else:  # number
                    current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
                    while len(o) > 0 and o[-1].terminal:
                        sub_stree = o.pop()
                        op = o.pop()
                        current_num = self.merge(op.embedding, sub_stree.embedding, current_num)  # Subtree embedding
                    if batch_first:
                        encoder_mapping, decoder_mapping = self.sa(current_num, encoder_outputs[idx])
                    else:
                        temp_encoder_outputs = encoder_outputs.transpose(0, 1)
                        encoder_mapping, decoder_mapping = self.sa(current_num, temp_encoder_outputs[idx])
                    all_sa_outputs.append((encoder_mapping, decoder_mapping))
                    o.append(TreeEmbedding(current_num, terminal=True))
                if len(o) > 0 and o[-1].terminal:
                    left_childs.append(o[-1].embedding)
                else:
                    left_childs.append(None)

        # all_leafs = torch.stack(all_leafs, dim=1)  # B x S x 2
        all_node_outputs = torch.stack(all_node_outputs, dim=1)  # B x S x N

        target = target.transpose(0, 1).contiguous()  # B x S
        if self.USE_CUDA:
            # all_leafs = all_leafs.cuda()
            all_node_outputs = all_node_outputs.cuda()
            target = target.cuda()
            new_all_sa_outputs = []
            for sa_pair in all_sa_outputs:
                new_all_sa_outputs.append((sa_pair[0].cuda(), sa_pair[1].cuda()))
            all_sa_outputs = new_all_sa_outputs
        else:
            pass

        # target_length = torch.LongTensor(target_length)
        semantic_alignment_loss = nn.MSELoss()
        total_semanti_alognment_loss = 0
        sa_len = len(all_sa_outputs)
        for sa_pair in all_sa_outputs:
            total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1])
        # print(total_semanti_alognment_loss)
        total_semanti_alognment_loss = total_semanti_alognment_loss / sa_len
        # print(total_semanti_alognment_loss)

        # op_target = target < num_start
        # loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length)
        loss = masked_cross_entropy(all_node_outputs, target, target_length) + 0.01 * total_semanti_alognment_loss
        # loss = loss_0 + loss_1
        loss.backward()

        # clip the grad
        # torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5)
        # torch.nn.utils.clip_grad_norm_(predict.parameters(), 5)
        # torch.nn.utils.clip_grad_norm_(generate.parameters(), 5)

        # Update parameters with optimizers
        return loss.item()  # , loss_0.item(), loss_1.item()
    def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start,
                      beam_size=5, max_length=30):
        seq_mask = torch.BoolTensor(1, input_length).fill_(0)
        # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
        input_var = input_batch.transpose(0, 1)

        num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0)

        padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
        batch_size = 1

        if self.USE_CUDA:
            input_var = input_var.cuda()
            seq_mask = seq_mask.cuda()
            padding_hidden = padding_hidden.cuda()
            num_mask = num_mask.cuda()

        # Run words through encoder
        seq_emb = self.embedder(input_var)
        pade_outputs, _ = self.encoder(seq_emb, input_length)
        problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
        encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]

        # Prepare input and output variables
        node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]

        num_size = len(num_pos[0])
        all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size,
                                                                       num_size, self.hidden_size)  # B x P x N
        embeddings_stacks = [[] for _ in range(batch_size)]
        left_childs = [None for _ in range(batch_size)]

        beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]

        for t in range(max_length):
            current_beams = []
            while len(beams) > 0:
                b = beams.pop()
                if len(b.node_stack[0]) == 0:
                    current_beams.append(b)
                    continue
                # left_childs = torch.stack(b.left_childs)
                left_childs = b.left_childs

                num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
                    b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
                    seq_mask, num_mask)

                out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
                # out_score = p_leaf * out_score

                topv, topi = out_score.topk(beam_size)

                for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
                    current_node_stack = copy_list(b.node_stack)
                    current_left_childs = []
                    current_embeddings_stacks = copy_list(b.embedding_stack)
                    current_out = copy.deepcopy(b.out)

                    out_token = int(ti)
                    current_out.append(out_token)

                    node = current_node_stack[0].pop()

                    if out_token < num_start:
                        generate_input = torch.LongTensor([out_token])
                        if self.USE_CUDA:
                            generate_input = generate_input.cuda()
                        left_child, right_child, node_label = self.node_generater(current_embeddings,
                                                                                  generate_input, current_context)
                        current_node_stack[0].append(TreeNode(right_child))
                        current_node_stack[0].append(TreeNode(left_child, left_flag=True))
                        current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
                    else:
                        current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
                        while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                            sub_stree = current_embeddings_stacks[0].pop()
                            op = current_embeddings_stacks[0].pop()
                            current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
                        current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
                    if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                        current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
                    else:
                        current_left_childs.append(None)
                    current_beams.append(TreeBeam(b.score + float(tv), current_node_stack,
                                                  current_embeddings_stacks, current_left_childs, current_out))
            beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
            beams = beams[:beam_size]
            flag = True
            for b in beams:
                if len(b.node_stack[0]) != 0:
                    flag = False
            if flag:
                break

        return beams[0].out
:return: predicted", "generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk): # when the decoder", "num_pos, batch_size, num_size, self.encoder.hidden_size) embeddings_stacks = [[] for _ in", "[] # all_leafs = [] copy_num_len = [len(_) for _", "= input_var.cuda() seq_mask = seq_mask.cuda() padding_hidden = padding_hidden.cuda() num_mask =", "size', 'num pos' \"\"\" seq = torch.tensor(batch_data[\"question\"]).to(self.device) seq_length = torch.tensor(batch_data[\"ques", "Math Word Problems\" in EMNLP 2020. \"\"\" def __init__(self, config,", "topv, topi = out_score.topk(beam_size) for tv, ti in zip(topv.split(1, dim=1),", "[] res = [] for s_i in range(seq_len): idx =", "dataset.out_symbol2idx self.out_idx2symbol = dataset.out_idx2symbol generate_list = dataset.generate_list self.generate_nums = [self.out_symbol2idx[symbol]", "num_mask.append([0] * d + [1] * (max_num_size - d)) num_mask", "input_var = input_var.cuda() seq_mask = seq_mask.cuda() padding_hidden = padding_hidden.cuda() num_mask", "# all_leafs.append(p_leaf) outputs = torch.cat((op, num_score), 1) all_node_outputs.append(outputs) target_t, generate_input", "break symbol = self.out_idx2symbol[idx] if \"NUM\" in symbol: num_idx =", "Qin et al. \"Semantically-Aligned Universal Tree-Structured Solver for Math Word", "d = i + len(generate_nums) + len(var_nums) num_mask.append([0] * d", "# Subtree embedding if batch_first: encoder_mapping, decoder_mapping = self.sa(current_num, encoder_outputs[idx])", "= True for b in beams: if len(b.node_stack[0]) != 0:", "-> (B x S) x H all_num = all_embedding.index_select(0, indices)", "= copy.deepcopy(b.out) out_token = int(ti) current_out.append(out_token) node = current_node_stack[0].pop() if", "self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start, self.beam_size, self.max_out_len) all_output = self.convert_idx2symbol(all_node_output,", "(max_num_size - d)) num_mask = torch.ByteTensor(num_mask) # 用于屏蔽无关数字,防止生成错误的Nx #unk =", "= copy.deepcopy(target) for i in range(len(target)): if target[i] == unk:", "all_node_outputs.append(outputs) target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start, unk) target[t]", "nums_stack, num_size, generate_nums, num_pos, unk, num_start) return loss def model_test(self,", "= encoder_outputs.size(0) masked_index = [] temp_1 = [1 for _", "and current_embeddings_stacks[0][-1].terminal: current_left_childs.append(current_embeddings_stacks[0][-1].embedding) else: current_left_childs.append(None) current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks,", "Tree-Structured Solver for Math Word Problems\" in EMNLP 2020. 
\"\"\"", "= self.num_start # sequence mask for attention unk = self.unk_token", "o[-1].terminal: left_childs.append(o[-1].embedding) else: left_childs.append(None) # all_leafs = torch.stack(all_leafs, dim=1) #", "x N embeddings_stacks = [[] for _ in range(batch_size)] left_childs", "提取与问题相关的数字embedding all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.encoder.hidden_size) embeddings_stacks =", "self.decoder(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask) out_score = nn.functional.log_softmax(torch.cat((op,", "= padding_hidden.cuda() num_mask = num_mask.cuda() # Run words through encoder", "stack\"]) num_size = batch_data[\"num size\"] num_pos = batch_data[\"num pos\"] generate_nums", "batch_data[\"num pos\"] generate_nums = self.generate_nums num_start = self.num_start # sequence", "while len(o) > 0 and o[-1].terminal: sub_stree = o.pop() op", "generate_nums, num_pos, unk, num_start) return loss def model_test(self, batch_data:dict) ->", "out_score.topk(beam_size) for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)): current_node_stack", "tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)): current_node_stack = copy_list(b.node_stack)", "for _ in range(i)] + [1 for _ in range(i,", "targets)), dim=-1)) # [batch_size,output_len] y = torch.sum(x * mask, dim=-1)", "= target_batch.transpose(0, 1) padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0)", "sausolver.py import random import torch from torch import nn import", "encoder_outputs.transpose(0, 1) encoder_mapping, decoder_mapping = self.sa(current_num,temp_encoder_outputs[idx]) all_sa_outputs.append((encoder_mapping, decoder_mapping)) o.append(TreeEmbedding(current_num, terminal=True))", "self.generate_nums num_start = self.num_start # sequence mask for attention all_node_output", "num_mask.cuda() # Run words through encoder seq_emb = self.embedder(input_var) pade_outputs,", "= BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio, batch_first=False) #self.decoder = SARTreeDecoder(self.hidden_size,", "self.rnn_cell_type, self.dropout_ratio, batch_first=False) #self.decoder = SARTreeDecoder(self.hidden_size, self.operator_nums, self.generate_size, self.dropout_ratio) self.decoder", "beams: if len(b.node_stack[0]) != 0: flag = False if flag:", "all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start, self.beam_size, self.max_out_len) all_output", "unk) target[t] = target_t if self.USE_CUDA: generate_input = generate_input.cuda() left_child,", "= BasicEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio) # self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers,", "masked_index = torch.BoolTensor(masked_index) masked_index = masked_index.view(batch_size, num_size, hidden_size) if self.USE_CUDA:", "config['max_output_len'] self.embedding_size = config[\"embedding_size\"] self.dropout_ratio = config[\"dropout_ratio\"] self.num_layers = config[\"num_layers\"]", "BasicEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio) # self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type,", "mask for attention unk = self.unk_token loss = self.train_tree(seq, seq_length,", "op = current_embeddings_stacks[0].pop() current_num = self.merge(op.embedding, sub_stree.embedding, 
    def calculate_loss(self, batch_data: dict) -> float:
        """Finish forward-propagating, calculating loss and back-propagation.

        :param batch_data: one batch data.
        :return: loss value.

        batch_data should include keywords 'question', 'ques len', 'equation',
        'equ len', 'num stack', 'num size', 'num pos'.
        """
        seq = torch.tensor(batch_data["question"]).to(self.device)
        seq_length = torch.tensor(batch_data["ques len"]).long()
        target = torch.tensor(batch_data["equation"]).to(self.device)
        target_length = torch.LongTensor(batch_data["equ len"]).to(self.device)
        nums_stack = copy.deepcopy(batch_data["num stack"])
        num_size = batch_data["num size"]
        num_pos = batch_data["num pos"]
        generate_nums = self.generate_nums
        num_start = self.num_start
        # sequence mask for attention
        unk = self.unk_token
        loss = self.train_tree(seq, seq_length, target, target_length, nums_stack, num_size, generate_nums, num_pos, unk, num_start)
        return loss

    def model_test(self, batch_data: dict) -> tuple:
        """Model test.

        :param batch_data: one batch data.
        :return: predicted equation, target equation.

        batch_data should include keywords 'question', 'ques len', 'equation',
        'num stack', 'num pos', 'num list'.
        """
        seq = torch.tensor(batch_data["question"]).to(self.device)
        seq_length = torch.tensor(batch_data["ques len"]).long()
        target = torch.tensor(batch_data["equation"]).to(self.device)
        nums_stack = copy.deepcopy(batch_data["num stack"])
        num_pos = batch_data["num pos"]
        num_list = batch_data['num list']
        generate_nums = self.generate_nums
        num_start = self.num_start
        # sequence mask for attention
        all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start, self.beam_size, self.max_out_len)
        all_output = self.convert_idx2symbol(all_node_output, num_list[0], copy_list(nums_stack[0]))
        targets = self.convert_idx2symbol(target[0], num_list[0], copy_list(nums_stack[0]))
        return all_output, targets
    def train_tree(self, input_batch, input_length, target_batch, target_length, nums_stack_batch, num_size_batch, generate_nums, num_pos, unk, num_start, english=False, var_nums=[], batch_first=False):
        # sequence mask for attention
        seq_mask = []
        max_len = max(input_length)
        for i in input_length:
            seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
        seq_mask = torch.ByteTensor(seq_mask)

        num_mask = []
        # max positional-number count + constant-number count + unknown-variable count
        max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums)
        for i in num_size_batch:
            d = i + len(generate_nums) + len(var_nums)
            num_mask.append([0] * d + [1] * (max_num_size - d))
        num_mask = torch.ByteTensor(num_mask)  # masks out irrelevant numbers to avoid generating a wrong Nx
        # unk = output_lang.word2index["UNK"]

        # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
        input_var = input_batch.transpose(0, 1)
        target = target_batch.transpose(0, 1)
        padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0)
        batch_size = len(input_length)
        if self.USE_CUDA:
            input_var = input_var.cuda()
            seq_mask = seq_mask.cuda()
            padding_hidden = padding_hidden.cuda()
            num_mask = num_mask.cuda()

        # Zero gradients of both optimizers
        # Run words through encoder
        # encoder_outputs, problem_output = self.encoder(input_var, input_length)
        seq_emb = self.embedder(input_var)
        pade_outputs, _ = self.encoder(seq_emb, input_length)
        problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
        encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]

        # Prepare input and output variables
        node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]  # root embedding B x 1
        max_target_length = max(target_length)
        all_node_outputs = []
        all_sa_outputs = []
        # all_leafs = []
        copy_num_len = [len(_) for _ in num_pos]
        num_size = max(copy_num_len)
        # extract the embeddings of the numbers that appear in the problem
        all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.encoder.hidden_size)
        embeddings_stacks = [[] for _ in range(batch_size)]  # B x 1, current tree state / subtree embedding / output
        left_childs = [None for _ in range(batch_size)]  # B x 1
        for t in range(max_target_length):
            num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
                node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask)
            # all_leafs.append(p_leaf)
            outputs = torch.cat((op, num_score), 1)
            all_node_outputs.append(outputs)

            target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start, unk)
            target[t] = target_t
            if self.USE_CUDA:
                generate_input = generate_input.cuda()
            left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
            left_childs = []
            for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1), node_stacks, target[t].tolist(), embeddings_stacks):
                if len(node_stack) != 0:
                    node = node_stack.pop()
                else:
                    left_childs.append(None)
                    continue
                # unknowns are treated as numbers, SEP as an operator
                if i < num_start:  # operator token
                    node_stack.append(TreeNode(r))
                    node_stack.append(TreeNode(l, left_flag=True))
                    o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False))
                    # print(o[-1].embedding.size())
                    # print(encoder_outputs[idx].size())
                else:  # number token
                    current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
                    while len(o) > 0 and o[-1].terminal:
                        sub_stree = o.pop()
                        op = o.pop()
                        current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
                        # Subtree embedding
                        if batch_first:
                            encoder_mapping, decoder_mapping = self.sa(current_num, encoder_outputs[idx])
                        else:
                            temp_encoder_outputs = encoder_outputs.transpose(0, 1)
                            encoder_mapping, decoder_mapping = self.sa(current_num, temp_encoder_outputs[idx])
                        all_sa_outputs.append((encoder_mapping, decoder_mapping))
                    o.append(TreeEmbedding(current_num, terminal=True))
                if len(o) > 0 and o[-1].terminal:
                    left_childs.append(o[-1].embedding)
                else:
                    left_childs.append(None)

        # all_leafs = torch.stack(all_leafs, dim=1)  # B x S x 2
        all_node_outputs = torch.stack(all_node_outputs, dim=1)  # B x S x N
        target = target.transpose(0, 1).contiguous()  # B x S
        if self.USE_CUDA:
            # all_leafs = all_leafs.cuda()
            all_node_outputs = all_node_outputs.cuda()
            target = target.cuda()
            new_all_sa_outputs = []
            for sa_pair in all_sa_outputs:
                new_all_sa_outputs.append((sa_pair[0].cuda(), sa_pair[1].cuda()))
            all_sa_outputs = new_all_sa_outputs
            # target_length = torch.LongTensor(target_length).cuda()
        else:
            pass
            # target_length = torch.LongTensor(target_length)

        semantic_alignment_loss = nn.MSELoss()
        total_semanti_alognment_loss = 0
        sa_len = len(all_sa_outputs)
        for sa_pair in all_sa_outputs:
            total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1])
        # print(total_semanti_alognment_loss)
        total_semanti_alognment_loss = total_semanti_alognment_loss / sa_len
        # print(total_semanti_alognment_loss)

        # op_target = target < num_start
        # loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length)
        loss = masked_cross_entropy(all_node_outputs, target, target_length) + 0.01 * total_semanti_alognment_loss
        # loss = loss_0 + loss_1
        loss.backward()
        # clip the grad
        # torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5)
        # torch.nn.utils.clip_grad_norm_(predict.parameters(), 5)
        # torch.nn.utils.clip_grad_norm_(generate.parameters(), 5)

        # Update parameters with optimizers
        return loss.item()  # , loss_0.item(), loss_1.item()
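The distinctive term in train_tree is the semantic-alignment penalty: every time a completed subtree is merged, the SemanticAlignmentModule yields an (encoder_mapping, decoder_mapping) pair, and the mean MSE over all such pairs is added to the masked cross-entropy with a fixed 0.01 weight. A standalone sketch of that accumulation with random tensors follows; the shapes are illustrative only.

import torch
from torch import nn

# Standalone illustration of the semantic-alignment term: an MSE between
# aligned encoder/decoder mappings, averaged over the collected subtree pairs.
mse = nn.MSELoss()
all_sa_outputs = [(torch.randn(1, 512), torch.randn(1, 512)) for _ in range(4)]
sa_loss = sum(mse(enc, dec) for enc, dec in all_sa_outputs) / len(all_sa_outputs)
# The total objective then adds this with a small weight:
# loss = masked_cross_entropy(all_node_outputs, target, target_length) + 0.01 * sa_loss
print(sa_loss.item())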
    def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30):
        seq_mask = torch.BoolTensor(1, input_length).fill_(0)
        # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
        input_var = input_batch.transpose(0, 1)
        num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0)
        padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
        batch_size = 1
        if self.USE_CUDA:
            input_var = input_var.cuda()
            seq_mask = seq_mask.cuda()
            padding_hidden = padding_hidden.cuda()
            num_mask = num_mask.cuda()
        # Run words through encoder
        seq_emb = self.embedder(input_var)
        pade_outputs, _ = self.encoder(seq_emb, input_length)
        problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
        encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]

        # Prepare input and output variables
        node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
        num_size = len(num_pos[0])
        all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.hidden_size)  # B x P x N
        embeddings_stacks = [[] for _ in range(batch_size)]
        left_childs = [None for _ in range(batch_size)]
        beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
        for t in range(max_length):
            current_beams = []
            while len(beams) > 0:
                b = beams.pop()
                if len(b.node_stack[0]) == 0:
                    current_beams.append(b)
                    continue
                # left_childs = torch.stack(b.left_childs)
                left_childs = b.left_childs
                num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask)
                out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
                # out_score = p_leaf * out_score
                topv, topi = out_score.topk(beam_size)
                for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
                    current_node_stack = copy_list(b.node_stack)
                    current_left_childs = []
                    current_embeddings_stacks = copy_list(b.embedding_stack)
                    current_out = copy.deepcopy(b.out)
                    out_token = int(ti)
                    current_out.append(out_token)
                    node = current_node_stack[0].pop()
                    if out_token < num_start:
                        generate_input = torch.LongTensor([out_token])
                        if self.USE_CUDA:
                            generate_input = generate_input.cuda()
                        left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
                        current_node_stack[0].append(TreeNode(right_child))
                        current_node_stack[0].append(TreeNode(left_child, left_flag=True))
                        current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
                    else:
                        current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
                        while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                            sub_stree = current_embeddings_stacks[0].pop()
                            op = current_embeddings_stacks[0].pop()
                            current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
                        current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
                    if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                        current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
                    else:
                        current_left_childs.append(None)
                    current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks, current_left_childs, current_out))
            beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
            beams = beams[:beam_size]
            flag = True
            for b in beams:
                if len(b.node_stack[0]) != 0:
                    flag = False
            if flag:
                break

        return beams[0].out
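Beam bookkeeping in evaluate_tree is additive in log-space: out_score is a log-softmax over the concatenated operator and number scores, each expansion adds float(tv) to the parent beam's running score, and only the beam_size best candidates survive each step. A toy, self-contained version of that scoring follows; the dimensions are made up.

import torch

# Toy version of the beam expansion in evaluate_tree: scores are
# accumulated log-probabilities and only the top `beam_size` survive.
beam_size = 3
logits = torch.randn(1, 7)                        # [operators; numbers] for one node
out_score = torch.log_softmax(logits, dim=1)
topv, topi = out_score.topk(beam_size)
parent_score = -0.7                               # running score of the parent beam
candidates = [(parent_score + float(tv), int(ti))
              for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1))]
candidates.sort(key=lambda x: x[0], reverse=True)
print(candidates[:beam_size])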
    def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size):
        indices = list()
        sen_len = encoder_outputs.size(0)
        masked_index = []
        temp_1 = [1 for _ in range(hidden_size)]
        temp_0 = [0 for _ in range(hidden_size)]
        for b in range(batch_size):
            for i in num_pos[b]:
                indices.append(i + b * sen_len)
                masked_index.append(temp_0)
            indices += [0 for _ in range(len(num_pos[b]), num_size)]
            masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)]
        indices = torch.LongTensor(indices)
        masked_index = torch.BoolTensor(masked_index)
        masked_index = masked_index.view(batch_size, num_size, hidden_size)
        if self.USE_CUDA:
            indices = indices.cuda()
            masked_index = masked_index.cuda()
        all_outputs = encoder_outputs.transpose(0, 1).contiguous()
        all_embedding = all_outputs.view(-1, encoder_outputs.size(2))  # S x B x H -> (B x S) x H
        all_num = all_embedding.index_select(0, indices)
        all_num = all_num.view(batch_size, num_size, hidden_size)
        return all_num.masked_fill_(masked_index, 0.0)
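get_all_number_encoder_outputs gathers number embeddings by flattening the (S x B x H) encoder states into a (B*S) x H matrix, so the hidden state of example b at token position i sits at row i + b * sen_len; padded number slots are then zeroed via the mask. A small concrete run of the same trick follows; the sizes are arbitrary.

import torch

# Concrete run of the gathering trick: flatten (S x B x H) to (B*S) x H,
# index_select the number positions, then zero the padded slots.
S, B, H = 5, 2, 4
encoder_outputs = torch.arange(S * B * H, dtype=torch.float).view(S, B, H)
num_pos = [[1, 3], [2]]                 # token positions of numbers per example
num_size = max(len(p) for p in num_pos)

indices, masked = [], []
for b in range(B):
    for i in num_pos[b]:
        indices.append(i + b * S)
        masked.append([0] * H)
    indices += [0] * (num_size - len(num_pos[b]))
    masked += [[1] * H] * (num_size - len(num_pos[b]))

flat = encoder_outputs.transpose(0, 1).contiguous().view(-1, H)   # (B*S) x H
all_num = flat.index_select(0, torch.LongTensor(indices)).view(B, num_size, H)
all_num = all_num.masked_fill_(torch.BoolTensor(masked).view(B, num_size, H), 0.0)
print(all_num.shape)   # torch.Size([2, 2, 4])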
    def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
        # when the decoder input is a copied num but the num has two positions, choose the max-scoring one
        target_input = copy.deepcopy(target)
        for i in range(len(target)):
            if target[i] == unk:
                num_stack = nums_stack_batch[i].pop()
                max_score = -float("1e12")
                for num in num_stack:
                    if decoder_output[i, num_start + num] > max_score:
                        target[i] = num + num_start
                        max_score = decoder_output[i, num_start + num]
            if target_input[i] >= num_start:
                target_input[i] = 0
        return torch.LongTensor(target), torch.LongTensor(target_input)
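generate_tree_input resolves an UNK target token by popping the candidate number positions off the num stack and keeping whichever the decoder currently scores highest, while generate_input maps every number token to 0 since the node generator only embeds operators. A miniature, self-contained rehearsal of that logic follows; all sizes and indices below are invented.

import copy
import torch

# Miniature rehearsal of the UNK resolution: among candidate number
# slots, keep the one the decoder scores highest.
num_start, unk = 5, 4
target = [6, unk, 2]                       # second token is UNK
decoder_output = torch.randn(3, 8)         # 5 operators + 3 numbers
nums_stack = [[0, 2]]                      # candidate number indices for the UNK

target_input = copy.deepcopy(target)
for i, tok in enumerate(target):
    if tok == unk:
        candidates = nums_stack.pop()
        best = max(candidates, key=lambda n: decoder_output[i, num_start + n].item())
        target[i] = best + num_start
    if target_input[i] >= num_start:
        target_input[i] = 0               # the generator only embeds operator tokens
print(target, target_input)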
o.append(TreeEmbedding(node_label[idx].unsqueeze(0),", "== unk: num_stack = nums_stack_batch[i].pop() max_score = -float(\"1e12\") for num", "# print(encoder_outputs[idx].size()) else: # 数字 current_num = current_nums_embeddings[idx, i -", "copy_num_len = [len(_) for _ in num_pos] num_size = max(copy_num_len)", "max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums) # 最大的位置列表数目+常识数字数目+未知数列表 for", "self.generate_size = len(generate_list) self.unk_token = self.out_symbol2idx[SpecialTokens.UNK_TOKEN] try: self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN]", "indices = list() sen_len = encoder_outputs.size(0) masked_index = [] temp_1", "current_nums_embeddings[idx, i - num_start].unsqueeze(0) while len(o) > 0 and o[-1].terminal:", "current_nums_embeddings = self.decoder(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask) out_score", "= num_mask.cuda() # Run words through encoder seq_emb = self.embedder(input_var)", "num] if target_input[i] >= num_start: target_input[i] = 0 return torch.LongTensor(target),", "self.sa = SemanticAlignmentModule(self.hidden_size,self.hidden_size,self.hidden_size) self.loss1 = MaskedCrossEntropyLoss() # def calculate_loss(self, batch_data:dict)", "num_list = batch_data['num list'] generate_nums = self.generate_nums num_start = self.num_start", "for _ in range(batch_size)] # B x 1 当前的tree state/", "= self.out_symbol2idx[SpecialTokens.UNK_TOKEN] try: self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN] except: self.out_sos_token = None", "print(encoder_outputs[idx].size()) else: # 数字 current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)", "encoder_outputs.transpose(0, 1).contiguous() all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B", "1) all_node_outputs.append(outputs) target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start, unk)", "= [] max_len = max(input_length) for i in input_length: seq_mask.append([0", "B x S if self.USE_CUDA: # all_leafs = all_leafs.cuda() all_node_outputs", "dataset): super(SAUSolver, self).__init__() # parameter self.hidden_size = config[\"hidden_size\"] self.device =", "if len(o) > 0 and o[-1].terminal: left_childs.append(o[-1].embedding) else: left_childs.append(None) #", "[0 for _ in range(len(num_pos[b]), num_size)] masked_index += [temp_1 for", "self.operator_nums, self.embedding_size, self.dropout_ratio) self.merge = Merge(self.hidden_size, self.embedding_size, self.dropout_ratio) self.sa =", "= self.train_tree(seq, seq_length, target, target_length, nums_stack, num_size, generate_nums, num_pos, unk,", "= torch.tensor(batch_data[\"question\"]).to(self.device) seq_length = torch.tensor(batch_data[\"ques len\"]).long() target = torch.tensor(batch_data[\"equation\"]).to(self.device) nums_stack", "beams[0].out def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size): indices =", "+ len(var_nums) # 最大的位置列表数目+常识数字数目+未知数列表 for i in num_size_batch: d =", "num_size, self.hidden_size) # B x P x N embeddings_stacks =", "batch_data: one batch data. :return: predicted equation, target equation. 
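    # Editor's sketch, not part of the original model: a minimal `config`
    # mapping of the kind __init__ above reads. Only the key names are taken
    # from the constructor; the values are hypothetical placeholders.
    @staticmethod
    def _example_config():
        import torch
        return {
            "hidden_size": 512,
            "device": torch.device("cpu"),
            "beam_size": 5,
            "max_output_len": 30,
            "embedding_size": 128,
            "dropout_ratio": 0.5,
            "num_layers": 2,
            "rnn_cell_type": "gru",
            "loss_weight": 0.01,
        }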
    def calculate_loss(self, batch_data: dict) -> float:
        """Finish forward-propagating, calculating loss and back-propagation.

        :param batch_data: one batch data.
        :return: loss value.

        batch_data should include keywords 'question', 'ques len', 'equation',
        'equ len', 'num stack', 'num size', 'num pos'.
        """
        seq = torch.tensor(batch_data["question"]).to(self.device)
        seq_length = torch.tensor(batch_data["ques len"]).long()
        target = torch.tensor(batch_data["equation"]).to(self.device)
        target_length = torch.LongTensor(batch_data["equ len"]).to(self.device)
        nums_stack = copy.deepcopy(batch_data["num stack"])
        num_size = batch_data["num size"]
        num_pos = batch_data["num pos"]
        generate_nums = self.generate_nums
        num_start = self.num_start
        # sequence mask for attention
        unk = self.unk_token
        loss = self.train_tree(seq, seq_length, target, target_length, nums_stack,
                               num_size, generate_nums, num_pos, unk, num_start)
        return loss

    def model_test(self, batch_data: dict) -> tuple:
        """Model test.

        :param batch_data: one batch data.
        :return: predicted equation, target equation.

        batch_data should include keywords 'question', 'ques len', 'equation',
        'num stack', 'num pos', 'num list'.
        """
        seq = torch.tensor(batch_data["question"]).to(self.device)
        seq_length = torch.tensor(batch_data["ques len"]).long()
        target = torch.tensor(batch_data["equation"]).to(self.device)
        nums_stack = copy.deepcopy(batch_data["num stack"])
        num_pos = batch_data["num pos"]
        num_list = batch_data['num list']
        generate_nums = self.generate_nums
        num_start = self.num_start
        # sequence mask for attention
        all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos,
                                             num_start, self.beam_size, self.max_out_len)
        all_output = self.convert_idx2symbol(all_node_output, num_list[0], copy_list(nums_stack[0]))
        targets = self.convert_idx2symbol(target[0], num_list[0], copy_list(nums_stack[0]))
        return all_output, targets
    def train_tree(self, input_batch, input_length, target_batch, target_length, nums_stack_batch,
                   num_size_batch, generate_nums, num_pos, unk, num_start,
                   english=False, var_nums=[], batch_first=False):
        # sequence mask for attention
        seq_mask = []
        max_len = max(input_length)
        for i in input_length:
            seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
        seq_mask = torch.ByteTensor(seq_mask)

        num_mask = []
        # max number-position count + common-sense constants + unknown variables
        max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums)
        for i in num_size_batch:
            d = i + len(generate_nums) + len(var_nums)
            num_mask.append([0] * d + [1] * (max_num_size - d))
        num_mask = torch.ByteTensor(num_mask)  # masks irrelevant numbers to avoid generating a wrong Nx

        # unk = output_lang.word2index["UNK"]

        # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
        input_var = input_batch.transpose(0, 1)
        target = target_batch.transpose(0, 1)
        padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0)
        batch_size = len(input_length)
        if self.USE_CUDA:
            input_var = input_var.cuda()
            seq_mask = seq_mask.cuda()
            padding_hidden = padding_hidden.cuda()
            num_mask = num_mask.cuda()

        # Zero gradients of both optimizers
        # Run words through encoder
        # encoder_outputs, problem_output = self.encoder(input_var, input_length)
        seq_emb = self.embedder(input_var)
        pade_outputs, _ = self.encoder(seq_emb, input_length)
        problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
        encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]

        # Prepare input and output variables
        node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]  # root embedding B x 1

        max_target_length = max(target_length)
        all_node_outputs = []
        all_sa_outputs = []
        # all_leafs = []

        copy_num_len = [len(_) for _ in num_pos]
        num_size = max(copy_num_len)
        # extract the embeddings of the numbers that appear in the problem
        all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size,
                                                                       num_size, self.encoder.hidden_size)

        embeddings_stacks = [[] for _ in range(batch_size)]  # B x 1, current tree state / subtree embedding / output
        left_childs = [None for _ in range(batch_size)]  # B x 1
        for t in range(max_target_length):
            num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
                node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
                seq_mask, num_mask)

            # all_leafs.append(p_leaf)
            outputs = torch.cat((op, num_score), 1)
            all_node_outputs.append(outputs)

            target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs,
                                                                nums_stack_batch, num_start, unk)
            target[t] = target_t
            if self.USE_CUDA:
                generate_input = generate_input.cuda()
            left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input,
                                                                      current_context)
            left_childs = []
            for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1),
                                                   node_stacks, target[t].tolist(), embeddings_stacks):
                if len(node_stack) != 0:
                    node = node_stack.pop()
                else:
                    left_childs.append(None)
                    continue
                # unknowns are treated as numbers, SEP as an operator
                if i < num_start:  # operator (non-number)
                    node_stack.append(TreeNode(r))
                    node_stack.append(TreeNode(l, left_flag=True))
                    o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False))
                    # print(o[-1].embedding.size())
                    # print(encoder_outputs[idx].size())
                else:  # number
                    current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
                    while len(o) > 0 and o[-1].terminal:
                        sub_stree = o.pop()
                        op = o.pop()
                        current_num = self.merge(op.embedding, sub_stree.embedding, current_num)  # subtree embedding
                        if batch_first:
                            encoder_mapping, decoder_mapping = self.sa(current_num, encoder_outputs[idx])
                        else:
                            temp_encoder_outputs = encoder_outputs.transpose(0, 1)
                            encoder_mapping, decoder_mapping = self.sa(current_num, temp_encoder_outputs[idx])
                        all_sa_outputs.append((encoder_mapping, decoder_mapping))
                    o.append(TreeEmbedding(current_num, terminal=True))
                if len(o) > 0 and o[-1].terminal:
                    left_childs.append(o[-1].embedding)
                else:
                    left_childs.append(None)

        # all_leafs = torch.stack(all_leafs, dim=1)  # B x S x 2
        all_node_outputs = torch.stack(all_node_outputs, dim=1)  # B x S x N

        target = target.transpose(0, 1).contiguous()  # B x S
        if self.USE_CUDA:
            # all_leafs = all_leafs.cuda()
            all_node_outputs = all_node_outputs.cuda()
            target = target.cuda()
            new_all_sa_outputs = []
            for sa_pair in all_sa_outputs:
                new_all_sa_outputs.append((sa_pair[0].cuda(), sa_pair[1].cuda()))
            all_sa_outputs = new_all_sa_outputs
            # target_length = torch.LongTensor(target_length).cuda()
        else:
            pass
            # target_length = torch.LongTensor(target_length)

        semantic_alignment_loss = nn.MSELoss()
        total_semanti_alognment_loss = 0
        sa_len = len(all_sa_outputs)
        for sa_pair in all_sa_outputs:
            total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1])
        # print(total_semanti_alognment_loss)
        total_semanti_alognment_loss = total_semanti_alognment_loss / sa_len
        # print(total_semanti_alognment_loss)

        # op_target = target < num_start
        # loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length)
        loss = masked_cross_entropy(all_node_outputs, target, target_length) + 0.01 * total_semanti_alognment_loss
        # loss = loss_0 + loss_1
        loss.backward()
        # clip the grad
        # torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5)
        # torch.nn.utils.clip_grad_norm_(predict.parameters(), 5)
        # torch.nn.utils.clip_grad_norm_(generate.parameters(), 5)

        # Update parameters with optimizers
        return loss.item()  # , loss_0.item(), loss_1.item()
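    # Editor's sketch, not part of the original model: how the semantic
    # alignment term in train_tree above is accumulated. Each
    # (encoder_mapping, decoder_mapping) pair is compared with MSE and the
    # result is averaged; the random tensors stand in for the real subtree /
    # problem projections and are purely hypothetical.
    @staticmethod
    def _demo_semantic_alignment_loss():
        import torch
        from torch import nn
        mse = nn.MSELoss()
        sa_pairs = [(torch.randn(1, 512), torch.randn(1, 512)) for _ in range(4)]
        total = sum(mse(enc, dec) for enc, dec in sa_pairs) / len(sa_pairs)
        return total  # scaled by 0.01 before being added to the tree loss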
    def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30):
        # sequence mask for attention
        seq_mask = torch.BoolTensor(1, input_length).fill_(0)
        # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
        input_var = input_batch.transpose(0, 1)

        num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0)

        padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
        batch_size = 1

        if self.USE_CUDA:
            input_var = input_var.cuda()
            seq_mask = seq_mask.cuda()
            padding_hidden = padding_hidden.cuda()
            num_mask = num_mask.cuda()
        # Run words through encoder
        # encoder_outputs, problem_output = self.encoder(input_var, input_length)
        seq_emb = self.embedder(input_var)
        pade_outputs, _ = self.encoder(seq_emb, input_length)
        problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
        encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]

        # Prepare input and output variables
        node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]

        num_size = len(num_pos[0])
        all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size,
                                                                       num_size, self.hidden_size)  # B x P x N

        embeddings_stacks = [[] for _ in range(batch_size)]
        left_childs = [None for _ in range(batch_size)]

        beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
        for t in range(max_length):
            current_beams = []
            while len(beams) > 0:
                b = beams.pop()
                if len(b.node_stack[0]) == 0:
                    current_beams.append(b)
                    continue
                # left_childs = torch.stack(b.left_childs)
                left_childs = b.left_childs

                num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
                    b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden,
                    seq_mask, num_mask)

                out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
                # out_score = p_leaf * out_score

                topv, topi = out_score.topk(beam_size)

                for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
                    current_node_stack = copy_list(b.node_stack)
                    current_left_childs = []
                    current_embeddings_stacks = copy_list(b.embedding_stack)
                    current_out = copy.deepcopy(b.out)

                    out_token = int(ti)
                    current_out.append(out_token)

                    node = current_node_stack[0].pop()

                    if out_token < num_start:
                        generate_input = torch.LongTensor([out_token])
                        if self.USE_CUDA:
                            generate_input = generate_input.cuda()
                        left_child, right_child, node_label = self.node_generater(current_embeddings,
                                                                                  generate_input, current_context)

                        current_node_stack[0].append(TreeNode(right_child))
                        current_node_stack[0].append(TreeNode(left_child, left_flag=True))

                        current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
                    else:
                        current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)

                        while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                            sub_stree = current_embeddings_stacks[0].pop()
                            op = current_embeddings_stacks[0].pop()
                            current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
                        current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
                    if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
                        current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
                    else:
                        current_left_childs.append(None)
                    current_beams.append(TreeBeam(b.score + float(tv), current_node_stack,
                                                  current_embeddings_stacks, current_left_childs, current_out))
            beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
            beams = beams[:beam_size]
            flag = True
            for b in beams:
                if len(b.node_stack[0]) != 0:
                    flag = False
            if flag:
                break

        return beams[0].out
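    # Editor's sketch, not part of the original model: the per-step score
    # bookkeeping evaluate_tree above performs. Scores are log-probabilities,
    # so extending a beam adds the new token's score; the dummy logits are
    # hypothetical.
    @staticmethod
    def _demo_beam_step(beam_size=5):
        import torch
        from torch import nn
        logits = torch.randn(1, 10)  # operator scores concatenated with number scores
        out_score = nn.functional.log_softmax(logits, dim=1)
        topv, topi = out_score.topk(beam_size)
        candidates = [(0.0 + float(tv), int(ti))  # (accumulated score, token id)
                      for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1))]
        # keep only the best beam_size continuations, like the sort/truncate above
        return sorted(candidates, key=lambda x: x[0], reverse=True)[:beam_size]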
    def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size):
        indices = list()
        sen_len = encoder_outputs.size(0)
        masked_index = []
        temp_1 = [1 for _ in range(hidden_size)]
        temp_0 = [0 for _ in range(hidden_size)]
        for b in range(batch_size):
            for i in num_pos[b]:
                indices.append(i + b * sen_len)
                masked_index.append(temp_0)
            indices += [0 for _ in range(len(num_pos[b]), num_size)]
            masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)]
        indices = torch.LongTensor(indices)
        masked_index = torch.BoolTensor(masked_index)
        masked_index = masked_index.view(batch_size, num_size, hidden_size)
        if self.USE_CUDA:
            indices = indices.cuda()
            masked_index = masked_index.cuda()
        all_outputs = encoder_outputs.transpose(0, 1).contiguous()
        all_embedding = all_outputs.view(-1, encoder_outputs.size(2))  # S x B x H -> (B x S) x H
        all_num = all_embedding.index_select(0, indices)
        all_num = all_num.view(batch_size, num_size, hidden_size)
        return all_num.masked_fill_(masked_index, 0.0)

    def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
        # when the decoder input is a copied number that has two positions, choose the one with the max score
        target_input = copy.deepcopy(target)
        for i in range(len(target)):
            if target[i] == unk:
                num_stack = nums_stack_batch[i].pop()
                max_score = -float("1e12")
                for num in num_stack:
                    if decoder_output[i, num_start + num] > max_score:
                        target[i] = num + num_start
                        max_score = decoder_output[i, num_start + num]
            if target_input[i] >= num_start:
                target_input[i] = 0
        return torch.LongTensor(target), torch.LongTensor(target_input)
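    # Editor's sketch, not part of the original model: the flatten +
    # index_select trick used by get_all_number_encoder_outputs above,
    # on toy, hypothetical shapes.
    @staticmethod
    def _demo_gather_number_embeddings():
        import torch
        S, B, H = 6, 2, 4
        encoder_outputs = torch.randn(S, B, H)
        num_pos = [[1, 3], [0, 4]]  # positions of the numbers, per batch item
        flat = encoder_outputs.transpose(0, 1).contiguous().view(-1, H)  # (B*S) x H
        indices = torch.LongTensor([p + b * S for b, ps in enumerate(num_pos) for p in ps])
        return flat.index_select(0, indices).view(B, 2, H)  # B x num_size x H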
:param", "torch.ByteTensor(num_mask) # 用于屏蔽无关数字,防止生成错误的Nx #unk = output_lang.word2index[\"UNK\"] # Turn padded arrays", "left_childs, [])] for t in range(max_length): current_beams = [] while", "self.out_idx2symbol[idx] if \"NUM\" in symbol: num_idx = self.mask_list.index(symbol) if num_idx", "self.node_generater(current_embeddings, generate_input, current_context) current_node_stack[0].append(TreeNode(right_child)) current_node_stack[0].append(TreeNode(left_child, left_flag=True)) current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False)) else: current_num", "None try: self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN] except: self.out_pad_token = None #", "< num_start: generate_input = torch.LongTensor([out_token]) if self.USE_CUDA: generate_input = generate_input.cuda()", "output_list = [] res = [] for s_i in range(seq_len):", "num_size, generate_nums, num_pos, unk, num_start) return loss def model_test(self, batch_data:dict)", "current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0) while len(o) > 0", "total_semanti_alognment_loss # loss = loss_0 + loss_1 loss.backward() # clip", "loss.item() # , loss_0.item(), loss_1.item() def evaluate_tree(self, input_batch, input_length, generate_nums,", "num_len: res = [] break res.append(num_list[num_idx]) elif symbol == SpecialTokens.UNK_TOKEN:", "'num size', 'num pos' \"\"\" seq = torch.tensor(batch_data[\"question\"]).to(self.device) seq_length =", "1 max_target_length = max(target_length) all_node_outputs = [] all_sa_outputs = []", "\"\"\"Finish forward-propagating, calculating loss and back-propagation. :param batch_data: one batch", "temp_encoder_outputs = encoder_outputs.transpose(0, 1) encoder_mapping, decoder_mapping = self.sa(current_num,temp_encoder_outputs[idx]) all_sa_outputs.append((encoder_mapping, decoder_mapping))", "# print(o[-1].embedding.size()) # print(encoder_outputs[idx].size()) else: # 数字 current_num = current_nums_embeddings[idx,", "= dataset.out_idx2symbol generate_list = dataset.generate_list self.generate_nums = [self.out_symbol2idx[symbol] for symbol", "1) encoder_mapping, decoder_mapping = self.sa(current_num,temp_encoder_outputs[idx]) all_sa_outputs.append((encoder_mapping, decoder_mapping)) o.append(TreeEmbedding(current_num, terminal=True)) if", "current_nums_embeddings[0, out_token - num_start].unsqueeze(0) while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:", "batch_data should include keywords 'question', 'ques len', 'equation', 'equ len',", "[1 for _ in range(hidden_size)] temp_0 = [0 for _", "current_left_childs.append(current_embeddings_stacks[0][-1].embedding) else: current_left_childs.append(None) current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks, current_left_childs, current_out))", "node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)] # root", "for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)): current_node_stack =", "= mask.to(self.device) x = torch.sqrt(torch.sum(torch.square((outputs - targets)), dim=-1)) # [batch_size,output_len]", "1).contiguous() all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x", "\"\"\"Model test. :param batch_data: one batch data. :return: predicted equation,", "one batch data. :return: predicted equation, target equation. 
batch_data should", "dim=0)] # root embedding B x 1 max_target_length = max(target_length)", "= self.node_generater(current_embeddings, generate_input, current_context) current_node_stack[0].append(TreeNode(right_child)) current_node_stack[0].append(TreeNode(left_child, left_flag=True)) current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False)) else:", "unk: num_stack = nums_stack_batch[i].pop() max_score = -float(\"1e12\") for num in", "= [[TreeNode(_)] for _ in problem_output.split(1, dim=0)] num_size = len(num_pos[0])", "of both optimizers # Run words through encoder #encoder_outputs, problem_output", "seq_mask.append([0 for _ in range(i)] + [1 for _ in", "torch.LongTensor(indices) masked_index = torch.BoolTensor(masked_index) masked_index = masked_index.view(batch_size, num_size, hidden_size) if", "unk, num_start, english=False,var_nums=[], batch_first=False): # sequence mask for attention seq_mask", "target = target_batch.transpose(0, 1) padding_hidden = torch.FloatTensor([0.0 for _ in", "break return beams[0].out def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size):", "import BasicRNNEncoder from module.Embedder.basic_embedder import BasicEmbedder from module.Decoder.tree_decoder import SARTreeDecoder", "_ in range(hidden_size)] for b in range(batch_size): for i in", "temp_0 = [0 for _ in range(hidden_size)] for b in", "seq_mask.cuda() padding_hidden = padding_hidden.cuda() num_mask = num_mask.cuda() # Run words", "current_embeddings_stacks[0][-1].terminal: sub_stree = current_embeddings_stacks[0].pop() op = current_embeddings_stacks[0].pop() current_num = self.merge(op.embedding,", "pass # target_length = torch.LongTensor(target_length) semantic_alignment_loss = nn.MSELoss() total_semanti_alognment_loss =", "num_mask = torch.ByteTensor(num_mask) # 用于屏蔽无关数字,防止生成错误的Nx #unk = output_lang.word2index[\"UNK\"] # Turn", "b in range(batch_size): for i in num_pos[b]: indices.append(i + b", "the max target_input = copy.deepcopy(target) for i in range(len(target)): if", "all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.encoder.hidden_size) embeddings_stacks = [[]", "def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size): indices = list()", "= seq_mask.cuda() padding_hidden = padding_hidden.cuda() num_mask = num_mask.cuda() # Run", "False)) else: current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0) while len(current_embeddings_stacks[0])", "Problems\" in EMNLP 2020. 
\"\"\" def __init__(self, config, dataset): super(SAUSolver,", "= generate_input.cuda() left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context) left_childs", "B x P x N embeddings_stacks = [[] for _", "num_size = len(num_pos[0]) all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.hidden_size)", "has two pos, chose the max target_input = copy.deepcopy(target) for", "symbol = self.out_idx2symbol[idx] if \"NUM\" in symbol: num_idx = self.mask_list.index(symbol)", "in range(i, max_len)]) seq_mask = torch.ByteTensor(seq_mask) num_mask = [] max_num_size", "masked_index.view(batch_size, num_size, hidden_size) if self.USE_CUDA: indices = indices.cuda() masked_index =", "beam_size=5, max_length=30): seq_mask = torch.BoolTensor(1, input_length).fill_(0) # Turn padded arrays", "generate_input.cuda() left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context) current_node_stack[0].append(TreeNode(right_child)) current_node_stack[0].append(TreeNode(left_child,", "#encoder_outputs, problem_output = self.encoder(input_var, input_length) seq_emb = self.embedder(input_var) pade_outputs, _", "[temp_1 for _ in range(len(num_pos[b]), num_size)] indices = torch.LongTensor(indices) masked_index", "self.dropout_ratio) # self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio) self.encoder", "semantic_alignment_loss = nn.MSELoss() total_semanti_alognment_loss = 0 sa_len = len(all_sa_outputs) for", "from loss.mse_loss import MSELoss from utils.utils import copy_list from utils.enum_type", "all_num = all_embedding.index_select(0, indices) all_num = all_num.view(batch_size, num_size, hidden_size) return", "= torch.cat((op, num_score), 1) all_node_outputs.append(outputs) target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs,", "should include keywords 'question', 'ques len', 'equation', 'equ len', 'num", "torch.ByteTensor(seq_mask) num_mask = [] max_num_size = max(num_size_batch) + len(generate_nums) +", "= output_lang.word2index[\"UNK\"] # Turn padded arrays into (batch_size x max_len)", "pade_outputs[:, :, self.hidden_size:] # Prepare input and output variables node_stacks", "self.num_layers = config[\"num_layers\"] self.rnn_cell_type = config[\"rnn_cell_type\"] self.loss_weight = config['loss_weight'] self.vocab_size", "self.operator_nums, self.generate_size, self.dropout_ratio) self.decoder = Prediction(self.hidden_size,self.operator_nums,self.generate_size,self.dropout_ratio) self.node_generater = GenerateNode(self.hidden_size, self.operator_nums,", "= self.decoder( node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask) #", "node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask) # all_leafs.append(p_leaf) outputs", "_ in range(i, max_len)]) seq_mask = torch.ByteTensor(seq_mask) num_mask = []", "op, current_embeddings, current_context, current_nums_embeddings = self.decoder( node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs,", "new_all_sa_outputs # target_length = torch.LongTensor(target_length).cuda() else: pass # target_length =", "= len(output) num_len = len(num_list) output_list = [] res =", "# 提取与问题相关的数字embedding all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, 
self.encoder.hidden_size) embeddings_stacks", "current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask,", "= len(num_pos[0]) all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.hidden_size) #", "self.hidden_size = config[\"hidden_size\"] self.device = config[\"device\"] self.USE_CUDA = True if", "== torch.device('cuda') else False self.beam_size = config['beam_size'] self.max_out_len = config['max_output_len']", "len(var_nums) num_mask.append([0] * d + [1] * (max_num_size - d))", "'question', 'ques len', 'equation', 'num stack', 'num pos', 'num list'", "SpecialTokens class SAUSolver(nn.Module): \"\"\" Reference: Qin et al. \"Semantically-Aligned Universal", "> 0 and current_embeddings_stacks[0][-1].terminal: current_left_childs.append(current_embeddings_stacks[0][-1].embedding) else: current_left_childs.append(None) current_beams.append(TreeBeam(b.score + float(tv),", "= all_embedding.index_select(0, indices) all_num = all_num.view(batch_size, num_size, hidden_size) return all_num.masked_fill_(masked_index,", "= torch.sqrt(torch.sum(torch.square((outputs - targets)), dim=-1)) # [batch_size,output_len] y = torch.sum(x", "num_start: # 非数字 node_stack.append(TreeNode(r)) node_stack.append(TreeNode(l, left_flag=True)) o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False)) # print(o[-1].embedding.size())", "= [[] for _ in range(batch_size)] # B x 1", "node_stacks, embeddings_stacks, left_childs, [])] for t in range(max_length): current_beams =", "in input_length: seq_mask.append([0 for _ in range(i)] + [1 for", "if \"NUM\" in symbol: num_idx = self.mask_list.index(symbol) if num_idx >=", "len\"]).long() target = torch.tensor(batch_data[\"equation\"]).to(self.device) nums_stack = copy.deepcopy(batch_data[\"num stack\"]) num_pos =", "range(len(num_pos[b]), num_size)] indices = torch.LongTensor(indices) masked_index = torch.BoolTensor(masked_index) masked_index =", "= masked_index.cuda() all_outputs = encoder_outputs.transpose(0, 1).contiguous() all_embedding = all_outputs.view(-1, encoder_outputs.size(2))", "= len(all_sa_outputs) for sa_pair in all_sa_outputs: total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1])", "import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding from module.Layer.tree_layers import Prediction, GenerateNode,", "attention all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start, self.beam_size, self.max_out_len)", "target_length, nums_stack, num_size, generate_nums, num_pos, unk, num_start) return loss def", "= num_mask.cuda() # Zero gradients of both optimizers # Run", "targets : [batch_size,output_len,hidden_size] # mask : [batch_size,output_len] mask = mask.to(self.device)", "= current_embeddings_stacks[0].pop() op = current_embeddings_stacks[0].pop() current_num = self.merge(op.embedding, sub_stree.embedding, current_num)", "NumMask, SpecialTokens class SAUSolver(nn.Module): \"\"\" Reference: Qin et al. 
\"Semantically-Aligned", "continue # 未知数当数字处理,SEP当操作符处理 if i < num_start: # 非数字 node_stack.append(TreeNode(r))", "sa_len = len(all_sa_outputs) for sa_pair in all_sa_outputs: total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0],", "loss_1.item() def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30):", "indices) all_num = all_num.view(batch_size, num_size, hidden_size) return all_num.masked_fill_(masked_index, 0.0) def", "in range(len(target)): if target[i] == unk: num_stack = nums_stack_batch[i].pop() max_score", "module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule from module.Strategy.beam_search import TreeBeam", "for i in num_size_batch: d = i + len(generate_nums) +", "two pos, chose the max target_input = copy.deepcopy(target) for i", "B x 1 当前的tree state/ subtree embedding / output left_childs", "'ques len', 'equation', 'equ len', 'num stack', 'num size', 'num", "right_child.split(1), node_stacks, target[t].tolist(), embeddings_stacks): if len(node_stack) != 0: node =", "torch.LongTensor(target), torch.LongTensor(target_input) def mse_loss(self, outputs, targets, mask=None): # outputs :", "self.dropout_ratio) self.decoder = Prediction(self.hidden_size,self.operator_nums,self.generate_size,self.dropout_ratio) self.node_generater = GenerateNode(self.hidden_size, self.operator_nums, self.embedding_size, self.dropout_ratio)", "target[i] == unk: num_stack = nums_stack_batch[i].pop() max_score = -float(\"1e12\") for", "self.hidden_size:] # Prepare input and output variables node_stacks = [[TreeNode(_)]", "= Prediction(self.hidden_size,self.operator_nums,self.generate_size,self.dropout_ratio) self.node_generater = GenerateNode(self.hidden_size, self.operator_nums, self.embedding_size, self.dropout_ratio) self.merge =", "SARTreeDecoder(self.hidden_size, self.operator_nums, self.generate_size, self.dropout_ratio) self.decoder = Prediction(self.hidden_size,self.operator_nums,self.generate_size,self.dropout_ratio) self.node_generater = GenerateNode(self.hidden_size,", "dataset.num_start self.operator_nums = dataset.operator_nums self.generate_size = len(generate_list) self.unk_token = self.out_symbol2idx[SpecialTokens.UNK_TOKEN]", "= self.num_start # sequence mask for attention all_node_output = self.evaluate_tree(seq,", "= config[\"device\"] self.USE_CUDA = True if self.device == torch.device('cuda') else", "num_pos, num_start, self.beam_size, self.max_out_len) all_output = self.convert_idx2symbol(all_node_output, num_list[0], copy_list(nums_stack[0])) targets", "torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0) batch_size = 1 if self.USE_CUDA:", "len\"]).long() target = torch.tensor(batch_data[\"equation\"]).to(self.device) target_length = torch.LongTensor(batch_data[\"equ len\"]).to(self.device) nums_stack =", "for _ in range(batch_size)] # B x 1 for t", "num_mask) # all_leafs.append(p_leaf) outputs = torch.cat((op, num_score), 1) all_node_outputs.append(outputs) target_t,", "[] temp_1 = [1 for _ in range(hidden_size)] temp_0 =", "[batch_size,output_len,hidden_size] # mask : [batch_size,output_len] mask = mask.to(self.device) x =", "def mse_loss(self, outputs, targets, mask=None): # outputs : [batch_size,output_len,hidden_size] #", "i + len(generate_nums) + len(var_nums) num_mask.append([0] * d + [1]", "embeddings_stacks = [[] for _ in range(batch_size)] left_childs = [None", "target_length = torch.LongTensor(batch_data[\"equ len\"]).to(self.device) nums_stack = 
copy.deepcopy(batch_data[\"num stack\"]) num_size =", "torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5) # torch.nn.utils.clip_grad_norm_(predict.parameters(), 5) # torch.nn.utils.clip_grad_norm_(generate.parameters(), 5) # Update", "# B x 1 for t in range(max_target_length): num_score, op,", "= [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])] for t in range(max_length):", "current_num) current_embeddings_stacks[0].append(TreeEmbedding(current_num, True)) if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal: current_left_childs.append(current_embeddings_stacks[0][-1].embedding)", "b * sen_len) masked_index.append(temp_0) indices += [0 for _ in", "all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2", "num_pos, batch_size, num_size, self.hidden_size) # B x P x N", "for _ in range(batch_size)] beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs,", "in num_pos] num_size = max(copy_num_len) # 提取与问题相关的数字embedding all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs,", "float(tv), current_node_stack, current_embeddings_stacks, current_left_childs, current_out)) beams = sorted(current_beams, key=lambda x:", "self.out_symbol2idx[SpecialTokens.SOS_TOKEN] except: self.out_sos_token = None try: self.out_eos_token = self.out_symbol2idx[SpecialTokens.EOS_TOKEN] except:", "# self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio) self.encoder =", "num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack, left_childs, encoder_outputs, all_nums_encoder_outputs,", "generate_input.cuda() left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context) left_childs =", "range(batch_size)] beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])] for t", "try: self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN] except: self.out_pad_token = None # module", "# print(total_semanti_alognment_loss) # op_target = target < num_start # loss_0", "= num_stack.pop() c = num_list[pos_list[0]] res.append(c) except: return None else:", "sequence mask for attention seq_mask = [] max_len = max(input_length)", "for sa_pair in all_sa_outputs: new_all_sa_outputs.append((sa_pair[0].cuda(), sa_pair[1].cuda())) all_sa_outputs = new_all_sa_outputs #", "hidden_size) if self.USE_CUDA: indices = indices.cuda() masked_index = masked_index.cuda() all_outputs", "seq_length, generate_nums, num_pos, num_start, self.beam_size, self.max_out_len) all_output = self.convert_idx2symbol(all_node_output, num_list[0],", "= [] max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums) #", "= current_embeddings_stacks[0].pop() current_num = self.merge(op.embedding, sub_stree.embedding, current_num) current_embeddings_stacks[0].append(TreeEmbedding(current_num, True)) if", "node_label = self.node_generater(current_embeddings, generate_input, current_context) current_node_stack[0].append(TreeNode(right_child)) current_node_stack[0].append(TreeNode(left_child, left_flag=True)) current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))", "all_embedding.index_select(0, indices) all_num = all_num.view(batch_size, num_size, hidden_size) return all_num.masked_fill_(masked_index, 0.0)", "Prediction, GenerateNode, Merge, SemanticAlignmentModule from module.Strategy.beam_search import TreeBeam from loss.masked_cross_entropy_loss", "in 
all_sa_outputs: total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1]) # print(total_semanti_alognment_loss) total_semanti_alognment_loss =", "encoder_outputs[idx]) else: temp_encoder_outputs = encoder_outputs.transpose(0, 1) encoder_mapping, decoder_mapping = self.sa(current_num,temp_encoder_outputs[idx])", "padding_hidden = padding_hidden.cuda() num_mask = num_mask.cuda() # Run words through", "= self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size, self.encoder.hidden_size) embeddings_stacks = [[] for", "transpose into (max_len x batch_size) input_var = input_batch.transpose(0, 1) num_mask", "Word Problems\" in EMNLP 2020. \"\"\" def __init__(self, config, dataset):", "in range(max_target_length): num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder( node_stacks,", "indices += [0 for _ in range(len(num_pos[b]), num_size)] masked_index +=", "target[t].tolist(), embeddings_stacks): if len(node_stack) != 0: node = node_stack.pop() else:", "input_var = input_batch.transpose(0, 1) num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0)", "when the decoder input is copied num but the num", "range(i, max_len)]) seq_mask = torch.ByteTensor(seq_mask) num_mask = [] max_num_size =", "self.dropout_ratio) self.sa = SemanticAlignmentModule(self.hidden_size,self.hidden_size,self.hidden_size) self.loss1 = MaskedCrossEntropyLoss() # def calculate_loss(self,", "num_pos = batch_data[\"num pos\"] generate_nums = self.generate_nums num_start = self.num_start", "size\"] num_pos = batch_data[\"num pos\"] generate_nums = self.generate_nums num_start =", "= indices.cuda() masked_index = masked_index.cuda() all_outputs = encoder_outputs.transpose(0, 1).contiguous() all_embedding", "utils.enum_type import NumMask, SpecialTokens class SAUSolver(nn.Module): \"\"\" Reference: Qin et", "= -float(\"1e12\") for num in num_stack: if decoder_output[i, num_start +", "Reference: Qin et al. \"Semantically-Aligned Universal Tree-Structured Solver for Math", ":, self.hidden_size:] # Prepare input and output variables node_stacks =", "masked_cross_entropy from loss.mse_loss import MSELoss from utils.utils import copy_list from", "self.encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio, batch_first=False) #self.decoder =", "unk, num_start) return loss def model_test(self, batch_data:dict) -> tuple: \"\"\"Model" ]
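# --- Usage sketch (an addition, not part of the original module) ---
# A minimal, hypothetical driver for SAUSolver. It assumes MWPToolkit-style
# `config` and `dataset` objects and a `dataloader` yielding batch dicts with
# the keywords listed in the docstrings above; the `learning_rate` key and the
# choice of Adam are assumptions. Note train_tree() already calls
# loss.backward(), so the caller only zeroes and steps the optimizer:
#
#   model = SAUSolver(config, dataset)
#   optimizer = torch.optim.Adam(model.parameters(), lr=config["learning_rate"])
#   for batch in dataloader:
#       optimizer.zero_grad()
#       loss_value = model.calculate_loss(batch)  # forward + backward pass
#       optimizer.step()
#   equation, target = model.model_test(test_batch)  # beam-search decoding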
[ "buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_24(self): buffer_len = 2", "3, 3] self.n_jobs = 1 def lefttest(self, numbers, buffer_len, blocksize):", "set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy_single(self): out = StringIO() threading_easy(self.func, self.myiter,", "df, np.mean, 1, use_apply=True, by=labels) assert_frame_equal(result, benchmark) def test_groupby_to_series_to_frame_2(self): df", "'g1', 'g2'] benchmark = df.groupby(labels).mean() result = pandas_easy.groupby_to_series_to_frame( df, np.mean,", "def setUp(self): #self.numbers_1 = [ # 0, 0, 2, -1,", "2, 2], 'b': [4, 5, 6]}) benchmark = df.groupby('a').apply(max) result", "n_jobs zero, the wrap should raise a ValueError \"\"\" self.assertRaises(ValueError,", "blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_17(self): buffer_len", "pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]}) labels =", "self.num_threads = 4 def bytwo(x): return 2 * x self.func", "',', out) benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results", "self.assertEqual(result, n_jobs) def test_n_jobs_wrap_zero(self): \"\"\" For n_jobs zero, the wrap", "def test_map_easy_padded_blocks_24(self): buffer_len = 2 blocksize = 4 self.lefttest(self.numbers_10, buffer_len,", "partial(_abfunc, 2, 3) def frame_to_series(frame): x = frame.iloc[0, 0] return", "benchmark) def test_threading_easy(self): out = StringIO() threading_easy(self.func, self.myiter, self.num_threads, ',',", "self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_37(self): buffer_len =", "from rosetta.parallel.threading_easy import threading_easy, LockIterateApply # A couple functions for", "buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) class TestPandasEasy(unittest.TestCase): \"\"\" Tests the", "= np.random.randint(0, 5, 10) self.numbers_101 = np.random.randint(0, 5, 101) self.numbers_51", "self.numbers, 1) self.assertEqual(result, self.benchmark) def test_map_easy_3job(self): result = parallel_easy.map_easy(abfunc, self.numbers,", "= 2 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len,", "= [] lock = threading.Lock() out = StringIO() for i", "11, 11, 14, 55, 55, 44, 33, 33] self.numbers_10 =", "the wrap should raise a ValueError \"\"\" self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0)", "self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51,", "result = parallel_easy.map_easy(abfunc, self.numbers, 1) self.assertEqual(result, self.benchmark) def test_map_easy_3job(self): result", "6, 7, 6, 9, 12, 11, 11, 14, 55, 55,", "results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy_single(self): out = StringIO()", "of the test class for some reason. 
def _abfunc(x, a,", "benchmark = df.groupby('a').apply(max) result = pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a') assert_series_equal(result,", "threading.Lock() out = StringIO() for i in range(self.num_threads): t =", "buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( rightmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize)", "'b', 'b'] benchmark = s.groupby(labels).apply(max) result = pandas_easy.groupby_to_scalar_to_series( s, max,", "zero, the wrap should raise a ValueError \"\"\" self.assertRaises(ValueError, parallel_easy._n_jobs_wrap,", "blocksize) def test_map_easy_padded_blocks_37(self): buffer_len = 3 blocksize = 7 self.lefttest(self.numbers_101,", "= ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).apply(frame_to_series) result = pandas_easy.groupby_to_series_to_frame(", "test_groupby_to_scalar_to_series_2(self): s = pd.Series([1, 2, 3, 4]) labels = ['a',", "it(): for i in self.data: yield i self.myiter = it()", "set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy(self): out = StringIO() threading_easy(self.func, self.myiter,", "outside of the test class for some reason. def _abfunc(x,", "s, max, 1, by=labels) assert_series_equal(result, benchmark) def test_groupby_to_series_to_frame_1(self): df =", "6]}) labels = ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).mean() result", "by=labels) assert_frame_equal(result, benchmark) class TestLockIterateApply(unittest.TestCase): \"\"\" Test the Locked Iterator", "TestPandasEasy(unittest.TestCase): \"\"\" Tests the pandas_easy module. \"\"\" def setUp(self): pass", "assert_series_equal(result, benchmark) def test_groupby_to_series_to_frame_1(self): df = pd.DataFrame({'a': [6, 2, 2],", "'danieldaniel', 'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy(self):", "class TestMapEasyPaddedBlock(unittest.TestCase): \"\"\" Tests the parallel_easy.map_easy_padded_blocks function. \"\"\" def setUp(self):", "parallel_easy.imap_easy(abfunc, self.numbers, 1, 1) result = [] for number in", "import threading_easy, LockIterateApply # A couple functions for testing parallel", "threading from StringIO import StringIO from rosetta.parallel import parallel_easy, pandas_easy", "self.benchmark = [0, 6, 12, 18, 24] def test_map_easy_1job(self): result", "self.assertEqual(result, self.benchmark) def test_n_jobs_wrap_positive(self): \"\"\" For n_jobs positive, the wrap", "numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = rightmax(numbers) self.assertEqual(result, benchmark) def", "1 blocksize = 7 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize)", "1, ',', out) benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', ''])", "def setUp(self): self.numbers = range(5) self.benchmark = [0, 6, 12,", "= it() def test_locked_iterator(self): threads = [] lock = threading.Lock()", "= set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy(self): out = StringIO() threading_easy(self.func,", "3] self.n_jobs = 1 def lefttest(self, numbers, buffer_len, blocksize): result", "self.benchmark) def test_map_easy_3job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 3) self.assertEqual(result, self.benchmark)", "for some reason. def _abfunc(x, a, b=1): return x *", "class TestBase(unittest.TestCase): \"\"\" Tests the parallel_easy module. 
\"\"\" def setUp(self):", "self.assertEqual(result, self.benchmark) def test_imap_easy_1job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 1, 1)", "buffer_len = 2 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101,", "self.myiter, self.num_threads, ',', out) benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis',", "= 1 blocksize = 7 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len,", "StringIO import StringIO from rosetta.parallel import parallel_easy, pandas_easy from rosetta.parallel.threading_easy", "def setUp(self): self.data = ['my', 'name', 'is', 'daniel'] self.num_threads =", "t = LockIterateApply(self.func, self.myiter, lock, ',', out) threads.append(t) for t", "TestLockIterateApply(unittest.TestCase): \"\"\" Test the Locked Iterator Class \"\"\" def setUp(self):", "LockIterateApply(self.func, self.myiter, lock, ',', out) threads.append(t) for t in threads:", "from StringIO import StringIO from rosetta.parallel import parallel_easy, pandas_easy from", "def test_threading_easy_single(self): out = StringIO() threading_easy(self.func, self.myiter, 1, ',', out)", "df.groupby('a').apply(max) result = pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a') assert_series_equal(result, benchmark) def", "test_groupby_to_scalar_to_series_1(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5,", "the test class for some reason. def _abfunc(x, a, b=1):", "pandas as pd from pandas.util.testing import assert_frame_equal, assert_series_equal import numpy", "self.data = ['my', 'name', 'is', 'daniel'] self.num_threads = 4 def", "buffer_len = 3 blocksize = 7 self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51,", "setUp(self): self.data = ['my', 'name', 'is', 'daniel'] self.num_threads = 4", "#self.numbers_1 = [ # 0, 0, 2, -1, 4, 2,", "out) threads.append(t) for t in threads: t.start() for t in", "threading_easy, LockIterateApply # A couple functions for testing parallel easy", "reason. 
def _abfunc(x, a, b=1): return x * a *", "1, use_apply=True, by=labels) assert_frame_equal(result, benchmark) def test_groupby_to_series_to_frame_2(self): df = pd.DataFrame({'a':", "range(len(mylist))] def leftmax(mylist): for i in range(len(mylist)): if i ==", "self.numbers_51 = np.random.randint(0, 5, 101) #self.numbers_1 = [0, 1, 2,", "assert_frame_equal(result, benchmark) class TestLockIterateApply(unittest.TestCase): \"\"\" Test the Locked Iterator Class", "functools import partial import pandas as pd from pandas.util.testing import", "9, 12, 11, 11, 14, 55, 55, 44, 33, 33]", "self.numbers_101 = np.random.randint(0, 5, 101) self.numbers_51 = np.random.randint(0, 5, 101)", "for testing parallel easy # Must be defined outside of", "Test the Locked Iterator Class \"\"\" def setUp(self): self.data =", "self.assertEqual(results, benchmark) def test_threading_easy_single(self): out = StringIO() threading_easy(self.func, self.myiter, 1,", "= 4 def bytwo(x): return 2 * x self.func =", "= partial(_abfunc, 2, 3) def frame_to_series(frame): x = frame.iloc[0, 0]", "blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_24(self): buffer_len = 2 blocksize", "= s.groupby(labels).apply(max) result = pandas_easy.groupby_to_scalar_to_series( s, max, 1, by=labels) assert_series_equal(result,", "test_map_easy_padded_blocks_14(self): buffer_len = 1 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize)", "pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a') assert_series_equal(result, benchmark) def test_groupby_to_scalar_to_series_2(self): s =", "result = parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs) def test_n_jobs_wrap_zero(self): \"\"\" For n_jobs", "test_locked_iterator(self): threads = [] lock = threading.Lock() out = StringIO()", "n_jobs) def test_n_jobs_wrap_zero(self): \"\"\" For n_jobs zero, the wrap should", "numpy as np import threading from StringIO import StringIO from", "6]}) labels = ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).apply(frame_to_series) result", "= [] for number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def", "module. \"\"\" def setUp(self): self.numbers = range(5) self.benchmark = [0,", "0: result = [mylist[0]] else: result.append(max(mylist[i - 1: i+1])) return", "lock = threading.Lock() out = StringIO() for i in range(self.num_threads):", "Tests the pandas_easy module. \"\"\" def setUp(self): pass def test_groupby_to_scalar_to_series_1(self):", "x self.func = bytwo def it(): for i in self.data:", "a ValueError \"\"\" self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0) class TestMapEasyPaddedBlock(unittest.TestCase): \"\"\" Tests", "def _abfunc(x, a, b=1): return x * a * b", "the parallel_easy module. 
\"\"\" def setUp(self): self.numbers = range(5) self.benchmark", "test_groupby_to_series_to_frame_2(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5,", "55, 55, 44, 33, 33] self.numbers_10 = np.random.randint(0, 5, 10)", "self.benchmark) def test_imap_easy_3job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 3, 1) result", "[6, 2, 2], 'b': [4, 5, 6]}) benchmark = df.groupby('a').apply(max)", "2], 'b': [4, 5, 6]}) labels = ['g1', 'g1', 'g2']", "unittest from functools import partial import pandas as pd from", "rosetta.parallel.threading_easy import threading_easy, LockIterateApply # A couple functions for testing", "buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_37(self): buffer_len = 3", "buffer_len = 1 blocksize = 7 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101,", "np.mean, 1, use_apply=True, by=labels) assert_frame_equal(result, benchmark) def test_groupby_to_series_to_frame_2(self): df =", "b=1): return x * a * b abfunc = partial(_abfunc,", "benchmark) def test_groupby_to_series_to_frame_1(self): df = pd.DataFrame({'a': [6, 2, 2], 'b':", "number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_imap_easy_3job(self): result_iterator =", "= set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results,", "test_imap_easy_3job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 3, 1) result = []", "2 * x self.func = bytwo def it(): for i", "benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results = set(out.getvalue().split(','))", "1: i+1])) return result class TestBase(unittest.TestCase): \"\"\" Tests the parallel_easy", "#self.numbers_1 = [0, 1, 2, 0, 3, 2, 4, 3,", "self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_24(self): buffer_len =", "benchmark) def test_threading_easy_single(self): out = StringIO() threading_easy(self.func, self.myiter, 1, ',',", "in threads: t.start() for t in threads: t.join() benchmark =", "Class \"\"\" def setUp(self): self.data = ['my', 'name', 'is', 'daniel']", "parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs) def test_n_jobs_wrap_zero(self): \"\"\" For n_jobs zero, the", "def test_groupby_to_scalar_to_series_1(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4,", "A couple functions for testing parallel easy # Must be", "def test_n_jobs_wrap_positive(self): \"\"\" For n_jobs positive, the wrap should return", "test_threading_easy_single(self): out = StringIO() threading_easy(self.func, self.myiter, 1, ',', out) benchmark", "= pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a') assert_series_equal(result, benchmark) def test_groupby_to_scalar_to_series_2(self): s", "numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( leftmax, numbers, self.n_jobs, buffer_len,", "df.groupby(labels).mean() result = pandas_easy.groupby_to_series_to_frame( df, np.mean, 1, use_apply=True, by=labels) assert_frame_equal(result,", "test_map_easy_padded_blocks_24(self): buffer_len = 2 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize)", "out) benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results =", "\"\"\" def setUp(self): pass def test_groupby_to_scalar_to_series_1(self): df = pd.DataFrame({'a': [6,", "range(self.num_threads): t = 
LockIterateApply(self.func, self.myiter, lock, ',', out) threads.append(t) for", "= ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).mean() result = pandas_easy.groupby_to_series_to_frame(", "self.assertEqual(result, self.benchmark) def test_imap_easy_3job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 3, 1)", "def bytwo(x): return 2 * x self.func = bytwo def", "= [0, 6, 12, 18, 24] def test_map_easy_1job(self): result =", "4, 3, 2, 3, 3] self.n_jobs = 1 def lefttest(self,", "threads = [] lock = threading.Lock() out = StringIO() for", "4 def bytwo(x): return 2 * x self.func = bytwo", "test_n_jobs_wrap_positive(self): \"\"\" For n_jobs positive, the wrap should return n_jobs.", "= parallel_easy.map_easy_padded_blocks( leftmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = leftmax(numbers)", "self.numbers, 3) self.assertEqual(result, self.benchmark) def test_imap_easy_1job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers,", "import partial import pandas as pd from pandas.util.testing import assert_frame_equal,", "rightmax(mylist): return [max(mylist[i: i+2]) for i in range(len(mylist))] def leftmax(mylist):", "= [0, 1, 2, 0, 3, 2, 4, 3, 2,", "'a', 'b', 'b'] benchmark = s.groupby(labels).apply(max) result = pandas_easy.groupby_to_scalar_to_series( s,", "for number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_imap_easy_3job(self): result_iterator", "['a', 'a', 'b', 'b'] benchmark = s.groupby(labels).apply(max) result = pandas_easy.groupby_to_scalar_to_series(", "4, 2, 6, 7, 6, 9, 12, 11, 11, 14,", "blocksize = 7 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51,", "df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]})", "result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_n_jobs_wrap_positive(self): \"\"\" For n_jobs positive,", "numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = leftmax(numbers) self.assertEqual(result, benchmark) def", "as np import threading from StringIO import StringIO from rosetta.parallel", "4]) labels = ['a', 'a', 'b', 'b'] benchmark = s.groupby(labels).apply(max)", "[0, 6, 12, 18, 24] def test_map_easy_1job(self): result = parallel_easy.map_easy(abfunc,", "Tests the parallel_easy module. \"\"\" def setUp(self): self.numbers = range(5)", "def test_threading_easy(self): out = StringIO() threading_easy(self.func, self.myiter, self.num_threads, ',', out)", "i in range(self.num_threads): t = LockIterateApply(self.func, self.myiter, lock, ',', out)", "'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy(self): out", "0, 0, 2, -1, 4, 2, 6, 7, 6, 9,", "Must be defined outside of the test class for some", "max, 1, by='a') assert_series_equal(result, benchmark) def test_groupby_to_scalar_to_series_2(self): s = pd.Series([1,", "* len(frame.columns), index=frame.columns) def rightmax(mylist): return [max(mylist[i: i+2]) for i", "TestBase(unittest.TestCase): \"\"\" Tests the parallel_easy module. 
\"\"\" def setUp(self): self.numbers", "pandas_easy from rosetta.parallel.threading_easy import threading_easy, LockIterateApply # A couple functions", "test_threading_easy(self): out = StringIO() threading_easy(self.func, self.myiter, self.num_threads, ',', out) benchmark", "parallel_easy, pandas_easy from rosetta.parallel.threading_easy import threading_easy, LockIterateApply # A couple", "self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0) class TestMapEasyPaddedBlock(unittest.TestCase): \"\"\" Tests the parallel_easy.map_easy_padded_blocks function.", "np.random.randint(0, 5, 101) self.numbers_51 = np.random.randint(0, 5, 101) #self.numbers_1 =", "buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_37(self):", "pandas.util.testing import assert_frame_equal, assert_series_equal import numpy as np import threading", "6, 12, 18, 24] def test_map_easy_1job(self): result = parallel_easy.map_easy(abfunc, self.numbers,", "positive, the wrap should return n_jobs. \"\"\" for n_jobs in", "# Must be defined outside of the test class for", "7 self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize)", "setUp(self): pass def test_groupby_to_scalar_to_series_1(self): df = pd.DataFrame({'a': [6, 2, 2],", "3, 2, 3, 3] self.n_jobs = 1 def lefttest(self, numbers,", "blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize)", "class TestLockIterateApply(unittest.TestCase): \"\"\" Test the Locked Iterator Class \"\"\" def", "pd from pandas.util.testing import assert_frame_equal, assert_series_equal import numpy as np", "return [max(mylist[i: i+2]) for i in range(len(mylist))] def leftmax(mylist): for", "np.random.randint(0, 5, 10) self.numbers_101 = np.random.randint(0, 5, 101) self.numbers_51 =", "benchmark) def test_groupby_to_scalar_to_series_2(self): s = pd.Series([1, 2, 3, 4]) labels", "b abfunc = partial(_abfunc, 2, 3) def frame_to_series(frame): x =", "number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_n_jobs_wrap_positive(self): \"\"\" For", "assert_series_equal import numpy as np import threading from StringIO import", "raise a ValueError \"\"\" self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0) class TestMapEasyPaddedBlock(unittest.TestCase): \"\"\"", "LockIterateApply # A couple functions for testing parallel easy #", "1, 2, 0, 3, 2, 4, 3, 2, 3, 3]", "return n_jobs. \"\"\" for n_jobs in range(1, 5): result =", "= parallel_easy.map_easy(abfunc, self.numbers, 1) self.assertEqual(result, self.benchmark) def test_map_easy_3job(self): result =", "parallel_easy._n_jobs_wrap, 0) class TestMapEasyPaddedBlock(unittest.TestCase): \"\"\" Tests the parallel_easy.map_easy_padded_blocks function. 
\"\"\"", "1 def lefttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( leftmax,", "= df.groupby('a').apply(max) result = pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a') assert_series_equal(result, benchmark)", "def test_map_easy_padded_blocks_37(self): buffer_len = 3 blocksize = 7 self.lefttest(self.numbers_101, buffer_len,", "if i == 0: result = [mylist[0]] else: result.append(max(mylist[i -", "2, 2], 'b': [4, 5, 6]}) labels = ['g1', 'g1',", "range(5) self.benchmark = [0, 6, 12, 18, 24] def test_map_easy_1job(self):", "threads: t.join() benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results", "blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_24(self): buffer_len", "def test_map_easy_padded_blocks_17(self): buffer_len = 1 blocksize = 7 self.lefttest(self.numbers_10, buffer_len,", "t in threads: t.join() benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis',", "as pd from pandas.util.testing import assert_frame_equal, assert_series_equal import numpy as", "'daniel'] self.num_threads = 4 def bytwo(x): return 2 * x", "for i in self.data: yield i self.myiter = it() def", "buffer_len, blocksize=blocksize) benchmark = leftmax(numbers) self.assertEqual(result, benchmark) def righttest(self, numbers,", "testing parallel easy # Must be defined outside of the", "def rightmax(mylist): return [max(mylist[i: i+2]) for i in range(len(mylist))] def", "self.data: yield i self.myiter = it() def test_locked_iterator(self): threads =", "test_map_easy_3job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 3) self.assertEqual(result, self.benchmark) def test_imap_easy_1job(self):", "result = parallel_easy.map_easy(abfunc, self.numbers, 3) self.assertEqual(result, self.benchmark) def test_imap_easy_1job(self): result_iterator", "blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) class TestPandasEasy(unittest.TestCase): \"\"\" Tests the pandas_easy", "in range(self.num_threads): t = LockIterateApply(self.func, self.myiter, lock, ',', out) threads.append(t)", "== 0: result = [mylist[0]] else: result.append(max(mylist[i - 1: i+1]))", "33, 33] self.numbers_10 = np.random.randint(0, 5, 10) self.numbers_101 = np.random.randint(0,", "buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len,", "4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize)", "assert_series_equal(result, benchmark) def test_groupby_to_scalar_to_series_2(self): s = pd.Series([1, 2, 3, 4])", "= set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy_single(self): out = StringIO() threading_easy(self.func,", "= pandas_easy.groupby_to_scalar_to_series( s, max, 1, by=labels) assert_series_equal(result, benchmark) def test_groupby_to_series_to_frame_1(self):", "use_apply=False, by=labels) assert_frame_equal(result, benchmark) class TestLockIterateApply(unittest.TestCase): \"\"\" Test the Locked", "module. 
\"\"\" def setUp(self): pass def test_groupby_to_scalar_to_series_1(self): df = pd.DataFrame({'a':", "2 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize)", "For n_jobs zero, the wrap should raise a ValueError \"\"\"", "self.num_threads, ',', out) benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', ''])", "5, 101) self.numbers_51 = np.random.randint(0, 5, 101) #self.numbers_1 = [0,", "len(frame.columns), index=frame.columns) def rightmax(mylist): return [max(mylist[i: i+2]) for i in", "= parallel_easy.imap_easy(abfunc, self.numbers, 3, 1) result = [] for number", "max, 1, by=labels) assert_series_equal(result, benchmark) def test_groupby_to_series_to_frame_1(self): df = pd.DataFrame({'a':", "blocksize = 7 self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_101,", "function. \"\"\" def setUp(self): #self.numbers_1 = [ # 0, 0,", "'is', 'daniel'] self.num_threads = 4 def bytwo(x): return 2 *", "result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_imap_easy_3job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers,", "for t in threads: t.start() for t in threads: t.join()", "threading_easy(self.func, self.myiter, self.num_threads, ',', out) benchmark = set(['mymy', 'namename', 'danieldaniel',", "def frame_to_series(frame): x = frame.iloc[0, 0] return pd.Series([x] * len(frame.columns),", "parallel_easy.map_easy_padded_blocks( rightmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = rightmax(numbers) self.assertEqual(result,", "12, 11, 11, 14, 55, 55, 44, 33, 33] self.numbers_10", "= 1 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len,", "wrap should return n_jobs. \"\"\" for n_jobs in range(1, 5):", "s.groupby(labels).apply(max) result = pandas_easy.groupby_to_scalar_to_series( s, max, 1, by=labels) assert_series_equal(result, benchmark)", "assert_frame_equal, assert_series_equal import numpy as np import threading from StringIO", "benchmark = leftmax(numbers) self.assertEqual(result, benchmark) def righttest(self, numbers, buffer_len, blocksize):", "101) #self.numbers_1 = [0, 1, 2, 0, 3, 2, 4,", "blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_37(self): buffer_len = 3 blocksize", "def setUp(self): pass def test_groupby_to_scalar_to_series_1(self): df = pd.DataFrame({'a': [6, 2,", "x = frame.iloc[0, 0] return pd.Series([x] * len(frame.columns), index=frame.columns) def", "buffer_len, blocksize) def test_map_easy_padded_blocks_24(self): buffer_len = 2 blocksize = 4", "set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark)", "rightmax(numbers) self.assertEqual(result, benchmark) def test_map_easy_padded_blocks_14(self): buffer_len = 1 blocksize =", "the wrap should return n_jobs. \"\"\" for n_jobs in range(1,", "5): result = parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs) def test_n_jobs_wrap_zero(self): \"\"\" For", "[mylist[0]] else: result.append(max(mylist[i - 1: i+1])) return result class TestBase(unittest.TestCase):", "-1, 4, 2, 6, 7, 6, 9, 12, 11, 11,", "some reason. def _abfunc(x, a, b=1): return x * a", "test class for some reason. def _abfunc(x, a, b=1): return", "\"\"\" Tests the parallel_easy module. 
\"\"\" def setUp(self): self.numbers =", "6]}) benchmark = df.groupby('a').apply(max) result = pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a')", "24] def test_map_easy_1job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 1) self.assertEqual(result, self.benchmark)", "leftmax(numbers) self.assertEqual(result, benchmark) def righttest(self, numbers, buffer_len, blocksize): result =", "buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len,", "StringIO from rosetta.parallel import parallel_easy, pandas_easy from rosetta.parallel.threading_easy import threading_easy,", "pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]}) benchmark =", "[max(mylist[i: i+2]) for i in range(len(mylist))] def leftmax(mylist): for i", "= pd.Series([1, 2, 3, 4]) labels = ['a', 'a', 'b',", "out = StringIO() for i in range(self.num_threads): t = LockIterateApply(self.func,", "= df.groupby(labels).mean() result = pandas_easy.groupby_to_series_to_frame( df, np.mean, 1, use_apply=True, by=labels)", "2], 'b': [4, 5, 6]}) benchmark = df.groupby('a').apply(max) result =", "frame_to_series(frame): x = frame.iloc[0, 0] return pd.Series([x] * len(frame.columns), index=frame.columns)", "the Locked Iterator Class \"\"\" def setUp(self): self.data = ['my',", "= 3 blocksize = 7 self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len,", "threads.append(t) for t in threads: t.start() for t in threads:", "np.random.randint(0, 5, 101) #self.numbers_1 = [0, 1, 2, 0, 3,", "buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len,", "numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( rightmax, numbers, self.n_jobs, buffer_len,", "self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_17(self): buffer_len = 1 blocksize =", "5, 6]}) labels = ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).mean()", "\"\"\" def setUp(self): #self.numbers_1 = [ # 0, 0, 2,", "result = pandas_easy.groupby_to_series_to_frame( df, np.mean, 1, use_apply=True, by=labels) assert_frame_equal(result, benchmark)", "0, 3, 2, 4, 3, 2, 3, 3] self.n_jobs =", "= pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]}) benchmark", "1, use_apply=False, by=labels) assert_frame_equal(result, benchmark) class TestLockIterateApply(unittest.TestCase): \"\"\" Test the", "10) self.numbers_101 = np.random.randint(0, 5, 101) self.numbers_51 = np.random.randint(0, 5,", "['g1', 'g1', 'g2'] benchmark = df.groupby(labels).apply(frame_to_series) result = pandas_easy.groupby_to_series_to_frame( df,", "',', out) threads.append(t) for t in threads: t.start() for t", "self.assertEqual(result, benchmark) def righttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks(", "righttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( rightmax, numbers, self.n_jobs,", "1, 1) result = [] for number in result_iterator: result.append(number)", "np import threading from StringIO import StringIO from rosetta.parallel import", "= range(5) self.benchmark = [0, 6, 12, 18, 24] def", "= parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs) def test_n_jobs_wrap_zero(self): \"\"\" For n_jobs zero,", "parallel easy # Must be defined outside of the test", "pd.Series([x] * 
len(frame.columns), index=frame.columns) def rightmax(mylist): return [max(mylist[i: i+2]) for", "in range(1, 5): result = parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs) def test_n_jobs_wrap_zero(self):", "def test_groupby_to_series_to_frame_1(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4,", "for t in threads: t.join() benchmark = set(['mymy', 'namename', 'danieldaniel',", "pd.Series([1, 2, 3, 4]) labels = ['a', 'a', 'b', 'b']", "1) result = [] for number in result_iterator: result.append(number) self.assertEqual(result,", "partial import pandas as pd from pandas.util.testing import assert_frame_equal, assert_series_equal", "2, -1, 4, 2, 6, 7, 6, 9, 12, 11,", "2, 3, 4]) labels = ['a', 'a', 'b', 'b'] benchmark", "threading_easy(self.func, self.myiter, 1, ',', out) benchmark = set(['mymy', 'namename', 'danieldaniel',", "result = [] for number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark)", "test_map_easy_padded_blocks_37(self): buffer_len = 3 blocksize = 7 self.lefttest(self.numbers_101, buffer_len, blocksize)", "[] for number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_imap_easy_3job(self):", "2, 0, 3, 2, 4, 3, 2, 3, 3] self.n_jobs", "the pandas_easy module. \"\"\" def setUp(self): pass def test_groupby_to_scalar_to_series_1(self): df", "\"\"\" self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0) class TestMapEasyPaddedBlock(unittest.TestCase): \"\"\" Tests the parallel_easy.map_easy_padded_blocks", "= StringIO() for i in range(self.num_threads): t = LockIterateApply(self.func, self.myiter,", "0] return pd.Series([x] * len(frame.columns), index=frame.columns) def rightmax(mylist): return [max(mylist[i:", "5, 101) #self.numbers_1 = [0, 1, 2, 0, 3, 2,", "For n_jobs positive, the wrap should return n_jobs. \"\"\" for", "def test_locked_iterator(self): threads = [] lock = threading.Lock() out =", "bytwo def it(): for i in self.data: yield i self.myiter", "lock, ',', out) threads.append(t) for t in threads: t.start() for", "blocksize): result = parallel_easy.map_easy_padded_blocks( rightmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark", "use_apply=True, by=labels) assert_frame_equal(result, benchmark) def test_groupby_to_series_to_frame_2(self): df = pd.DataFrame({'a': [6,", "= ['a', 'a', 'b', 'b'] benchmark = s.groupby(labels).apply(max) result =", "return 2 * x self.func = bytwo def it(): for", "blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize)", "n_jobs positive, the wrap should return n_jobs. \"\"\" for n_jobs", "by=labels) assert_frame_equal(result, benchmark) def test_groupby_to_series_to_frame_2(self): df = pd.DataFrame({'a': [6, 2,", "t.join() benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', '']) results =", "class TestPandasEasy(unittest.TestCase): \"\"\" Tests the pandas_easy module. 
\"\"\" def setUp(self):", "buffer_len, blocksize) def test_map_easy_padded_blocks_37(self): buffer_len = 3 blocksize = 7", "[4, 5, 6]}) labels = ['g1', 'g1', 'g2'] benchmark =", "= LockIterateApply(self.func, self.myiter, lock, ',', out) threads.append(t) for t in", "n_jobs in range(1, 5): result = parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs) def", "i+1])) return result class TestBase(unittest.TestCase): \"\"\" Tests the parallel_easy module.", "'danieldaniel', 'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy_single(self):", "'g2'] benchmark = df.groupby(labels).apply(frame_to_series) result = pandas_easy.groupby_to_series_to_frame( df, frame_to_series, 1,", "= pandas_easy.groupby_to_series_to_frame( df, np.mean, 1, use_apply=True, by=labels) assert_frame_equal(result, benchmark) def", "benchmark = rightmax(numbers) self.assertEqual(result, benchmark) def test_map_easy_padded_blocks_14(self): buffer_len = 1", "a * b abfunc = partial(_abfunc, 2, 3) def frame_to_series(frame):", "\"\"\" Test the Locked Iterator Class \"\"\" def setUp(self): self.data", "t in threads: t.start() for t in threads: t.join() benchmark", "result = pandas_easy.groupby_to_scalar_to_series( s, max, 1, by=labels) assert_series_equal(result, benchmark) def", "setUp(self): self.numbers = range(5) self.benchmark = [0, 6, 12, 18,", "benchmark) def test_groupby_to_series_to_frame_2(self): df = pd.DataFrame({'a': [6, 2, 2], 'b':", "it() def test_locked_iterator(self): threads = [] lock = threading.Lock() out", "i in self.data: yield i self.myiter = it() def test_locked_iterator(self):", "result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 1, 1) result = [] for", "blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51,", "1 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize)", "= pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]}) labels", "self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10,", "n_jobs. \"\"\" for n_jobs in range(1, 5): result = parallel_easy._n_jobs_wrap(n_jobs)", "* b abfunc = partial(_abfunc, 2, 3) def frame_to_series(frame): x", "leftmax(mylist): for i in range(len(mylist)): if i == 0: result", "import assert_frame_equal, assert_series_equal import numpy as np import threading from", "ValueError \"\"\" self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0) class TestMapEasyPaddedBlock(unittest.TestCase): \"\"\" Tests the", "= parallel_easy.map_easy_padded_blocks( rightmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = rightmax(numbers)", "the parallel_easy.map_easy_padded_blocks function. 
\"\"\" def setUp(self): #self.numbers_1 = [ #", "def test_groupby_to_scalar_to_series_2(self): s = pd.Series([1, 2, 3, 4]) labels =", "import numpy as np import threading from StringIO import StringIO", "in range(len(mylist))] def leftmax(mylist): for i in range(len(mylist)): if i", "self.n_jobs = 1 def lefttest(self, numbers, buffer_len, blocksize): result =", "StringIO() for i in range(self.num_threads): t = LockIterateApply(self.func, self.myiter, lock,", "3, 1) result = [] for number in result_iterator: result.append(number)", "pandas_easy.groupby_to_series_to_frame( df, frame_to_series, 1, use_apply=False, by=labels) assert_frame_equal(result, benchmark) class TestLockIterateApply(unittest.TestCase):", "should raise a ValueError \"\"\" self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0) class TestMapEasyPaddedBlock(unittest.TestCase):", "self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_37(self): buffer_len = 3 blocksize =", "blocksize=blocksize) benchmark = leftmax(numbers) self.assertEqual(result, benchmark) def righttest(self, numbers, buffer_len,", "in self.data: yield i self.myiter = it() def test_locked_iterator(self): threads", "result = [mylist[0]] else: result.append(max(mylist[i - 1: i+1])) return result", "= np.random.randint(0, 5, 101) #self.numbers_1 = [0, 1, 2, 0,", "benchmark = df.groupby(labels).apply(frame_to_series) result = pandas_easy.groupby_to_series_to_frame( df, frame_to_series, 1, use_apply=False,", "= rightmax(numbers) self.assertEqual(result, benchmark) def test_map_easy_padded_blocks_14(self): buffer_len = 1 blocksize", "self.myiter = it() def test_locked_iterator(self): threads = [] lock =", "for i in range(len(mylist))] def leftmax(mylist): for i in range(len(mylist)):", "benchmark = s.groupby(labels).apply(max) result = pandas_easy.groupby_to_scalar_to_series( s, max, 1, by=labels)", "benchmark) def test_map_easy_padded_blocks_14(self): buffer_len = 1 blocksize = 4 self.lefttest(self.numbers_10,", "def test_n_jobs_wrap_zero(self): \"\"\" For n_jobs zero, the wrap should raise", "'']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy(self): out =", "= [ # 0, 0, 2, -1, 4, 2, 6,", "= frame.iloc[0, 0] return pd.Series([x] * len(frame.columns), index=frame.columns) def rightmax(mylist):", "in threads: t.join() benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', ''])", "lefttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( leftmax, numbers, self.n_jobs,", "5, 6]}) benchmark = df.groupby('a').apply(max) result = pandas_easy.groupby_to_scalar_to_series(df, max, 1,", "= bytwo def it(): for i in self.data: yield i", "[] for number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_n_jobs_wrap_positive(self):", "abfunc = partial(_abfunc, 2, 3) def frame_to_series(frame): x = frame.iloc[0,", "3) def frame_to_series(frame): x = frame.iloc[0, 0] return pd.Series([x] *", "= [mylist[0]] else: result.append(max(mylist[i - 1: i+1])) return result class", "range(len(mylist)): if i == 0: result = [mylist[0]] else: result.append(max(mylist[i", "self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) class TestPandasEasy(unittest.TestCase): \"\"\" Tests", "def test_map_easy_padded_blocks_14(self): buffer_len = 1 blocksize = 4 self.lefttest(self.numbers_10, buffer_len,", "Locked Iterator Class \"\"\" def setUp(self): self.data = ['my', 
'name',", "blocksize) class TestPandasEasy(unittest.TestCase): \"\"\" Tests the pandas_easy module. \"\"\" def", "range(1, 5): result = parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs) def test_n_jobs_wrap_zero(self): \"\"\"", "pandas_easy.groupby_to_scalar_to_series( s, max, 1, by=labels) assert_series_equal(result, benchmark) def test_groupby_to_series_to_frame_1(self): df", "self.numbers = range(5) self.benchmark = [0, 6, 12, 18, 24]", "df, frame_to_series, 1, use_apply=False, by=labels) assert_frame_equal(result, benchmark) class TestLockIterateApply(unittest.TestCase): \"\"\"", "self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def", "self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) class", "result.append(number) self.assertEqual(result, self.benchmark) def test_imap_easy_3job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 3,", "result = pandas_easy.groupby_to_series_to_frame( df, frame_to_series, 1, use_apply=False, by=labels) assert_frame_equal(result, benchmark)", "results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy(self): out = StringIO()", "= leftmax(numbers) self.assertEqual(result, benchmark) def righttest(self, numbers, buffer_len, blocksize): result", "assert_frame_equal(result, benchmark) def test_groupby_to_series_to_frame_2(self): df = pd.DataFrame({'a': [6, 2, 2],", "self.benchmark) def test_imap_easy_1job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 1, 1) result", "x * a * b abfunc = partial(_abfunc, 2, 3)", "pandas_easy.groupby_to_series_to_frame( df, np.mean, 1, use_apply=True, by=labels) assert_frame_equal(result, benchmark) def test_groupby_to_series_to_frame_2(self):", "df.groupby(labels).apply(frame_to_series) result = pandas_easy.groupby_to_series_to_frame( df, frame_to_series, 1, use_apply=False, by=labels) assert_frame_equal(result,", "self.numbers_10 = np.random.randint(0, 5, 10) self.numbers_101 = np.random.randint(0, 5, 101)", "blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize)", "return x * a * b abfunc = partial(_abfunc, 2,", "0, 2, -1, 4, 2, 6, 7, 6, 9, 12,", "= ['my', 'name', 'is', 'daniel'] self.num_threads = 4 def bytwo(x):", "\"\"\" Tests the pandas_easy module. 
\"\"\" def setUp(self): pass def", "= StringIO() threading_easy(self.func, self.myiter, self.num_threads, ',', out) benchmark = set(['mymy',", "index=frame.columns) def rightmax(mylist): return [max(mylist[i: i+2]) for i in range(len(mylist))]", "1, by='a') assert_series_equal(result, benchmark) def test_groupby_to_scalar_to_series_2(self): s = pd.Series([1, 2,", "* x self.func = bytwo def it(): for i in", "benchmark = df.groupby(labels).mean() result = pandas_easy.groupby_to_series_to_frame( df, np.mean, 1, use_apply=True,", "def righttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( rightmax, numbers,", "self.assertEqual(result, self.benchmark) def test_map_easy_3job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 3) self.assertEqual(result,", "7, 6, 9, 12, 11, 11, 14, 55, 55, 44,", "buffer_len, blocksize=blocksize) benchmark = rightmax(numbers) self.assertEqual(result, benchmark) def test_map_easy_padded_blocks_14(self): buffer_len", "def test_map_easy_3job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 3) self.assertEqual(result, self.benchmark) def", "\"\"\" For n_jobs zero, the wrap should raise a ValueError", "blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) class TestPandasEasy(unittest.TestCase): \"\"\"", "buffer_len, blocksize) def test_map_easy_padded_blocks_17(self): buffer_len = 1 blocksize = 7", "test_imap_easy_1job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 1, 1) result = []", "self.numbers, 3, 1) result = [] for number in result_iterator:", "def leftmax(mylist): for i in range(len(mylist)): if i == 0:", "import threading from StringIO import StringIO from rosetta.parallel import parallel_easy,", "t.start() for t in threads: t.join() benchmark = set(['mymy', 'namename',", "33] self.numbers_10 = np.random.randint(0, 5, 10) self.numbers_101 = np.random.randint(0, 5,", "'namename', 'danieldaniel', 'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def", "pass def test_groupby_to_scalar_to_series_1(self): df = pd.DataFrame({'a': [6, 2, 2], 'b':", "be defined outside of the test class for some reason.", "in range(len(mylist)): if i == 0: result = [mylist[0]] else:", "0) class TestMapEasyPaddedBlock(unittest.TestCase): \"\"\" Tests the parallel_easy.map_easy_padded_blocks function. \"\"\" def", "blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_17(self): buffer_len = 1 blocksize", "= pandas_easy.groupby_to_series_to_frame( df, frame_to_series, 1, use_apply=False, by=labels) assert_frame_equal(result, benchmark) class", "class for some reason. def _abfunc(x, a, b=1): return x", "101) self.numbers_51 = np.random.randint(0, 5, 101) #self.numbers_1 = [0, 1,", "parallel_easy module. 
\"\"\" def setUp(self): self.numbers = range(5) self.benchmark =", "self.n_jobs, buffer_len, blocksize=blocksize) benchmark = rightmax(numbers) self.assertEqual(result, benchmark) def test_map_easy_padded_blocks_14(self):", "# A couple functions for testing parallel easy # Must", "couple functions for testing parallel easy # Must be defined", "from pandas.util.testing import assert_frame_equal, assert_series_equal import numpy as np import", "in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_n_jobs_wrap_positive(self): \"\"\" For n_jobs", "'name', 'is', 'daniel'] self.num_threads = 4 def bytwo(x): return 2", "1, by=labels) assert_series_equal(result, benchmark) def test_groupby_to_series_to_frame_1(self): df = pd.DataFrame({'a': [6,", "buffer_len = 1 blocksize = 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101,", "\"\"\" For n_jobs positive, the wrap should return n_jobs. \"\"\"", "result = parallel_easy.map_easy_padded_blocks( leftmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark =", "defined outside of the test class for some reason. def", "test_n_jobs_wrap_zero(self): \"\"\" For n_jobs zero, the wrap should raise a", "wrap should raise a ValueError \"\"\" self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0) class", "self.myiter, 1, ',', out) benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis',", "self.n_jobs, buffer_len, blocksize=blocksize) benchmark = leftmax(numbers) self.assertEqual(result, benchmark) def righttest(self,", "\"\"\" for n_jobs in range(1, 5): result = parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result,", "blocksize=blocksize) benchmark = rightmax(numbers) self.assertEqual(result, benchmark) def test_map_easy_padded_blocks_14(self): buffer_len =", "['g1', 'g1', 'g2'] benchmark = df.groupby(labels).mean() result = pandas_easy.groupby_to_series_to_frame( df,", "else: result.append(max(mylist[i - 1: i+1])) return result class TestBase(unittest.TestCase): \"\"\"", "12, 18, 24] def test_map_easy_1job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 1)", "frame_to_series, 1, use_apply=False, by=labels) assert_frame_equal(result, benchmark) class TestLockIterateApply(unittest.TestCase): \"\"\" Test", "blocksize): result = parallel_easy.map_easy_padded_blocks( leftmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark", "3, 4]) labels = ['a', 'a', 'b', 'b'] benchmark =", "<filename>rosetta/tests/test_parallel.py import unittest from functools import partial import pandas as", "threads: t.start() for t in threads: t.join() benchmark = set(['mymy',", "i in range(len(mylist))] def leftmax(mylist): for i in range(len(mylist)): if", "3 blocksize = 7 self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize)", "def test_imap_easy_3job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 3, 1) result =", "by='a') assert_series_equal(result, benchmark) def test_groupby_to_scalar_to_series_2(self): s = pd.Series([1, 2, 3,", "\"\"\" def setUp(self): self.numbers = range(5) self.benchmark = [0, 6,", "parallel_easy.map_easy_padded_blocks( leftmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = leftmax(numbers) self.assertEqual(result,", "6, 9, 12, 11, 11, 14, 55, 55, 44, 33,", "parallel_easy.map_easy(abfunc, self.numbers, 3) self.assertEqual(result, self.benchmark) def test_imap_easy_1job(self): result_iterator = parallel_easy.imap_easy(abfunc,", 
"self.righttest(self.numbers_51, buffer_len, blocksize) class TestPandasEasy(unittest.TestCase): \"\"\" Tests the pandas_easy module.", "= threading.Lock() out = StringIO() for i in range(self.num_threads): t", "for number in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_n_jobs_wrap_positive(self): \"\"\"", "self.benchmark) def test_n_jobs_wrap_positive(self): \"\"\" For n_jobs positive, the wrap should", "* a * b abfunc = partial(_abfunc, 2, 3) def", "Iterator Class \"\"\" def setUp(self): self.data = ['my', 'name', 'is',", "import unittest from functools import partial import pandas as pd", "buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_24(self):", "= StringIO() threading_easy(self.func, self.myiter, 1, ',', out) benchmark = set(['mymy',", "i == 0: result = [mylist[0]] else: result.append(max(mylist[i - 1:", "for i in range(self.num_threads): t = LockIterateApply(self.func, self.myiter, lock, ',',", "from functools import partial import pandas as pd from pandas.util.testing", "test_map_easy_1job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 1) self.assertEqual(result, self.benchmark) def test_map_easy_3job(self):", "= 7 self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len,", "self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize) self.righttest(self.numbers_101,", "should return n_jobs. \"\"\" for n_jobs in range(1, 5): result", "= 4 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len,", "parallel_easy.imap_easy(abfunc, self.numbers, 3, 1) result = [] for number in", "14, 55, 55, 44, 33, 33] self.numbers_10 = np.random.randint(0, 5,", "= np.random.randint(0, 5, 101) self.numbers_51 = np.random.randint(0, 5, 101) #self.numbers_1", "def test_imap_easy_1job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 1, 1) result =", "buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( leftmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize)", "= df.groupby(labels).apply(frame_to_series) result = pandas_easy.groupby_to_series_to_frame( df, frame_to_series, 1, use_apply=False, by=labels)", "bytwo(x): return 2 * x self.func = bytwo def it():", "i+2]) for i in range(len(mylist))] def leftmax(mylist): for i in", "s = pd.Series([1, 2, 3, 4]) labels = ['a', 'a',", "test_groupby_to_series_to_frame_1(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5,", "functions for testing parallel easy # Must be defined outside", "benchmark) def righttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( rightmax,", "benchmark) class TestLockIterateApply(unittest.TestCase): \"\"\" Test the Locked Iterator Class \"\"\"", "'']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy_single(self): out =", "buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_17(self): buffer_len = 1", "import pandas as pd from pandas.util.testing import assert_frame_equal, assert_series_equal import", "i self.myiter = it() def test_locked_iterator(self): threads = [] lock", 
"rightmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = rightmax(numbers) self.assertEqual(result, benchmark)", "self.func = bytwo def it(): for i in self.data: yield", "55, 44, 33, 33] self.numbers_10 = np.random.randint(0, 5, 10) self.numbers_101", "self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_17(self): buffer_len =", "frame.iloc[0, 0] return pd.Series([x] * len(frame.columns), index=frame.columns) def rightmax(mylist): return", "parallel_easy.map_easy_padded_blocks function. \"\"\" def setUp(self): #self.numbers_1 = [ # 0,", "easy # Must be defined outside of the test class", "result = pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a') assert_series_equal(result, benchmark) def test_groupby_to_scalar_to_series_2(self):", "import StringIO from rosetta.parallel import parallel_easy, pandas_easy from rosetta.parallel.threading_easy import", "buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_17(self):", "_abfunc(x, a, b=1): return x * a * b abfunc", "2, 6, 7, 6, 9, 12, 11, 11, 14, 55,", "[] lock = threading.Lock() out = StringIO() for i in", "rosetta.parallel import parallel_easy, pandas_easy from rosetta.parallel.threading_easy import threading_easy, LockIterateApply #", "'b': [4, 5, 6]}) labels = ['g1', 'g1', 'g2'] benchmark", "def lefttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks( leftmax, numbers,", "self.myiter, lock, ',', out) threads.append(t) for t in threads: t.start()", "out = StringIO() threading_easy(self.func, self.myiter, 1, ',', out) benchmark =", "result.append(max(mylist[i - 1: i+1])) return result class TestBase(unittest.TestCase): \"\"\" Tests", "= 7 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len,", "in result_iterator: result.append(number) self.assertEqual(result, self.benchmark) def test_imap_easy_3job(self): result_iterator = parallel_easy.imap_easy(abfunc,", "5, 10) self.numbers_101 = np.random.randint(0, 5, 101) self.numbers_51 = np.random.randint(0,", "import parallel_easy, pandas_easy from rosetta.parallel.threading_easy import threading_easy, LockIterateApply # A", "leftmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark = leftmax(numbers) self.assertEqual(result, benchmark)", "i in range(len(mylist)): if i == 0: result = [mylist[0]]", "'g1', 'g2'] benchmark = df.groupby(labels).apply(frame_to_series) result = pandas_easy.groupby_to_series_to_frame( df, frame_to_series,", "2, 4, 3, 2, 3, 3] self.n_jobs = 1 def", "result.append(number) self.assertEqual(result, self.benchmark) def test_n_jobs_wrap_positive(self): \"\"\" For n_jobs positive, the", "pandas_easy module. 
\"\"\" def setUp(self): pass def test_groupby_to_scalar_to_series_1(self): df =", "from rosetta.parallel import parallel_easy, pandas_easy from rosetta.parallel.threading_easy import threading_easy, LockIterateApply", "\"\"\" def setUp(self): self.data = ['my', 'name', 'is', 'daniel'] self.num_threads", "3, 2, 4, 3, 2, 3, 3] self.n_jobs = 1", "2, 3) def frame_to_series(frame): x = frame.iloc[0, 0] return pd.Series([x]", "= 1 def lefttest(self, numbers, buffer_len, blocksize): result = parallel_easy.map_easy_padded_blocks(", "buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len,", "[6, 2, 2], 'b': [4, 5, 6]}) labels = ['g1',", "self.numbers, 1, 1) result = [] for number in result_iterator:", "out = StringIO() threading_easy(self.func, self.myiter, self.num_threads, ',', out) benchmark =", "'b'] benchmark = s.groupby(labels).apply(max) result = pandas_easy.groupby_to_scalar_to_series( s, max, 1,", "def test_map_easy_1job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 1) self.assertEqual(result, self.benchmark) def", "[0, 1, 2, 0, 3, 2, 4, 3, 2, 3,", "labels = ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).mean() result =", "self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_24(self): buffer_len = 2 blocksize =", "44, 33, 33] self.numbers_10 = np.random.randint(0, 5, 10) self.numbers_101 =", "self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def", "self.assertEqual(results, benchmark) def test_threading_easy(self): out = StringIO() threading_easy(self.func, self.myiter, self.num_threads,", "5, 6]}) labels = ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).apply(frame_to_series)", "'isis', '']) results = set(out.getvalue().split(',')) self.assertEqual(results, benchmark) def test_threading_easy_single(self): out", "[4, 5, 6]}) benchmark = df.groupby('a').apply(max) result = pandas_easy.groupby_to_scalar_to_series(df, max,", "'g2'] benchmark = df.groupby(labels).mean() result = pandas_easy.groupby_to_series_to_frame( df, np.mean, 1,", "3) self.assertEqual(result, self.benchmark) def test_imap_easy_1job(self): result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 1,", "self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51,", "1) self.assertEqual(result, self.benchmark) def test_map_easy_3job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 3)", "blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) def test_map_easy_padded_blocks_37(self): buffer_len", "return pd.Series([x] * len(frame.columns), index=frame.columns) def rightmax(mylist): return [max(mylist[i: i+2])", "labels = ['a', 'a', 'b', 'b'] benchmark = s.groupby(labels).apply(max) result", "['my', 'name', 'is', 'daniel'] self.num_threads = 4 def bytwo(x): return", "return result class TestBase(unittest.TestCase): \"\"\" Tests the parallel_easy module. 
\"\"\"", "11, 14, 55, 55, 44, 33, 33] self.numbers_10 = np.random.randint(0,", "blocksize) def test_map_easy_padded_blocks_24(self): buffer_len = 2 blocksize = 4 self.lefttest(self.numbers_10,", "def it(): for i in self.data: yield i self.myiter =", "'b': [4, 5, 6]}) benchmark = df.groupby('a').apply(max) result = pandas_easy.groupby_to_scalar_to_series(df,", "StringIO() threading_easy(self.func, self.myiter, 1, ',', out) benchmark = set(['mymy', 'namename',", "setUp(self): #self.numbers_1 = [ # 0, 0, 2, -1, 4,", "StringIO() threading_easy(self.func, self.myiter, self.num_threads, ',', out) benchmark = set(['mymy', 'namename',", "\"\"\" Tests the parallel_easy.map_easy_padded_blocks function. \"\"\" def setUp(self): #self.numbers_1 =", "= parallel_easy.imap_easy(abfunc, self.numbers, 1, 1) result = [] for number", "2, 3, 3] self.n_jobs = 1 def lefttest(self, numbers, buffer_len,", "for i in range(len(mylist)): if i == 0: result =", "7 self.lefttest(self.numbers_10, buffer_len, blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize)", "TestMapEasyPaddedBlock(unittest.TestCase): \"\"\" Tests the parallel_easy.map_easy_padded_blocks function. \"\"\" def setUp(self): #self.numbers_1", "buffer_len, blocksize) self.righttest(self.numbers_101, buffer_len, blocksize) self.righttest(self.numbers_51, buffer_len, blocksize) class TestPandasEasy(unittest.TestCase):", "yield i self.myiter = it() def test_locked_iterator(self): threads = []", "blocksize) self.lefttest(self.numbers_101, buffer_len, blocksize) self.lefttest(self.numbers_51, buffer_len, blocksize) self.righttest(self.numbers_10, buffer_len, blocksize)", "18, 24] def test_map_easy_1job(self): result = parallel_easy.map_easy(abfunc, self.numbers, 1) self.assertEqual(result,", "buffer_len, blocksize) class TestPandasEasy(unittest.TestCase): \"\"\" Tests the pandas_easy module. \"\"\"", "a, b=1): return x * a * b abfunc =", "= parallel_easy.map_easy(abfunc, self.numbers, 3) self.assertEqual(result, self.benchmark) def test_imap_easy_1job(self): result_iterator =", "def test_groupby_to_series_to_frame_2(self): df = pd.DataFrame({'a': [6, 2, 2], 'b': [4,", "test_map_easy_padded_blocks_17(self): buffer_len = 1 blocksize = 7 self.lefttest(self.numbers_10, buffer_len, blocksize)", "result class TestBase(unittest.TestCase): \"\"\" Tests the parallel_easy module. \"\"\" def", "result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 3, 1) result = [] for", "result = parallel_easy.map_easy_padded_blocks( rightmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize) benchmark =", "by=labels) assert_series_equal(result, benchmark) def test_groupby_to_series_to_frame_1(self): df = pd.DataFrame({'a': [6, 2,", "parallel_easy.map_easy(abfunc, self.numbers, 1) self.assertEqual(result, self.benchmark) def test_map_easy_3job(self): result = parallel_easy.map_easy(abfunc,", "# 0, 0, 2, -1, 4, 2, 6, 7, 6,", "labels = ['g1', 'g1', 'g2'] benchmark = df.groupby(labels).apply(frame_to_series) result =", "blocksize) def test_map_easy_padded_blocks_17(self): buffer_len = 1 blocksize = 7 self.lefttest(self.numbers_10,", "self.assertEqual(result, benchmark) def test_map_easy_padded_blocks_14(self): buffer_len = 1 blocksize = 4", "[ # 0, 0, 2, -1, 4, 2, 6, 7,", "for n_jobs in range(1, 5): result = parallel_easy._n_jobs_wrap(n_jobs) self.assertEqual(result, n_jobs)", "Tests the parallel_easy.map_easy_padded_blocks function. 
\"\"\" def setUp(self): #self.numbers_1 = [", "- 1: i+1])) return result class TestBase(unittest.TestCase): \"\"\" Tests the" ]
import time
import srt
import re
import datetime

from mqtthandler import MQTTHandler

INIT_STATUS = {
    "video": {
        "title": None,
        "series_title": None,
        "season": None,
        "episode": None
    },
    "time": None,
    "events": None
}


class SubtitleHandler:
    subtitles = []
    phrases = []

    def __init__(self, broker):
        self.mqtt = MQTTHandler(broker)

    def parseSRT(self, srt_filename):
        f = open(srt_filename, "r")
        subtitle_generate = srt.parse(f.read())
        f.close()
        self.subtitles = list(subtitle_generate)
        return self.subtitles

    def parsePhrases(self, phrase_filename):
        f = open(phrase_filename, "r")
        lines = f.readlines()
        for line in lines:
            phrase = line.rstrip("\n\r").split("/")
            self.phrases.append(phrase)
        return self.phrases

    def isPhraseInLine(self, phrase, sub, content):
        # Count (possibly repeated) occurrences of the phrase in the line,
        # after stripping punctuation and lowercasing both sides.
        sub_line = re.sub('[^A-Za-z0-9\s]+', '', str(content)).lower()
        phrase = re.sub('[^A-Za-z0-9\s]+', '', str(phrase)).lower()
        count = 0
        while bool(re.search(phrase, sub_line)):
            count += 1
            sub_line = sub_line.replace(phrase, '', 1)
        return count

    def getEventTime(self, sub):
        # Midpoint of the subtitle's display interval, in whole seconds.
        middle = sub.end - sub.start
        between_sec = datetime.timedelta.total_seconds(middle) / 2
        sec = between_sec + datetime.timedelta.total_seconds(sub.start)
        return int(sec)

    def matchEventToMovie(self, movie, subtitles, phrases, time_offset):
        global INIT_STATUS
        status = INIT_STATUS
        status["video"]["title"] = movie
        #TODO determine how to set up phrase data
        for sub in subtitles:
            c = sub.content.replace('\n', ' ')
            c = c.split(" ")
            firstpart, secondpart = " ".join(c[:len(c)//2]), " ".join(c[len(c)//2:])
            mult = 0
            for phrase in phrases:
                line = phrase[0]
                events = phrase[1]
                mult += self.isPhraseInLine(line, sub, sub.content)
                #f = self.isPhraseInLine(line, sub, firstpart)
                #s = self.isPhraseInLine(line, sub, secondpart)
                #if f + s == 0:
                #    mult += self.isPhraseInLine(line, sub, sub.content)
                #else:
                #    mult += f + s
            ## DEAR LESS DRUNK SELF
            # This currently adds the number of events over the entire
            # subtitle. To fix it, split each subtitle into two parts: the
            # first part is the half with the first bit of text, which gets
            # the correct time-to-event for that half; the second half gets
            # the correct time-to-event for the second half (sketched after
            # this class). You could have three if statements that check,
            # each of them reaching a sendMessage() call.
            if mult > 0:
                # won't work properly if events is greater than 1
                status["time"] = self.getEventTime(sub) + time_offset
                status["events"] = int(events) * mult
                self.sendMessage(status)
                #print(sub.content)

    def sendMessage(self, msg):
        self.mqtt.send(msg)
        print(msg)
        return msg

    def isDone(self):
        return True
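# A minimal sketch of the half-splitting fix described in the comments
# above. split_with_times is a hypothetical helper (not part of the original
# class): it pairs each half of a subtitle's text with the midpoint of that
# half's display interval, so a phrase match can be stamped with a per-half
# timestamp instead of the whole subtitle's midpoint.
def split_with_times(sub):
    words = sub.content.replace('\n', ' ').split(' ')
    firstpart = ' '.join(words[:len(words)//2])
    secondpart = ' '.join(words[len(words)//2:])
    quarter = datetime.timedelta.total_seconds(sub.end - sub.start) / 4
    start = datetime.timedelta.total_seconds(sub.start)
    # Midpoints of the two halves sit 1/4 and 3/4 of the way through.
    return [(firstpart, int(start + quarter)),
            (secondpart, int(start + 3 * quarter))]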
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd

os.getcwd()

# Request the filename
# The current version of this script works only with TSV-type files
mainFilename = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()

# To create a proper dataframe: read it with numpy first,
# then convert it with pandas
filenameData = np.genfromtxt(mainFilename, dtype='str')
filenameData = pd.DataFrame(filenameData)

# Obtain the first row to identify whether the header is string or numeric
headers = filenameData.iloc[0]
try:
    pd.to_numeric(headers)
except ValueError:
    filenameData = pd.DataFrame(filenameData.values[1:], columns=headers)

# Change strings to numbers (pandas infers float or integer)
filenameData = filenameData.apply(pd.to_numeric)

# Obtain the mean and standard deviation of the columns
listMean = filenameData.mean()
listStd = filenameData.std()
print(filenameData)

# Print out the results
print('Mean for each column:')
for idx in filenameData.columns:
    print(idx, ':', listMean[idx])
print()
print('Standard deviation for each column:')
for idx in filenameData.columns:
    print(idx, ':', listStd[idx])
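# The try/except above hinges on pd.to_numeric raising ValueError when the
# first row holds column names rather than numbers. A standalone illustration
# of the same header-detection idea, on a hypothetical two-row toy frame:
demo = pd.DataFrame([['age', 'bmi'], ['59', '32.1']])
first_row = demo.iloc[0]
try:
    pd.to_numeric(first_row)
except ValueError:
    # Non-numeric first row: promote it to the header.
    demo = pd.DataFrame(demo.values[1:], columns=first_row)
print(demo.apply(pd.to_numeric).mean())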