ngram
listlengths
0
67.8k
[ "translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines = translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation): raise RuntimeError(", "/ \\ # B D angle = 135 # \\ / # A", "{ None: \"The world is on fire, something really bad happened. I have", "reason.count(\"\\n\") > 1: # we got some garbage HTML response reason = \"unknown", "return self.right - self.left # type: ignore assert False # noqa @property def", "r.json() raise PINKError( f\"Error in underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}' ) json", "14: \"This means Google cannot access image URL. Try using a different one.\",", "(languages := properties.get(\"detectedLanguages\")) is None: return None return sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][", "+ self._padding)), # type: ignore ) # TODO: implement w/h detection ASAP, this", "AngleUndetectable(TROCRException): pass class TextField: def __init__(self, full_text: str, src: PIL.Image, padding: int =", "D if 0 <= angle <= 90: left = vertices[0].get(\"x\") upper = vertices[1].get(\"y\")", "# annotations for word in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\"))", "angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts language for each paragraph in Google", "def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) -> None: if not self.initialized: #", "text is in target language or language is undetected)\", formatted=False, ) translated =", "\"https://explorer.apis.google.com\", }, ) as r: if r.status != 200: if r.content_type.lower() != \"application/json\":", "loaded\") return [ # trocr fully depends on newlines, apply accents to each", "if lower is None: lower = src_size[1] return (left, upper, right, lower) @staticmethod", "vertices[1].get(\"y\") elif 270 < angle <= 360: left = vertices[3].get(\"x\") 
upper = vertices[0].get(\"y\")", "Coordinates from words in the same line can be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\")", "image URL. Try using a different one.\", } def __init__(self, code: Optional[int], message:", "BytesIO from typing import Any, Dict, List, Tuple, Union, Iterator, Optional, Sequence import", "270 < angle <= 360: left = vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right =", "extract_language(paragraph) yield paragraph_language or block_language # line grouping differs between simple annotations and", "PIL from PIL import ImageDraw, ImageFont, ImageFilter from pink_accents import Accent from pink.context", "depends on newlines, apply accents to each line separately and # replace any", "= \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this a hard dependency by not reading it", "FIELD_CAP = 150 fields = fields[:FIELD_CAP] src = src.convert(\"RGBA\") for field in fields:", "upper, right, lower) @staticmethod def _get_angle(vertices: _VerticesType) -> int: def get_coords(vertex: _VertexType) ->", ":= properties.get(\"detectedLanguages\")) is None: return None return sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\"", "accents to each line separately and # replace any newlines with spaces to", "await r.text() if reason.count(\"\\n\") > 1: # we got some garbage HTML response", "# compensate missing vertices degrees += 90 * i break else: raise AngleUndetectable", "def _apply_accents(ctx: Context, lines: List[str], accent: Accent) -> List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\"))", "ocr_translate( ctx: Context, image: StaticImage, language: Union[str, Accent] ) -> Tuple[BytesIO, str]: src", "!= \"EOL_SURE_SPACE\": continue yield paragraph_language or block_language async def ocr(ctx: Context, image_url: str)", "@property def coords_padded(self) -> Tuple[int, int, int, int]: return ( max((0, self.left -", "# | | angle = 360/0 # D----C 
# # A # /", "for field in fields: # TODO: figure out how to fit text into", "-> List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No translator cog loaded\")", "ignore assert False # noqa @property def height(self) -> int: if self.angle in", ") as r: if r.status != 200: if r.content_type.lower() != \"application/json\": reason =", "None else min((self.upper, upper)) self.right = right if self.right is None else max((self.right,", "- 1 # C - 2 # D - 3 # # A----B", "- 2)) @property def stroke_width(self) -> int: return max((1, round(self.font_size / 12))) @property", "C----B # # D # / \\ # C A angle = 225", "None if (languages := properties.get(\"detectedLanguages\")) is None: return None return sorted(languages, key=lambda l:", "B D angle = 135 # \\ / # A # # B---C", "\"This means Google cannot access image URL. Try using a different one.\", }", "List, Tuple, Union, Iterator, Optional, Sequence import PIL from PIL import ImageDraw, ImageFont,", "360: left = vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower = vertices[2].get(\"y\")", "field.coords_padded[:2], ) result = BytesIO() src.save(result, format=\"PNG\") return BytesIO(result.getvalue()) def _apply_accents(ctx: Context, lines:", "be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines = _apply_accents(ctx, lines, language)", "= translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)} translated lines, got", "ignore if self.angle in (90, 270): return self.lower - self.upper # type: ignore", "in self.coords def __repr__(self) -> str: return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks:", "self.initialized: # Get angle from first word self.angle = self._get_angle(vertices) left, upper, right,", "List[str], language: str, 
block_annotations: Any, ) -> List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\")) is", ") else: json = await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL,", "{ \"source\": { \"imageUri\": image_url, } }, } ] }, headers={ \"x-origin\": \"https://explorer.apis.google.com\",", "full_text self.left: Optional[int] = None self.upper: Optional[int] = None self.right: Optional[int] = None", "yield paragraph_language or block_language # line grouping differs between simple annotations and paragraph", "type: ignore min((self._src_width, self.right + self._padding)), # type: ignore min((self._src_height, self.lower + self._padding)),", "self.right - self.left # type: ignore if self.angle in (90, 270): return self.lower", "def __init__(self, code: Optional[int], message: str): self.code = code self.message = message super().__init__(str(self))", "might be missing, 1st solution is more reliable if it worked @property def", "self.left # type: ignore if self.angle in (90, 270): return self.lower - self.upper", "import StaticImage _VertexType = Dict[str, int] _VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL", "json = await r.json() if len((responses := json[\"responses\"])) == 0: return {} maybe_annotations", "fields = fields[:FIELD_CAP] src = src.convert(\"RGBA\") for field in fields: cropped = src.crop(field.coords_padded)", "= _apply_accents(ctx, lines, language) else: new_lines = await _apply_translation(ctx, lines, language, block_annotations) #", "PIL.Image, padding: int = 3): self.text = full_text self.left: Optional[int] = None self.upper:", "block_annotations) # error reporting notes = \"\" current_word = 0 fields = []", "lines, lines are separated by newlines, there is a trailing newline. # Coordinates", "ugly!!! src.paste(blurred, field.coords_padded) for field in fields: # TODO: figure out how to", "world is on fire, something really bad happened. 
I have no idea.\", 14:", "stats = f\"Words: {current_word}\\nLines: {len(fields)}\" if notes: stats += f\"\\nNotes: {notes}\" return result,", "if self.left is None else min((self.left, left)) self.upper = upper if self.upper is", "is None: lower = src_size[1] return (left, upper, right, lower) @staticmethod def _get_angle(vertices:", "instead of this for word in word_annotations[current_word:]: text = word[\"description\"] if remaining_line.startswith(text): current_word", "f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS = { None: \"The world", "- 2 # D - 3 # # A----B # | | angle", "is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for line in lines ] async def", "error\" raise PINKError( f\"Something really bad happened with underlying API[{r.status}]: {reason}\" ) json", "import ImageDraw, ImageFont, ImageFilter from pink_accents import Accent from pink.context import Context from", "to 90 degrees return 90 * round(degrees / 90) @property def coords(self) ->", "- 3 # # A----B # | | angle = 360/0 # D----C", "r.status != 200: await ctx.reply( f\"Unable to reach proxy: {r.status}\\n\" f\"Will try raw", "if degrees < 0: degrees += 360 # compensate missing vertices degrees +=", "180, 360): return self.right - self.left # type: ignore if self.angle in (90,", "padding def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) -> None: if not self.initialized:", "combined into # lines, lines are separated by newlines, there is a trailing", "\\ / # B # # C---D # | | angle = 180", "blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work anymore for some reason, black stroke", "_VertexType, _VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this a hard dependency", "= BytesIO() src.save(result, format=\"PNG\") return BytesIO(result.getvalue()) def 
_apply_accents(ctx: Context, lines: List[str], accent: Accent)", "vertex.get(\"y\") cycle = itertools.cycle(vertices) x, y = get_coords(next(cycle)) for i in range(4): next_x,", "# \\ / # D if 0 <= angle <= 90: left =", "} ] }, headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, ) as r: if", "if len(translated_lines) != len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}\" )", "lower = self._vertices_to_coords( vertices, src_size, self.angle ) self.left = left if self.left is", "(translator_cog := ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No translator cog loaded\") # TODO: group", "angle = 180 # B---A # # C # / \\ # B", "assert False # noqa @property def font_size(self) -> int: return max((1, int(1.3333333 *", "get_coords(next(cycle)) for i in range(4): next_x, next_y = get_coords(next(cycle)) # Any vertex coordinate", "newlines with spaces to make sure text order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \"", "Dict[str, Any]: async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), ) as r: if", "class AngleUndetectable(TROCRException): pass class TextField: def __init__(self, full_text: str, src: PIL.Image, padding: int", "+= 360 # compensate missing vertices degrees += 90 * i break else:", "200: await ctx.reply( f\"Unable to reach proxy: {r.status}\\n\" f\"Will try raw URL but", "= Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this a", "if remaining_line.startswith(text): current_word += 1 remaining_line = remaining_line[len(text) :].lstrip() # TODO: merge multiple", "notes += f\"angle for `{word}` is undetectable\\n\" else: break if field.initialized: if line.casefold()", "max((1, 
round(self.font_size / 12))) @property def initialized(self) -> bool: return None not in", "), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result = BytesIO() src.save(result, format=\"PNG\") return BytesIO(result.getvalue())", "src_size: Tuple[int, int], angle: int ) -> Tuple[int, int, int, int]: \"\"\"Returns Pillow", "Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this a hard", ") -> Tuple[BytesIO, str]: src = await image.to_pil_image(ctx) annotations = await ocr(ctx, image.url)", "if right is None: right = src_size[0] if lower is None: lower =", "if None in (x, y, next_x, next_y): x, y = next_x, next_y continue", "{self.message}\" if (hint := self.KNOWN_HINTS.get(self.code)) is not None: base += f\"\\n\\nHint: {hint}\" return", "False # noqa @property def font_size(self) -> int: return max((1, int(1.3333333 * self.height)", "}, json={ \"requests\": [ { \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": { \"source\": { \"imageUri\":", "if (hint := self.KNOWN_HINTS.get(self.code)) is not None: base += f\"\\n\\nHint: {hint}\" return base", "= message super().__init__(str(self)) @classmethod def from_response(cls, response: Dict[str, Any]) -> GoogleOCRError: error =", "import annotations import os import math import itertools from io import BytesIO from", "stroke_fill=(0, 0, 0), ) src.alpha_composite( text_im.resize( ( min((text_im.width, field.width)), min((text_im.height, field.height)), ), ).rotate(field.angle,", "avoid making this a hard dependency by not reading it in constants.py #", "len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}\" ) new_lines = lines.copy()", "type: ignore degrees = math.degrees(math.atan2(delta_y, delta_x)) if degrees < 0: degrees += 360", "line if not need_trasnslation: raise PINKError( \"nothing to 
translate on image \" \"(either", "self.right, self.lower) # type: ignore @property def coords_padded(self) -> Tuple[int, int, int, int]:", "black stroke is good anyway # field.inverted_avg_color = ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") #", "self.angle in (90, 270): return self.right - self.left # type: ignore assert False", "src) remaining_line = original_line # TODO: sane iterator instead of this for word", "= ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS = { None: \"The world is on fire,", "to keep track of full coords and just calculate distance # a lot", "translator cog loaded\") # TODO: group by input languages to improve translation? need_trasnslation", "fields: raise PINKError(\"could not translate anything on image\", formatted=False) result = await ctx.bot.loop.run_in_executor(None,", "text_im.resize( ( min((text_im.width, field.width)), min((text_im.height, field.height)), ), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result", "remaining_line[len(text) :].lstrip() # TODO: merge multiple lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except", "angle = 90 # A---D # # B # / \\ # A", "ignore max((0, self.upper - self._padding)), # type: ignore min((self._src_width, self.right + self._padding)), #", "success implementing this # 2) try to keep track of full coords and", "def extract_language(data: Any) -> Optional[str]: if (properties := data.get(\"property\")) is None: return None", "translated_lines = translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)} translated lines,", "await ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API", "# A # / \\ # D B angle = 315 # \\", "type: ignore delta_x = next_x - x # type: ignore 
degrees = math.degrees(math.atan2(delta_y,", "to make sure text order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for line", "_draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO: FIELD_CAP = 150 fields = fields[:FIELD_CAP] src", "if (properties := data.get(\"property\")) is None: return None if (languages := properties.get(\"detectedLanguages\")) is", "self.angle in (90, 270): return self.lower - self.upper # type: ignore assert False", "= word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\")) is None: continue if (detected_break := symbol_properties.get(\"detectedBreak\"))", "right = vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif 90 < angle <= 180: left", "type: ignore ) # TODO: implement w/h detection ASAP, this is temporary #", "there is a trailing newline. # Coordinates from words in the same line", "pink_accents import Accent from pink.context import Context from pink.cogs.utils.errorhandler import PINKError from .types", "not None: need_trasnslation[i] = line if not need_trasnslation: raise PINKError( \"nothing to translate", "-> int: def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle =", "newlines, there is a trailing newline. 
# Coordinates from words in the same", "to fit text into boxes with Pillow without creating # extra images font", "got some garbage HTML response reason = \"unknown error\" raise PINKError( f\"Something really", "upper = vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if left is None:", "ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), ) src.alpha_composite( text_im.resize(", "current_word = 0 fields = [] for original_line, line in zip(lines, new_lines): field", "self.message = message super().__init__(str(self)) @classmethod def from_response(cls, response: Dict[str, Any]) -> GoogleOCRError: error", "# C----B # # D # / \\ # C A angle =", "just calculate distance # a lot of coordinates might be missing, 1st solution", "in block[\"paragraphs\"]: paragraph_language = extract_language(paragraph) yield paragraph_language or block_language # line grouping differs", "on newlines, apply accents to each line separately and # replace any newlines", "image.to_pil_image(ctx) annotations = await ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] #", "really bad happened with underlying API[{r.status}]: {reason}\" ) json = await r.json() raise", "error\")}' ) json = await r.json() if len((responses := json[\"responses\"])) == 0: return", ":= symbol_properties.get(\"detectedBreak\")) is None: continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield paragraph_language or", "# | | angle = 270 # C----B # # D # /", "from PIL import ImageDraw, ImageFont, ImageFilter from pink_accents import Accent from pink.context import", "{os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS = { None: \"The world is", "_language_iterator(blocks: Sequence[Any]) -> 
Iterator[Optional[str]]: \"\"\"Extracts language for each paragraph in Google OCR output\"\"\"", "# / \\ # A C angle = 45 # \\ / #", "BytesIO: FIELD_CAP = 150 fields = fields[:FIELD_CAP] src = src.convert(\"RGBA\") for field in", "for some reason, black stroke is good anyway # field.inverted_avg_color = ImageOps.invert( #", "ctx.reply( f\"Unable to reach proxy: {r.status}\\n\" f\"Will try raw URL but it will", "1st solution is more reliable if it worked @property def width(self) -> int:", "line break matching simple # annotations for word in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1]", "idea.\", 14: \"This means Google cannot access image URL. Try using a different", "different one.\", } def __init__(self, code: Optional[int], message: str): self.code = code self.message", "# ugly!!! src.paste(blurred, field.coords_padded) for field in fields: # TODO: figure out how", "- self.upper # type: ignore if self.angle in (90, 270): return self.right -", "remaining_line = original_line # TODO: sane iterator instead of this for word in", "None: raise RuntimeError(\"No accents cog loaded\") return [ # trocr fully depends on", "in maybe_annotations: if \"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no text detected\",", "using a different one.\", } def __init__(self, code: Optional[int], message: str): self.code =", "in range(4): next_x, next_y = get_coords(next(cycle)) # Any vertex coordinate can be missing", "_VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this a hard dependency by not", "0 self._src_width, self._src_height = src.size self._padding = padding def add_word(self, vertices: _VerticesType, src_size:", "fully depends on newlines, apply accents to each line separately and # replace", "matching simple # annotations for word in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if (symbol_properties", "src.size self._padding = 
padding def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) -> None:", "}, headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, ) as r: if r.status !=", "calculate distance # a lot of coordinates might be missing, 1st solution is", "zip(lines, new_lines): field = TextField(line, src) remaining_line = original_line # TODO: sane iterator", "block_language async def ocr(ctx: Context, image_url: str) -> Dict[str, Any]: async with ctx.session.post(", "is on fire, something really bad happened. I have no idea.\", 14: \"This", "{reason}\" ) json = await r.json() raise PINKError( f\"Error in underlying API[{r.status}]: \"", "of full coords and just calculate distance # a lot of coordinates might", "cog loaded\") # TODO: group by input languages to improve translation? need_trasnslation =", "= 0 if right is None: right = src_size[0] if lower is None:", "here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work anymore for some reason, black", "\\ / # A # # B---C # | | angle = 90", "\"source\": { \"imageUri\": image_url, } }, } ] }, headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\":", "int = 3): self.text = full_text self.left: Optional[int] = None self.upper: Optional[int] =", "message = error.get(\"message\", \"unknown\") return cls(code, message) def __str__(self) -> str: base =", "# type: ignore if self.angle in (90, 270): return self.lower - self.upper #", "language or language is undetected)\", formatted=False, ) translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language", "blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0)) # ugly!!! 
src.paste(blurred, field.coords_padded) for field in fields:", "is None: raise RuntimeError(\"No translator cog loaded\") # TODO: group by input languages", "if not fields: raise PINKError(\"could not translate anything on image\", formatted=False) result =", "+= f\"\\n\\nHint: {hint}\" return base class TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass class TextField:", "if self.upper is None else min((self.upper, upper)) self.right = right if self.right is", "f\"Error in underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}' ) json = await r.json()", "self._padding = padding def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) -> None: if", "degrees < 0: degrees += 360 # compensate missing vertices degrees += 90", "is None: upper = 0 if right is None: right = src_size[0] if", "base class TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass class TextField: def __init__(self, full_text: str,", "PINKError( f\"Error in underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}' ) json = await", "annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines = _apply_accents(ctx, lines, language) else: new_lines = await", "Tuple, Union, Iterator, Optional, Sequence import PIL from PIL import ImageDraw, ImageFont, ImageFilter", "await image.to_pil_image(ctx) annotations = await ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"]", "angle <= 90: left = vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower", "vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif 90 < angle <= 180: left = vertices[1].get(\"x\")", "150 fields = fields[:FIELD_CAP] src = src.convert(\"RGBA\") for field in fields: cropped =", "y = next_x, next_y continue # algo: 
https://stackoverflow.com/a/27481611 # mypy literally does not", "lines ] async def _apply_translation( ctx: Context, lines: List[str], language: str, block_annotations: Any,", "\" f'{json.get(\"message\", \"unknown error\")}' ) json = await r.json() if len((responses := json[\"responses\"]))", ":= ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No translator cog loaded\") # TODO: group by", "y - next_y # type: ignore delta_x = next_x - x # type:", "not translate anything on image\", formatted=False) result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields)", "reach proxy: {r.status}\\n\" f\"Will try raw URL but it will most likely fail\"", "paragraph in Google OCR output\"\"\" def extract_language(data: Any) -> Optional[str]: if (properties :=", "ignore @property def coords_padded(self) -> Tuple[int, int, int, int]: return ( max((0, self.left", "f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [ {", "if r.content_type.lower() != \"application/json\": reason = await r.text() if reason.count(\"\\n\") > 1: #", "def height(self) -> int: if self.angle in (0, 180, 360): return self.lower -", "paragraph in block[\"paragraphs\"]: paragraph_language = extract_language(paragraph) yield paragraph_language or block_language # line grouping", ") translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines = translated.split(\"\\n\") if len(translated_lines)", "is None: continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield paragraph_language or block_language async", "not used anywhere else now PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT", "= 150 fields = fields[:FIELD_CAP] src = src.convert(\"RGBA\") for field in fields: cropped", "vertices[3].get(\"y\") elif 90 < angle <= 180: left = vertices[1].get(\"x\") 
upper = vertices[2].get(\"y\")", "means Google cannot access image URL. Try using a different one.\", } def", "ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), ) as r: if r.status != 200: await", "self.upper = upper if self.upper is None else min((self.upper, upper)) self.right = right", "= itertools.cycle(vertices) x, y = get_coords(next(cycle)) for i in range(4): next_x, next_y =", "src = await image.to_pil_image(ctx) annotations = await ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations", "HTML response reason = \"unknown error\" raise PINKError( f\"Something really bad happened with", "List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No translator cog loaded\") #", "def __repr__(self) -> str: return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) ->", "int: return max((1, int(1.3333333 * self.height) - 2)) @property def stroke_width(self) -> int:", "of coordinates might be missing, 1st solution is more reliable if it worked", "fields: Sequence[TextField]) -> BytesIO: FIELD_CAP = 150 fields = fields[:FIELD_CAP] src = src.convert(\"RGBA\")", "try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes += f\"angle for `{word}` is undetectable\\n\" else:", "import Accent from pink.context import Context from pink.cogs.utils.errorhandler import PINKError from .types import", "ignore assert False # noqa @property def font_size(self) -> int: return max((1, int(1.3333333", "| | angle = 360/0 # D----C # # A # / \\", "False # noqa @property def height(self) -> int: if self.angle in (0, 180,", "int, int]: \"\"\"Returns Pillow style coordinates (left, upper, right, lower).\"\"\" # A -", "vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if left is None: left =", 
"full_text: str, src: PIL.Image, padding: int = 3): self.text = full_text self.left: Optional[int]", "bad happened with underlying API[{r.status}]: {reason}\" ) json = await r.json() raise PINKError(", "= get_coords(next(cycle)) # Any vertex coordinate can be missing if None in (x,", "angle = 270 # C----B # # D # / \\ # C", "language is undetected)\", formatted=False, ) translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines", "digit, OCR often returns 1-2 degree tilted text, ignore this # TEMPORARY: truncate", "src.paste(blurred, field.coords_padded) for field in fields: # TODO: figure out how to fit", "return BytesIO(result.getvalue()) def _apply_accents(ctx: Context, lines: List[str], accent: Accent) -> List[str]: if (accent_cog", "yield paragraph_language or block_language async def ocr(ctx: Context, image_url: str) -> Dict[str, Any]:", "in lines ] async def _apply_translation( ctx: Context, lines: List[str], language: str, block_annotations:", "\"unknown\") return cls(code, message) def __str__(self) -> str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if", "type: ignore min((self._src_height, self.lower + self._padding)), # type: ignore ) # TODO: implement", "min((text_im.height, field.height)), ), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result = BytesIO() src.save(result, format=\"PNG\")", "await r.json() if len((responses := json[\"responses\"])) == 0: return {} maybe_annotations = responses[0]", "raise RuntimeError( f\"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}\" ) new_lines = lines.copy() for", "error reporting notes = \"\" current_word = 0 fields = [] for original_line,", "D # / \\ # C A angle = 225 # \\ /", "font = FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 
0),", "(left, upper, right, lower).\"\"\" # A - 0 # B - 1 #", "src_size[1] return (left, upper, right, lower) @staticmethod def _get_angle(vertices: _VerticesType) -> int: def", "int: return max((1, round(self.font_size / 12))) @property def initialized(self) -> bool: return None", "Pillow without creating # extra images font = FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\",", "Pillow style coordinates (left, upper, right, lower).\"\"\" # A - 0 # B", "in (90, 270): return self.right - self.left # type: ignore assert False #", "in constants.py # since it is not used anywhere else now PINK_PROXY =", "and just calculate distance # a lot of coordinates might be missing, 1st", "vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif 90 < angle <= 180:", "def width(self) -> int: if self.angle in (0, 180, 360): return self.right -", "0 fields = [] for original_line, line in zip(lines, new_lines): field = TextField(line,", "translated_line return new_lines async def ocr_translate( ctx: Context, image: StaticImage, language: Union[str, Accent]", "< angle <= 360: left = vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right = vertices[1].get(\"x\")", "is None: continue if (detected_break := symbol_properties.get(\"detectedBreak\")) is None: continue if detected_break[\"type\"] !=", "message super().__init__(str(self)) @classmethod def from_response(cls, response: Dict[str, Any]) -> GoogleOCRError: error = response.get(\"error\",", "fields) stats = f\"Words: {current_word}\\nLines: {len(fields)}\" if notes: stats += f\"\\nNotes: {notes}\" return", "GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no text detected\", formatted=False) return maybe_annotations def _draw_trocr(src: PIL.Image, fields:", "error = response.get(\"error\", {}) code = error.get(\"code\") message = error.get(\"message\", \"unknown\") return cls(code,", "x, y = get_coords(next(cycle)) for i in range(4): next_x, 
next_y = get_coords(next(cycle)) #", "break else: raise AngleUndetectable # # truncate last digit, OCR often returns 1-2", "truncate last digit, OCR often returns 1-2 degree tilted text, ignore this #", "= src.convert(\"RGBA\") for field in fields: cropped = src.crop(field.coords_padded) # NOTE: next line", "def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts language for each paragraph in Google OCR", "not need_trasnslation: raise PINKError( \"nothing to translate on image \" \"(either entire text", "if isinstance(language, Accent): new_lines = _apply_accents(ctx, lines, language) else: new_lines = await _apply_translation(ctx,", "\"textAnnotations\" not in maybe_annotations: if \"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no", "# A - 0 # B - 1 # C - 2 #", "vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif 180 < angle <= 270:", "@property def initialized(self) -> bool: return None not in self.coords def __repr__(self) ->", "== 0: return {} maybe_annotations = responses[0] if \"textAnnotations\" not in maybe_annotations: if", "if self.right is None else max((self.right, right)) self.lower = lower if self.lower is", "fields[:FIELD_CAP] src = src.convert(\"RGBA\") for field in fields: cropped = src.crop(field.coords_padded) # NOTE:", "by not reading it in constants.py # since it is not used anywhere", "f\"Something really bad happened with underlying API[{r.status}]: {reason}\" ) json = await r.json()", "can be joined # by checking full image description. 
In description words are", "return self.right - self.left # type: ignore if self.angle in (90, 270): return", "type: ignore if self.angle in (90, 270): return self.right - self.left # type:", "= translated_line return new_lines async def ocr_translate( ctx: Context, image: StaticImage, language: Union[str,", "if (translator_cog := ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No translator cog loaded\") # TODO:", "for `{word}` is undetectable\\n\" else: break if field.initialized: if line.casefold() != original_line.casefold(): fields.append(field)", "to translate on image \" \"(either entire text is in target language or", "access image URL. Try using a different one.\", } def __init__(self, code: Optional[int],", "= None self.upper: Optional[int] = None self.right: Optional[int] = None self.lower: Optional[int] =", "else: raise AngleUndetectable # # truncate last digit, OCR often returns 1-2 degree", "is not None: base += f\"\\n\\nHint: {hint}\" return base class TROCRException(Exception): pass class", "# type: ignore if self.angle in (90, 270): return self.right - self.left #", "self.right is None else max((self.right, right)) self.lower = lower if self.lower is None", "into boxes with Pillow without creating # extra images font = FONT.font_variant(size=field.font_size) text_im", "are combined into # lines, lines are separated by newlines, there is a", "r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={", "if upper is None: upper = 0 if right is None: right =", "= f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [", "def _get_angle(vertices: _VerticesType) -> int: def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"),", "= left if self.left is None else min((self.left, left)) self.upper = upper if", "-> 
Tuple[int, int, int, int]: \"\"\"Returns Pillow style coordinates (left, upper, right, lower).\"\"\"", "error.get(\"message\", \"unknown\") return cls(code, message) def __str__(self) -> str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\"", "separately, but they can be joined # by checking full image description. In", "def __str__(self) -> str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint := self.KNOWN_HINTS.get(self.code)) is", "int], angle: int ) -> Tuple[int, int, int, int]: \"\"\"Returns Pillow style coordinates", "List[str], accent: Accent) -> List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No", "does not see previous statement delta_y = y - next_y # type: ignore", "reason, black stroke is good anyway # field.inverted_avg_color = ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\")", "_apply_translation( ctx: Context, lines: List[str], language: str, block_annotations: Any, ) -> List[str]: if", "A angle = 225 # \\ / # B # # C---D #", "= await r.text() if reason.count(\"\\n\") > 1: # we got some garbage HTML", "* round(degrees / 90) @property def coords(self) -> Tuple[int, int, int, int]: return", "# type: ignore delta_x = next_x - x # type: ignore degrees =", "tilted text, ignore this # TEMPORARY: truncate angle to 90 degrees return 90", "# B D angle = 135 # \\ / # A # #", "= src_size[1] return (left, upper, right, lower) @staticmethod def _get_angle(vertices: _VerticesType) -> int:", "Dict[str, Any]) -> GoogleOCRError: error = response.get(\"error\", {}) code = error.get(\"code\") message =", "1-2 degree tilted text, ignore this # TEMPORARY: truncate angle to 90 degrees", ") as r: if r.status != 200: await ctx.reply( f\"Unable to reach proxy:", "self.angle in (0, 180, 360): return self.lower - self.upper # type: ignore if", "field.height)), ), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result = BytesIO() 
src.save(result, format=\"PNG\") return", "= src_size[0] if lower is None: lower = src_size[1] return (left, upper, right,", "ttl=3600), ) as r: if r.status != 200: await ctx.reply( f\"Unable to reach", "degrees += 360 # compensate missing vertices degrees += 90 * i break", "NOTE: next line causes segfaults if coords are wrong, debug from here blurred", "raise RuntimeError(\"No translator cog loaded\") # TODO: group by input languages to improve", "description words are combined into # lines, lines are separated by newlines, there", "135 # \\ / # A # # B---C # | | angle", "figure out how to fit text into boxes with Pillow without creating #", "self.angle ) self.left = left if self.left is None else min((self.left, left)) self.upper", "to reach proxy: {r.status}\\n\" f\"Will try raw URL but it will most likely", "right = src_size[0] if lower is None: lower = src_size[1] return (left, upper,", "\"unknown error\" raise PINKError( f\"Something really bad happened with underlying API[{r.status}]: {reason}\" )", "of this for word in word_annotations[current_word:]: text = word[\"description\"] if remaining_line.startswith(text): current_word +=", "= vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif 180 < angle <=", "Context, image: StaticImage, language: Union[str, Accent] ) -> Tuple[BytesIO, str]: src = await", "} }, } ] }, headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, ) as", "= y - next_y # type: ignore delta_x = next_x - x #", "more reliable if it worked @property def width(self) -> int: if self.angle in", "int: if self.angle in (0, 180, 360): return self.lower - self.upper # type:", "creating # extra images font = FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width),", "= math.degrees(math.atan2(delta_y, delta_x)) if degrees < 0: degrees += 360 # compensate missing", "= 0 
fields = [] for original_line, line in zip(lines, new_lines): field =", "a different one.\", } def __init__(self, code: Optional[int], message: str): self.code = code", "_language_iterator(block_annotations) for i, line in enumerate(lines): if next(paragraph_languages) is not None: need_trasnslation[i] =", "accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for line in lines ] async def _apply_translation( ctx:", "to each line separately and # replace any newlines with spaces to make", "(0, 180, 360): return self.right - self.left # type: ignore if self.angle in", "C angle = 45 # \\ / # D if 0 <= angle", "# TODO: figure out how to fit text into boxes with Pillow without", "range(4): next_x, next_y = get_coords(next(cycle)) # Any vertex coordinate can be missing if", "src_size: Tuple[int, int]) -> None: if not self.initialized: # Get angle from first", "= PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width,", "cycle = itertools.cycle(vertices) x, y = get_coords(next(cycle)) for i in range(4): next_x, next_y", "Get angle from first word self.angle = self._get_angle(vertices) left, upper, right, lower =", "-> Optional[str]: if (properties := data.get(\"property\")) is None: return None if (languages :=", "B # # C---D # | | angle = 180 # B---A #", "await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats = f\"Words: {current_word}\\nLines: {len(fields)}\" if notes: stats", "ImageFont, ImageFilter from pink_accents import Accent from pink.context import Context from pink.cogs.utils.errorhandler import", "vertices: _VerticesType, src_size: Tuple[int, int]) -> None: if not self.initialized: # Get angle", "= vertices[3].get(\"y\") elif 90 < angle <= 180: left = vertices[1].get(\"x\") upper =", "continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield paragraph_language or 
block_language async def ocr(ctx:", "word in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\")) is None: continue", "None: raise RuntimeError(\"No translator cog loaded\") # TODO: group by input languages to", "| angle = 90 # A---D # # B # / \\ #", "delta_x)) if degrees < 0: degrees += 360 # compensate missing vertices degrees", "right, lower = self._vertices_to_coords( vertices, src_size, self.angle ) self.left = left if self.left", "x, y = next_x, next_y continue # algo: https://stackoverflow.com/a/27481611 # mypy literally does", "cropped = src.crop(field.coords_padded) # NOTE: next line causes segfaults if coords are wrong,", "l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for block in blocks: block_language = extract_language(block) for", "PINKError( f\"Something really bad happened with underlying API[{r.status}]: {reason}\" ) json = await", "reliable if it worked @property def width(self) -> int: if self.angle in (0,", "(left, upper, right, lower) @staticmethod def _get_angle(vertices: _VerticesType) -> int: def get_coords(vertex: _VertexType)", ") -> Tuple[int, int, int, int]: \"\"\"Returns Pillow style coordinates (left, upper, right,", "https://stackoverflow.com/a/27481611 # mypy literally does not see previous statement delta_y = y -", "Optional, Sequence import PIL from PIL import ImageDraw, ImageFont, ImageFilter from pink_accents import", "block in blocks: block_language = extract_language(block) for paragraph in block[\"paragraphs\"]: paragraph_language = extract_language(paragraph)", "vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif 180 < angle <= 270: left = vertices[2].get(\"x\")", "_vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int, int], angle: int ) -> Tuple[int, int, int,", "[ { \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": { \"source\": { \"imageUri\": image_url, } },", "self.height) - 2)) @property def stroke_width(self) -> int: return 
max((1, round(self.font_size / 12)))", "= padding def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) -> None: if not", "# Get angle from first word self.angle = self._get_angle(vertices) left, upper, right, lower", "entry for each word separately, but they can be joined # by checking", "= await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"],", "merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines = _apply_accents(ctx, lines, language) else:", "def stroke_width(self) -> int: return max((1, round(self.font_size / 12))) @property def initialized(self) ->", "returns entry for each word separately, but they can be joined # by", "from words in the same line can be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if", "# TODO: group by input languages to improve translation? need_trasnslation = {} paragraph_languages", "vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif 180 <", "List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No accents cog loaded\") return", "this a hard dependency by not reading it in constants.py # since it", "BytesIO() src.save(result, format=\"PNG\") return BytesIO(result.getvalue()) def _apply_accents(ctx: Context, lines: List[str], accent: Accent) ->", "super().__init__(str(self)) @classmethod def from_response(cls, response: Dict[str, Any]) -> GoogleOCRError: error = response.get(\"error\", {})", "noqa @property def font_size(self) -> int: return max((1, int(1.3333333 * self.height) - 2))", "= annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines = _apply_accents(ctx, lines, language) else: new_lines =", "result = BytesIO() src.save(result, format=\"PNG\") return 
BytesIO(result.getvalue()) def _apply_accents(ctx: Context, lines: List[str], accent:", "degrees = math.degrees(math.atan2(delta_y, delta_x)) if degrees < 0: degrees += 360 # compensate", "line in enumerate(lines): if next(paragraph_languages) is not None: need_trasnslation[i] = line if not", "D----A # | | angle = 270 # C----B # # D #", "code: Optional[int], message: str): self.code = code self.message = message super().__init__(str(self)) @classmethod def", "0 # B - 1 # C - 2 # D - 3", "left = vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif", "0, 0), ) src.alpha_composite( text_im.resize( ( min((text_im.width, field.width)), min((text_im.height, field.height)), ), ).rotate(field.angle, expand=True,", "3): self.text = full_text self.left: Optional[int] = None self.upper: Optional[int] = None self.right:", "line in lines ] async def _apply_translation( ctx: Context, lines: List[str], language: str,", "Iterator[Optional[str]]: \"\"\"Extracts language for each paragraph in Google OCR output\"\"\" def extract_language(data: Any)", "\"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": { \"source\": { \"imageUri\": image_url, } }, } ]", "= get_coords(next(cycle)) for i in range(4): next_x, next_y = get_coords(next(cycle)) # Any vertex", "# C - 2 # D - 3 # # A----B # |", "@staticmethod def _get_angle(vertices: _VerticesType) -> int: def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]: return", "extract_language(block) for paragraph in block[\"paragraphs\"]: paragraph_language = extract_language(paragraph) yield paragraph_language or block_language #", "code = error.get(\"code\") message = error.get(\"message\", \"unknown\") return cls(code, message) def __str__(self) ->", "f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint := self.KNOWN_HINTS.get(self.code)) is not None: base += f\"\\n\\nHint: {hint}\"", "\"imageUri\": image_url, } }, } ] }, headers={ 
\"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", },", "None: \"The world is on fire, something really bad happened. I have no", "anymore for some reason, black stroke is good anyway # field.inverted_avg_color = ImageOps.invert(", "_VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this", "this for word in word_annotations[current_word:]: text = word[\"description\"] if remaining_line.startswith(text): current_word += 1", "from_response(cls, response: Dict[str, Any]) -> GoogleOCRError: error = response.get(\"error\", {}) code = error.get(\"code\")", "OCR often returns 1-2 degree tilted text, ignore this # TEMPORARY: truncate angle", "except AngleUndetectable: notes += f\"angle for `{word}` is undetectable\\n\" else: break if field.initialized:", "def from_response(cls, response: Dict[str, Any]) -> GoogleOCRError: error = response.get(\"error\", {}) code =", "-> str: return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts", "int: if self.angle in (0, 180, 360): return self.right - self.left # type:", ":].lstrip() # TODO: merge multiple lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable:", "def coords_padded(self) -> Tuple[int, int, int, int]: return ( max((0, self.left - self._padding)),", "solutions: # 1) https://stackoverflow.com/a/9972699 # text surrounding box dimensions are known, but i", "-> Tuple[BytesIO, str]: src = await image.to_pil_image(ctx) annotations = await ocr(ctx, image.url) word_annotations", "for block in blocks: block_language = extract_language(block) for paragraph in block[\"paragraphs\"]: paragraph_language =", "= extract_language(block) for paragraph in block[\"paragraphs\"]: paragraph_language = 
extract_language(paragraph) yield paragraph_language or block_language", "anywhere else now PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\")", "@property def width(self) -> int: if self.angle in (0, 180, 360): return self.right", "In description words are combined into # lines, lines are separated by newlines,", "vertices: _VerticesType, src_size: Tuple[int, int], angle: int ) -> Tuple[int, int, int, int]:", "coords and just calculate distance # a lot of coordinates might be missing,", "Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices) x, y = get_coords(next(cycle)) for", "None: continue if (detected_break := symbol_properties.get(\"detectedBreak\")) is None: continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\":", "original_line.casefold(): fields.append(field) if not fields: raise PINKError(\"could not translate anything on image\", formatted=False)", "= vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif 270 < angle <=", "API returns entry for each word separately, but they can be joined #", "group by input languages to improve translation? need_trasnslation = {} paragraph_languages = _language_iterator(block_annotations)", "_apply_accents(ctx: Context, lines: List[str], accent: Accent) -> List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\")) is", "= ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0)) # ugly!!! src.paste(blurred, field.coords_padded) for", "ocr(ctx: Context, image_url: str) -> Dict[str, Any]: async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url,", "def ocr_translate( ctx: Context, image: StaticImage, language: Union[str, Accent] ) -> Tuple[BytesIO, str]:", "differs between simple annotations and paragraph grouping in # full annotations. 
\"EOL_SURE_SPACE\" indicates", "in word_annotations[current_word:]: text = word[\"description\"] if remaining_line.startswith(text): current_word += 1 remaining_line = remaining_line[len(text)", "# noqa @property def height(self) -> int: if self.angle in (0, 180, 360):", "fire, something really bad happened. I have no idea.\", 14: \"This means Google", "int(1.3333333 * self.height) - 2)) @property def stroke_width(self) -> int: return max((1, round(self.font_size", "delta_x = next_x - x # type: ignore degrees = math.degrees(math.atan2(delta_y, delta_x)) if", "int: def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices)", "def _apply_translation( ctx: Context, lines: List[str], language: str, block_annotations: Any, ) -> List[str]:", "= 180 # B---A # # C # / \\ # B D", "word[\"description\"] if remaining_line.startswith(text): current_word += 1 remaining_line = remaining_line[len(text) :].lstrip() # TODO: merge", "stroke_width(self) -> int: return max((1, round(self.font_size / 12))) @property def initialized(self) -> bool:", "right is None: right = src_size[0] if lower is None: lower = src_size[1]", "= os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS =", ").getpixel((0, 0)) # ugly!!! 
src.paste(blurred, field.coords_padded) for field in fields: # TODO: figure", "no success implementing this # 2) try to keep track of full coords", "PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS", "ASAP, this is temporary # solutions: # 1) https://stackoverflow.com/a/9972699 # text surrounding box", "\\ # A C angle = 45 # \\ / # D if", "# # D # / \\ # C A angle = 225 #", "how to fit text into boxes with Pillow without creating # extra images", "# D B angle = 315 # \\ / # C # #", "input languages to improve translation? need_trasnslation = {} paragraph_languages = _language_iterator(block_annotations) for i,", "language, block_annotations) # error reporting notes = \"\" current_word = 0 fields =", "ImageDraw, ImageFont, ImageFilter from pink_accents import Accent from pink.context import Context from pink.cogs.utils.errorhandler", "= await r.json() raise PINKError( f\"Error in underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}'", "box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes += f\"angle for `{word}` is undetectable\\n\"", "src.crop(field.coords_padded) # NOTE: next line causes segfaults if coords are wrong, debug from", "noqa @property def height(self) -> int: if self.angle in (0, 180, 360): return", "# full annotations. 
\"EOL_SURE_SPACE\" indicates line break matching simple # annotations for word", "pink.cogs.utils.errorhandler import PINKError from .types import StaticImage _VertexType = Dict[str, int] _VerticesType =", "return maybe_annotations def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO: FIELD_CAP = 150 fields", "90) @property def coords(self) -> Tuple[int, int, int, int]: return (self.left, self.upper, self.right,", "can be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines = _apply_accents(ctx, lines,", "# A C angle = 45 # \\ / # D if 0", "else: json = await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL, params={", "each paragraph in Google OCR output\"\"\" def extract_language(data: Any) -> Optional[str]: if (properties", "None: return None if (languages := properties.get(\"detectedLanguages\")) is None: return None return sorted(languages,", "= FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 0), text=field.text,", "else: raise PINKError(\"no text detected\", formatted=False) return maybe_annotations def _draw_trocr(src: PIL.Image, fields: Sequence[TextField])", "# C A angle = 225 # \\ / # B # #", "(detected_break := symbol_properties.get(\"detectedBreak\")) is None: continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield paragraph_language", "lower = vertices[3].get(\"y\") elif 90 < angle <= 180: left = vertices[1].get(\"x\") upper", "previous statement delta_y = y - next_y # type: ignore delta_x = next_x", "} def __init__(self, code: Optional[int], message: str): self.code = code self.message = message", "@classmethod def from_response(cls, response: Dict[str, Any]) -> GoogleOCRError: error = response.get(\"error\", {}) code", "it is not used anywhere else now PINK_PROXY = 
os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer", "will most likely fail\" ) else: json = await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\"", "upper is None: upper = 0 if right is None: right = src_size[0]", ") ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), ) src.alpha_composite(", "a hard dependency by not reading it in constants.py # since it is", "lower = vertices[2].get(\"y\") if left is None: left = 0 if upper is", "0)) # ugly!!! src.paste(blurred, field.coords_padded) for field in fields: # TODO: figure out", "ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [ { \"features\": [{\"type\": \"TEXT_DETECTION\"}],", "import math import itertools from io import BytesIO from typing import Any, Dict,", "field.inverted_avg_color = ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0)) # ugly!!! src.paste(blurred, field.coords_padded)", "dimensions are known, but i had no success implementing this # 2) try", "# D # / \\ # C A angle = 225 # \\", "= vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif 90 < angle <=", "max((0, self.left - self._padding)), # type: ignore max((0, self.upper - self._padding)), # type:", "int, int]: return ( max((0, self.left - self._padding)), # type: ignore max((0, self.upper", "import os import math import itertools from io import BytesIO from typing import", "| | angle = 270 # C----B # # D # / \\", "os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [ { \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": { \"source\": {", "stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), )", "ignore min((self._src_width, self.right + self._padding)), # type: ignore min((self._src_height, self.lower + 
self._padding)), #", "return ( max((0, self.left - self._padding)), # type: ignore max((0, self.upper - self._padding)),", "accent: Accent) -> List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No accents", "is not used anywhere else now PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\"", "str): self.code = code self.message = message super().__init__(str(self)) @classmethod def from_response(cls, response: Dict[str,", "trocr fully depends on newlines, apply accents to each line separately and #", "elif 180 < angle <= 270: left = vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right", "= 360/0 # D----C # # A # / \\ # D B", "f\"\\n\\nHint: {hint}\" return base class TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass class TextField: def", "upper, right, lower).\"\"\" # A - 0 # B - 1 # C", "GoogleOCRError(PINKError): KNOWN_HINTS = { None: \"The world is on fire, something really bad", "can be missing if None in (x, y, next_x, next_y): x, y =", "raise PINKError( f\"Error in underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}' ) json =", "joined # by checking full image description. 
In description words are combined into", "x # type: ignore degrees = math.degrees(math.atan2(delta_y, delta_x)) if degrees < 0: degrees", "<= 360: left = vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower =", "\"(either entire text is in target language or language is undetected)\", formatted=False, )", "new_lines): field = TextField(line, src) remaining_line = original_line # TODO: sane iterator instead", "- next_y # type: ignore delta_x = next_x - x # type: ignore", "the same line can be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines", "<= angle <= 90: left = vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right = vertices[2].get(\"x\")", "for i, line in enumerate(lines): if next(paragraph_languages) is not None: need_trasnslation[i] = line", "not None: base += f\"\\n\\nHint: {hint}\" return base class TROCRException(Exception): pass class AngleUndetectable(TROCRException):", "wrong, debug from here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work anymore for", "TODO: merge multiple lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes +=", "_VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this a hard dependency by", "None: base += f\"\\n\\nHint: {hint}\" return base class TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass", "type: ignore @property def coords_padded(self) -> Tuple[int, int, int, int]: return ( max((0,", "continue yield paragraph_language or block_language async def ocr(ctx: Context, image_url: str) -> Dict[str,", "reason = await r.text() if reason.count(\"\\n\") > 1: # we got some garbage", "in fields: # TODO: figure out how to fit text into boxes with", "= response.get(\"error\", {}) code = error.get(\"code\") message = error.get(\"message\", 
\"unknown\") return cls(code, message)", "None self.angle = 0 self._src_width, self._src_height = src.size self._padding = padding def add_word(self,", "raise RuntimeError(\"No accents cog loaded\") return [ # trocr fully depends on newlines,", "elif 270 < angle <= 360: left = vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right", "pink.context import Context from pink.cogs.utils.errorhandler import PINKError from .types import StaticImage _VertexType =", "get_coords(next(cycle)) # Any vertex coordinate can be missing if None in (x, y,", "/ \\ # D B angle = 315 # \\ / # C", "languages to improve translation? need_trasnslation = {} paragraph_languages = _language_iterator(block_annotations) for i, line", "something really bad happened. I have no idea.\", 14: \"This means Google cannot", "by input languages to improve translation? need_trasnslation = {} paragraph_languages = _language_iterator(block_annotations) for", "= vertices[0].get(\"y\") elif 180 < angle <= 270: left = vertices[2].get(\"x\") upper =", "# lines, lines are separated by newlines, there is a trailing newline. #", "width(self) -> int: if self.angle in (0, 180, 360): return self.right - self.left", "] async def _apply_translation( ctx: Context, lines: List[str], language: str, block_annotations: Any, )", "# trocr fully depends on newlines, apply accents to each line separately and", "lines are separated by newlines, there is a trailing newline. # Coordinates from", "90: left = vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower = vertices[3].get(\"y\")", "# by checking full image description. In description words are combined into #", "raw URL but it will most likely fail\" ) else: json = await", "and paragraph grouping in # full annotations. 
\"EOL_SURE_SPACE\" indicates line break matching simple", "target language or language is undetected)\", formatted=False, ) translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()),", "None in (x, y, next_x, next_y): x, y = next_x, next_y continue #", "in Google OCR output\"\"\" def extract_language(data: Any) -> Optional[str]: if (properties := data.get(\"property\"))", "Accent): new_lines = _apply_accents(ctx, lines, language) else: new_lines = await _apply_translation(ctx, lines, language,", "D B angle = 315 # \\ / # C # # D----A", "vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if left is", "headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, ) as r: if r.status != 200:", "translate anything on image\", formatted=False) result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats", "if (detected_break := symbol_properties.get(\"detectedBreak\")) is None: continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield", "simple # annotations for word in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if (symbol_properties :=", "# C # / \\ # B D angle = 135 # \\", "annotations = await ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google", "response reason = \"unknown error\" raise PINKError( f\"Something really bad happened with underlying", "return {} maybe_annotations = responses[0] if \"textAnnotations\" not in maybe_annotations: if \"error\" in", "os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS = {", "B angle = 315 # \\ / # C # # D----A #", "missing vertices degrees += 90 * i break else: raise AngleUndetectable # 
#", "if field.initialized: if line.casefold() != original_line.casefold(): fields.append(field) if not fields: raise PINKError(\"could not", "src_size[0] if lower is None: lower = src_size[1] return (left, upper, right, lower)", "angle = 225 # \\ / # B # # C---D # |", "next_y = get_coords(next(cycle)) # Any vertex coordinate can be missing if None in", "if (symbol_properties := last_symbol.get(\"property\")) is None: continue if (detected_break := symbol_properties.get(\"detectedBreak\")) is None:", "| angle = 360/0 # D----C # # A # / \\ #", "(symbol_properties := last_symbol.get(\"property\")) is None: continue if (detected_break := symbol_properties.get(\"detectedBreak\")) is None: continue", "# line grouping differs between simple annotations and paragraph grouping in # full", "/ # A # # B---C # | | angle = 90 #", "def font_size(self) -> int: return max((1, int(1.3333333 * self.height) - 2)) @property def", "ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No accents cog loaded\") return [ # trocr fully", "90 degrees return 90 * round(degrees / 90) @property def coords(self) -> Tuple[int,", "if 0 <= angle <= 90: left = vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right", "PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS = { None:", "\\ # C A angle = 225 # \\ / # B #", "original_line # TODO: sane iterator instead of this for word in word_annotations[current_word:]: text", "return self.lower - self.upper # type: ignore assert False # noqa @property def", "notes = \"\" current_word = 0 fields = [] for original_line, line in", "None not in self.coords def __repr__(self) -> str: return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\"", "min((self.upper, upper)) self.right = right if self.right is None else max((self.right, right)) self.lower", "{ \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": { 
\"source\": { \"imageUri\": image_url, } }, }", "ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats = f\"Words: {current_word}\\nLines: {len(fields)}\" if notes: stats +=", "in target language or language is undetected)\", formatted=False, ) translated = await translator_cog.translate(", "C---D # | | angle = 180 # B---A # # C #", "-> Tuple[int, int, int, int]: return (self.left, self.upper, self.right, self.lower) # type: ignore", "font_size(self) -> int: return max((1, int(1.3333333 * self.height) - 2)) @property def stroke_width(self)", "f'{json.get(\"message\", \"unknown error\")}' ) json = await r.json() if len((responses := json[\"responses\"])) ==", "format=\"PNG\") return BytesIO(result.getvalue()) def _apply_accents(ctx: Context, lines: List[str], accent: Accent) -> List[str]: if", "paragraph grouping in # full annotations. \"EOL_SURE_SPACE\" indicates line break matching simple #", "_apply_translation(ctx, lines, language, block_annotations) # error reporting notes = \"\" current_word = 0", "# TODO: sane iterator instead of this for word in word_annotations[current_word:]: text =", "PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0,", "I have no idea.\", 14: \"This means Google cannot access image URL. 
Try", "r: if r.status != 200: if r.content_type.lower() != \"application/json\": reason = await r.text()", "= 225 # \\ / # B # # C---D # | |", "\") for line in lines ] async def _apply_translation( ctx: Context, lines: List[str],", "# B---C # | | angle = 90 # A---D # # B", "StaticImage _VertexType = Dict[str, int] _VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL =", "def coords(self) -> Tuple[int, int, int, int]: return (self.left, self.upper, self.right, self.lower) #", "Try using a different one.\", } def __init__(self, code: Optional[int], message: str): self.code", "multiple lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes += f\"angle for", "in (0, 180, 360): return self.right - self.left # type: ignore if self.angle", "1 remaining_line = remaining_line[len(text) :].lstrip() # TODO: merge multiple lines into box try:", "# A----B # | | angle = 360/0 # D----C # # A", "!= 200: if r.content_type.lower() != \"application/json\": reason = await r.text() if reason.count(\"\\n\") >", "Optional[int] = None self.upper: Optional[int] = None self.right: Optional[int] = None self.lower: Optional[int]", "but it will most likely fail\" ) else: json = await r.json() image_url", "cog loaded\") return [ # trocr fully depends on newlines, apply accents to", "def ocr(ctx: Context, image_url: str) -> Dict[str, Any]: async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN),", "output\"\"\" def extract_language(data: Any) -> Optional[str]: if (properties := data.get(\"property\")) is None: return", "params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [ { \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": {", "min((self._src_height, self.lower + self._padding)), # type: ignore ) # TODO: implement w/h detection", "- self.left # type: ignore if self.angle in (90, 270): return self.lower -", "+= 90 * i break else: raise 
AngleUndetectable # # truncate last digit,", "= remaining_line[len(text) :].lstrip() # TODO: merge multiple lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size)", "# type: ignore ) # TODO: implement w/h detection ASAP, this is temporary", "360 # compensate missing vertices degrees += 90 * i break else: raise", "Accent] ) -> Tuple[BytesIO, str]: src = await image.to_pil_image(ctx) annotations = await ocr(ctx,", "undetected)\", formatted=False, ) translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines = translated.split(\"\\n\")", "self.left - self._padding)), # type: ignore max((0, self.upper - self._padding)), # type: ignore", "vertex coordinate can be missing if None in (x, y, next_x, next_y): x,", ":= last_symbol.get(\"property\")) is None: continue if (detected_break := symbol_properties.get(\"detectedBreak\")) is None: continue if", "upper = vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif 270 < angle", "word in word_annotations[current_word:]: text = word[\"description\"] if remaining_line.startswith(text): current_word += 1 remaining_line =", "A - 0 # B - 1 # C - 2 # D", "= [] for original_line, line in zip(lines, new_lines): field = TextField(line, src) remaining_line", "annotations and paragraph grouping in # full annotations. 
\"EOL_SURE_SPACE\" indicates line break matching", "upper = vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif 90 < angle", "text order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for line in lines ]", "= src.size self._padding = padding def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) ->", "reading it in constants.py # since it is not used anywhere else now", "mypy literally does not see previous statement delta_y = y - next_y #", "int, int, int]: \"\"\"Returns Pillow style coordinates (left, upper, right, lower).\"\"\" # A", "fields: cropped = src.crop(field.coords_padded) # NOTE: next line causes segfaults if coords are", "apply accents to each line separately and # replace any newlines with spaces", "ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No translator cog loaded\") # TODO: group by input", "RuntimeError( f\"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}\" ) new_lines = lines.copy() for idx,", "Google OCR output\"\"\" def extract_language(data: Any) -> Optional[str]: if (properties := data.get(\"property\")) is", "happened. I have no idea.\", 14: \"This means Google cannot access image URL.", "loaded\") # TODO: group by input languages to improve translation? 
need_trasnslation = {}", "translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}\"", "_VerticesType) -> int: def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle", "new_lines = await _apply_translation(ctx, lines, language, block_annotations) # error reporting notes = \"\"", "- 0 # B - 1 # C - 2 # D -", "self.code = code self.message = message super().__init__(str(self)) @classmethod def from_response(cls, response: Dict[str, Any])", "max((self.right, right)) self.lower = lower if self.lower is None else max((self.lower, lower)) @staticmethod", "is more reliable if it worked @property def width(self) -> int: if self.angle", "continue # algo: https://stackoverflow.com/a/27481611 # mypy literally does not see previous statement delta_y", "+= f\"angle for `{word}` is undetectable\\n\" else: break if field.initialized: if line.casefold() !=", "Accent from pink.context import Context from pink.cogs.utils.errorhandler import PINKError from .types import StaticImage", "self.angle = self._get_angle(vertices) left, upper, right, lower = self._vertices_to_coords( vertices, src_size, self.angle )", "!= \"application/json\": reason = await r.text() if reason.count(\"\\n\") > 1: # we got", "# B - 1 # C - 2 # D - 3 #", "is undetected)\", formatted=False, ) translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines =", "315 # \\ / # C # # D----A # | | angle", "lines, language) else: new_lines = await _apply_translation(ctx, lines, language, block_annotations) # error reporting", "key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for block in blocks: block_language = extract_language(block)", ") json = await r.json() raise PINKError( f\"Error in underlying API[{r.status}]: \" f'{json.get(\"message\",", "not fields: 
raise PINKError(\"could not translate anything on image\", formatted=False) result = await", "= 45 # \\ / # D if 0 <= angle <= 90:", "cannot access image URL. Try using a different one.\", } def __init__(self, code:", "Context from pink.cogs.utils.errorhandler import PINKError from .types import StaticImage _VertexType = Dict[str, int]", "for original_line, line in zip(lines, new_lines): field = TextField(line, src) remaining_line = original_line", "= responses[0] if \"textAnnotations\" not in maybe_annotations: if \"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations)", ":= self.KNOWN_HINTS.get(self.code)) is not None: base += f\"\\n\\nHint: {hint}\" return base class TROCRException(Exception):", "TEMPORARY: truncate angle to 90 degrees return 90 * round(degrees / 90) @property", "l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for block in blocks: block_language = extract_language(block) for paragraph", "None else min((self.left, left)) self.upper = upper if self.upper is None else min((self.upper,", "right, lower).\"\"\" # A - 0 # B - 1 # C -", "lines: List[str], accent: Accent) -> List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\")) is None: raise", "= \"\" current_word = 0 fields = [] for original_line, line in zip(lines,", "if self.angle in (90, 270): return self.right - self.left # type: ignore assert", "- self.upper # type: ignore assert False # noqa @property def height(self) ->", "RuntimeError(\"No translator cog loaded\") # TODO: group by input languages to improve translation?", "= None self.angle = 0 self._src_width, self._src_height = src.size self._padding = padding def", "missing if None in (x, y, next_x, next_y): x, y = next_x, next_y", "\"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this a hard dependency by not reading it in", "height(self) -> int: if self.angle in (0, 180, 360): return self.lower - self.upper", "raise GoogleOCRError.from_response(maybe_annotations) else: raise 
PINKError(\"no text detected\", formatted=False) return maybe_annotations def _draw_trocr(src: PIL.Image,", "Optional[int] = None self.right: Optional[int] = None self.lower: Optional[int] = None self.angle =", "annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API returns entry for each word separately, but they", "if not need_trasnslation: raise PINKError( \"nothing to translate on image \" \"(either entire", "360/0 # D----C # # A # / \\ # D B angle", "1: # we got some garbage HTML response reason = \"unknown error\" raise", "\"\"\"Extracts language for each paragraph in Google OCR output\"\"\" def extract_language(data: Any) ->", "if r.status != 200: await ctx.reply( f\"Unable to reach proxy: {r.status}\\n\" f\"Will try", "json={ \"requests\": [ { \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": { \"source\": { \"imageUri\": image_url,", "see previous statement delta_y = y - next_y # type: ignore delta_x =", "into # lines, lines are separated by newlines, there is a trailing newline.", "# # C # / \\ # B D angle = 135 #", "text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts language for each paragraph", ":= data.get(\"property\")) is None: return None if (languages := properties.get(\"detectedLanguages\")) is None: return", "ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0)) # ugly!!! 
src.paste(blurred, field.coords_padded) for field", "= await image.to_pil_image(ctx) annotations = await ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations =", "\"languageCode\" ] for block in blocks: block_language = extract_language(block) for paragraph in block[\"paragraphs\"]:", "\\ # D B angle = 315 # \\ / # C #", "if it worked @property def width(self) -> int: if self.angle in (0, 180,", "padding: int = 3): self.text = full_text self.left: Optional[int] = None self.upper: Optional[int]", "lines, language, block_annotations) # error reporting notes = \"\" current_word = 0 fields", "# blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0)) # ugly!!! src.paste(blurred, field.coords_padded) for field in", "have no idea.\", 14: \"This means Google cannot access image URL. Try using", "reporting notes = \"\" current_word = 0 fields = [] for original_line, line", "not in maybe_annotations: if \"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no text", "-> int: if self.angle in (0, 180, 360): return self.right - self.left #", "= upper if self.upper is None else min((self.upper, upper)) self.right = right if", "vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif 270 <", "next(paragraph_languages) is not None: need_trasnslation[i] = line if not need_trasnslation: raise PINKError( \"nothing", "math.degrees(math.atan2(delta_y, delta_x)) if degrees < 0: degrees += 360 # compensate missing vertices", "segfaults if coords are wrong, debug from here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does", "lower)) @staticmethod def _vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int, int], angle: int ) ->", "next_x, next_y continue # algo: https://stackoverflow.com/a/27481611 # mypy literally does not see previous", "360): return self.lower - self.upper # type: ignore if self.angle 
in (90, 270):", "statement delta_y = y - next_y # type: ignore delta_x = next_x -", "\" \"(either entire text is in target language or language is undetected)\", formatted=False,", ") -> List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No translator cog", "def __init__(self, full_text: str, src: PIL.Image, padding: int = 3): self.text = full_text", "- self._padding)), # type: ignore min((self._src_width, self.right + self._padding)), # type: ignore min((self._src_height,", "Tuple[int, int, int, int]: \"\"\"Returns Pillow style coordinates (left, upper, right, lower).\"\"\" #", "merge multiple lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes += f\"angle", "= 135 # \\ / # A # # B---C # | |", "with underlying API[{r.status}]: {reason}\" ) json = await r.json() raise PINKError( f\"Error in", "self._vertices_to_coords( vertices, src_size, self.angle ) self.left = left if self.left is None else", "0: return {} maybe_annotations = responses[0] if \"textAnnotations\" not in maybe_annotations: if \"error\"", "next_y continue # algo: https://stackoverflow.com/a/27481611 # mypy literally does not see previous statement", "algo: https://stackoverflow.com/a/27481611 # mypy literally does not see previous statement delta_y = y", "work anymore for some reason, black stroke is good anyway # field.inverted_avg_color =", "= annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API returns entry for each", "Does not work anymore for some reason, black stroke is good anyway #", "import BytesIO from typing import Any, Dict, List, Tuple, Union, Iterator, Optional, Sequence", "block_language # line grouping differs between simple annotations and paragraph grouping in #", "text, ignore this # TEMPORARY: truncate angle to 90 degrees return 90 *", "formatted=False) result = await 
ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats = f\"Words: {current_word}\\nLines: {len(fields)}\"", "remaining_line.startswith(text): current_word += 1 remaining_line = remaining_line[len(text) :].lstrip() # TODO: merge multiple lines", "src.convert(\"RGBA\") for field in fields: cropped = src.crop(field.coords_padded) # NOTE: next line causes", "int]: return (self.left, self.upper, self.right, self.lower) # type: ignore @property def coords_padded(self) ->", "https://stackoverflow.com/a/9972699 # text surrounding box dimensions are known, but i had no success", "in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\")) is None: continue if", "boxes with Pillow without creating # extra images font = FONT.font_variant(size=field.font_size) text_im =", "right = vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if left is None: left = 0", "self.upper, self.right, self.lower) # type: ignore @property def coords_padded(self) -> Tuple[int, int, int,", "from pink.cogs.utils.errorhandler import PINKError from .types import StaticImage _VertexType = Dict[str, int] _VerticesType", "angle: int ) -> Tuple[int, int, int, int]: \"\"\"Returns Pillow style coordinates (left,", "= 90 # A---D # # B # / \\ # A C", "] }, headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, ) as r: if r.status", "# type: ignore assert False # noqa @property def font_size(self) -> int: return", "= original_line # TODO: sane iterator instead of this for word in word_annotations[current_word:]:", "< angle <= 270: left = vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right = vertices[0].get(\"x\")", "< angle <= 180: left = vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right = vertices[3].get(\"x\")", "coordinates (left, upper, right, lower).\"\"\" # A - 0 # B - 1", "PINKError from .types import StaticImage _VertexType = Dict[str, int] 
_VerticesType = Tuple[_VertexType, _VertexType,", "by checking full image description. In description words are combined into # lines,", "without creating # extra images font = FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text,", ") src.alpha_composite( text_im.resize( ( min((text_im.width, field.width)), min((text_im.height, field.height)), ), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2],", "Any, ) -> List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No translator", "def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices) x,", "if \"textAnnotations\" not in maybe_annotations: if \"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise", "D - 3 # # A----B # | | angle = 360/0 #", "None: need_trasnslation[i] = line if not need_trasnslation: raise PINKError( \"nothing to translate on", "next_x, next_y): x, y = next_x, next_y continue # algo: https://stackoverflow.com/a/27481611 # mypy", "TextField: def __init__(self, full_text: str, src: PIL.Image, padding: int = 3): self.text =", "0), ) src.alpha_composite( text_im.resize( ( min((text_im.width, field.width)), min((text_im.height, field.height)), ), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC),", "image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API returns entry", "language: str, block_annotations: Any, ) -> List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\")) is None:", "C # / \\ # B D angle = 135 # \\ /", "Union[str, Accent] ) -> Tuple[BytesIO, str]: src = await image.to_pil_image(ctx) annotations = await", "\"EOL_SURE_SPACE\" indicates line break matching simple # annotations for word in 
paragraph[\"words\"]: last_symbol", "None: if not self.initialized: # Get angle from first word self.angle = self._get_angle(vertices)", "OCR API returns entry for each word separately, but they can be joined", "upper = vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif 180 < angle", "return None if (languages := properties.get(\"detectedLanguages\")) is None: return None return sorted(languages, key=lambda", "is None else min((self.left, left)) self.upper = upper if self.upper is None else", "y, next_x, next_y): x, y = next_x, next_y continue # algo: https://stackoverflow.com/a/27481611 #", "self.lower - self.upper # type: ignore assert False # noqa @property def height(self)", "\"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0,", "accents cog loaded\") return [ # trocr fully depends on newlines, apply accents", "break if field.initialized: if line.casefold() != original_line.casefold(): fields.append(field) if not fields: raise PINKError(\"could", "translated lines, got {len(translated_lines)}\" ) new_lines = lines.copy() for idx, translated_line in zip(need_trasnslation.keys(),", "# B---A # # C # / \\ # B D angle =", "zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line return new_lines async def ocr_translate( ctx: Context, image:", "is good anyway # field.inverted_avg_color = ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0))", "bad happened. 
I have no idea.\", 14: \"This means Google cannot access image", "return self.lower - self.upper # type: ignore if self.angle in (90, 270): return", "entire text is in target language or language is undetected)\", formatted=False, ) translated", "| | angle = 90 # A---D # # B # / \\", "self.right: Optional[int] = None self.lower: Optional[int] = None self.angle = 0 self._src_width, self._src_height", "None: return None return sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for block", "-> None: if not self.initialized: # Get angle from first word self.angle =", "lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes += f\"angle for `{word}`", "None: lower = src_size[1] return (left, upper, right, lower) @staticmethod def _get_angle(vertices: _VerticesType)", "Context, lines: List[str], accent: Accent) -> List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\")) is None:", "anything on image\", formatted=False) result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats =", "implementing this # 2) try to keep track of full coords and just", "r.json() if len((responses := json[\"responses\"])) == 0: return {} maybe_annotations = responses[0] if", "angle from first word self.angle = self._get_angle(vertices) left, upper, right, lower = self._vertices_to_coords(", "{} maybe_annotations = responses[0] if \"textAnnotations\" not in maybe_annotations: if \"error\" in maybe_annotations:", "(x, y, next_x, next_y): x, y = next_x, next_y continue # algo: https://stackoverflow.com/a/27481611", "lower if self.lower is None else max((self.lower, lower)) @staticmethod def _vertices_to_coords( vertices: _VerticesType,", "type: ignore assert False # noqa @property def font_size(self) -> int: return max((1,", "Tuple[int, int], angle: int ) -> Tuple[int, int, int, int]: \"\"\"Returns Pillow style", "but i had no success implementing this # 2) try to keep track", "= 
lower if self.lower is None else max((self.lower, lower)) @staticmethod def _vertices_to_coords( vertices:", "with spaces to make sure text order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \")", "- self._padding)), # type: ignore max((0, self.upper - self._padding)), # type: ignore min((self._src_width,", "# truncate last digit, OCR often returns 1-2 degree tilted text, ignore this", "max((self.lower, lower)) @staticmethod def _vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int, int], angle: int )", "self.upper is None else min((self.upper, upper)) self.right = right if self.right is None", "one.\", } def __init__(self, code: Optional[int], message: str): self.code = code self.message =", "0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), ) src.alpha_composite( text_im.resize( ( min((text_im.width,", "= extract_language(paragraph) yield paragraph_language or block_language # line grouping differs between simple annotations", "= error.get(\"message\", \"unknown\") return cls(code, message) def __str__(self) -> str: base = f\"**{type(self).__name__}**[{self.code}]:", "C A angle = 225 # \\ / # B # # C---D", "need_trasnslation[i] = line if not need_trasnslation: raise PINKError( \"nothing to translate on image", "r: if r.status != 200: await ctx.reply( f\"Unable to reach proxy: {r.status}\\n\" f\"Will", "\\ / # D if 0 <= angle <= 90: left = vertices[0].get(\"x\")", "typing import Any, Dict, List, Tuple, Union, Iterator, Optional, Sequence import PIL from", "# Any vertex coordinate can be missing if None in (x, y, next_x,", "from .types import StaticImage _VertexType = Dict[str, int] _VerticesType = Tuple[_VertexType, _VertexType, _VertexType,", "self.KNOWN_HINTS.get(self.code)) is not None: base += f\"\\n\\nHint: {hint}\" return base class TROCRException(Exception): pass", "-> str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint := 
self.KNOWN_HINTS.get(self.code)) is not None:", "from io import BytesIO from typing import Any, Dict, List, Tuple, Union, Iterator,", "but they can be joined # by checking full image description. In description", "return (left, upper, right, lower) @staticmethod def _get_angle(vertices: _VerticesType) -> int: def get_coords(vertex:", "return max((1, int(1.3333333 * self.height) - 2)) @property def stroke_width(self) -> int: return", "properties.get(\"detectedLanguages\")) is None: return None return sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ]", "is in target language or language is undetected)\", formatted=False, ) translated = await", "order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for line in lines ] async", "for each paragraph in Google OCR output\"\"\" def extract_language(data: Any) -> Optional[str]: if", "on image \" \"(either entire text is in target language or language is", "detected\", formatted=False) return maybe_annotations def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO: FIELD_CAP =", "keep track of full coords and just calculate distance # a lot of", "in enumerate(lines): if next(paragraph_languages) is not None: need_trasnslation[i] = line if not need_trasnslation:", "assert False # noqa @property def height(self) -> int: if self.angle in (0,", "left if self.left is None else min((self.left, left)) self.upper = upper if self.upper", "# NOTE: next line causes segfaults if coords are wrong, debug from here", "int]: \"\"\"Returns Pillow style coordinates (left, upper, right, lower).\"\"\" # A - 0", "@property def coords(self) -> Tuple[int, int, int, int]: return (self.left, self.upper, self.right, self.lower)", "now PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError):", "# A # # B---C # | | angle = 90 # A---D", 
"preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for line in lines ] async def _apply_translation(", "from pink.context import Context from pink.cogs.utils.errorhandler import PINKError from .types import StaticImage _VertexType", "left, upper, right, lower = self._vertices_to_coords( vertices, src_size, self.angle ) self.left = left", "constants.py # since it is not used anywhere else now PINK_PROXY = os.environ[\"PINK_PROXY\"]", "simple annotations and paragraph grouping in # full annotations. \"EOL_SURE_SPACE\" indicates line break", "first word self.angle = self._get_angle(vertices) left, upper, right, lower = self._vertices_to_coords( vertices, src_size,", "symbol_properties.get(\"detectedBreak\")) is None: continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield paragraph_language or block_language", "B---A # # C # / \\ # B D angle = 135", "right = vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif 180 < angle <= 270: left", "__repr__(self) -> str: return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]:", "cls(code, message) def __str__(self) -> str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint :=", "self.left is None else min((self.left, left)) self.upper = upper if self.upper is None", "90 * round(degrees / 90) @property def coords(self) -> Tuple[int, int, int, int]:", "Dict[str, int] _VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid", "< 0: degrees += 360 # compensate missing vertices degrees += 90 *", "OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [ { \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\":", "Dict, List, Tuple, Union, Iterator, Optional, Sequence import PIL from PIL import ImageDraw,", "class 
GoogleOCRError(PINKError): KNOWN_HINTS = { None: \"The world is on fire, something really", "-> BytesIO: FIELD_CAP = 150 fields = fields[:FIELD_CAP] src = src.convert(\"RGBA\") for field", "or block_language # line grouping differs between simple annotations and paragraph grouping in", "!= original_line.casefold(): fields.append(field) if not fields: raise PINKError(\"could not translate anything on image\",", "(self.left, self.upper, self.right, self.lower) # type: ignore @property def coords_padded(self) -> Tuple[int, int,", "\"EOL_SURE_SPACE\": continue yield paragraph_language or block_language async def ocr(ctx: Context, image_url: str) ->", "vertices[0].get(\"y\") elif 180 < angle <= 270: left = vertices[2].get(\"x\") upper = vertices[3].get(\"y\")", "field = TextField(line, src) remaining_line = original_line # TODO: sane iterator instead of", ") json = await r.json() if len((responses := json[\"responses\"])) == 0: return {}", "if \"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no text detected\", formatted=False) return", "max((1, int(1.3333333 * self.height) - 2)) @property def stroke_width(self) -> int: return max((1,", "= self._get_angle(vertices) left, upper, right, lower = self._vertices_to_coords( vertices, src_size, self.angle ) self.left", "if self.angle in (0, 180, 360): return self.lower - self.upper # type: ignore", "coordinates might be missing, 1st solution is more reliable if it worked @property", "| angle = 270 # C----B # # D # / \\ #", "os import math import itertools from io import BytesIO from typing import Any,", "return None not in self.coords def __repr__(self) -> str: return f\"<TextField text='{self.text}' coords={self.coords}", "for line in lines ] async def _apply_translation( ctx: Context, lines: List[str], language:", "# type: ignore degrees = math.degrees(math.atan2(delta_y, delta_x)) if degrees < 0: degrees +=", "else max((self.right, right)) self.lower = lower if 
self.lower is None else max((self.lower, lower))", "self._get_angle(vertices) left, upper, right, lower = self._vertices_to_coords( vertices, src_size, self.angle ) self.left =", "we got some garbage HTML response reason = \"unknown error\" raise PINKError( f\"Something", "vertices[2].get(\"y\") if left is None: left = 0 if upper is None: upper", "language for each paragraph in Google OCR output\"\"\" def extract_language(data: Any) -> Optional[str]:", "= await _apply_translation(ctx, lines, language, block_annotations) # error reporting notes = \"\" current_word", "self.right + self._padding)), # type: ignore min((self._src_height, self.lower + self._padding)), # type: ignore", "truncate angle to 90 degrees return 90 * round(degrees / 90) @property def", "# # C---D # | | angle = 180 # B---A # #", "block_language = extract_language(block) for paragraph in block[\"paragraphs\"]: paragraph_language = extract_language(paragraph) yield paragraph_language or", "block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API returns entry for each word separately,", "None self.upper: Optional[int] = None self.right: Optional[int] = None self.lower: Optional[int] = None", "w/h detection ASAP, this is temporary # solutions: # 1) https://stackoverflow.com/a/9972699 # text", "is not None: need_trasnslation[i] = line if not need_trasnslation: raise PINKError( \"nothing to", "undetectable\\n\" else: break if field.initialized: if line.casefold() != original_line.casefold(): fields.append(field) if not fields:", "\"x-referer\": \"https://explorer.apis.google.com\", }, ) as r: if r.status != 200: if r.content_type.lower() !=", "A # # B---C # | | angle = 90 # A---D #", "some garbage HTML response reason = \"unknown error\" raise PINKError( f\"Something really bad", "left = vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif", "PINKError( \"nothing to translate on image \" 
\"(either entire text is in target", "block_annotations: Any, ) -> List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\")) is None: raise RuntimeError(\"No", "D----C # # A # / \\ # D B angle = 315", "<= 90: left = vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower =", "round(self.font_size / 12))) @property def initialized(self) -> bool: return None not in self.coords", "async def ocr_translate( ctx: Context, image: StaticImage, language: Union[str, Accent] ) -> Tuple[BytesIO,", "90 # A---D # # B # / \\ # A C angle", "self.upper # type: ignore assert False # noqa @property def height(self) -> int:", "{hint}\" return base class TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass class TextField: def __init__(self,", "left is None: left = 0 if upper is None: upper = 0", "translate on image \" \"(either entire text is in target language or language", "2) try to keep track of full coords and just calculate distance #", "TODO: figure out how to fit text into boxes with Pillow without creating", "ctx: Context, lines: List[str], language: str, block_annotations: Any, ) -> List[str]: if (translator_cog", "temporary # solutions: # 1) https://stackoverflow.com/a/9972699 # text surrounding box dimensions are known,", "f\"Unable to reach proxy: {r.status}\\n\" f\"Will try raw URL but it will most", "language) else: new_lines = await _apply_translation(ctx, lines, language, block_annotations) # error reporting notes", "is undetectable\\n\" else: break if field.initialized: if line.casefold() != original_line.casefold(): fields.append(field) if not", "await _apply_translation(ctx, lines, language, block_annotations) # error reporting notes = \"\" current_word =", "return cls(code, message) def __str__(self) -> str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint", "field.width)), min((text_im.height, field.height)), ), ).rotate(field.angle, expand=True, 
resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result = BytesIO() src.save(result,", "if not self.initialized: # Get angle from first word self.angle = self._get_angle(vertices) left,", "src.alpha_composite( text_im.resize( ( min((text_im.width, field.width)), min((text_im.height, field.height)), ), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], )", "a lot of coordinates might be missing, 1st solution is more reliable if", "= \"unknown error\" raise PINKError( f\"Something really bad happened with underlying API[{r.status}]: {reason}\"", "# text surrounding box dimensions are known, but i had no success implementing", "try raw URL but it will most likely fail\" ) else: json =", "A----B # | | angle = 360/0 # D----C # # A #", "= line if not need_trasnslation: raise PINKError( \"nothing to translate on image \"", "-> int: if self.angle in (0, 180, 360): return self.lower - self.upper #", "__str__(self) -> str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint := self.KNOWN_HINTS.get(self.code)) is not", "FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font,", "= 0 if upper is None: upper = 0 if right is None:", "* i break else: raise AngleUndetectable # # truncate last digit, OCR often", "# / \\ # B D angle = 135 # \\ / #", "= {} paragraph_languages = _language_iterator(block_annotations) for i, line in enumerate(lines): if next(paragraph_languages) is", "0 if upper is None: upper = 0 if right is None: right", "degrees += 90 * i break else: raise AngleUndetectable # # truncate last", "angle = 315 # \\ / # C # # D----A # |", "in (90, 270): return self.lower - self.upper # type: ignore assert False #", "is None: raise RuntimeError(\"No accents cog loaded\") return [ # trocr fully depends", "size=font.getsize(field.text, 
stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0),", "box dimensions are known, but i had no success implementing this # 2)", "new_lines = _apply_accents(ctx, lines, language) else: new_lines = await _apply_translation(ctx, lines, language, block_annotations)", "min((self._src_width, self.right + self._padding)), # type: ignore min((self._src_height, self.lower + self._padding)), # type:", "if coords are wrong, debug from here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does not", "with Pillow without creating # extra images font = FONT.font_variant(size=field.font_size) text_im = PIL.Image.new(", ") self.left = left if self.left is None else min((self.left, left)) self.upper =", "full image description. In description words are combined into # lines, lines are", "__init__(self, full_text: str, src: PIL.Image, padding: int = 3): self.text = full_text self.left:", "= lines.copy() for idx, translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line return new_lines", "next_y): x, y = next_x, next_y continue # algo: https://stackoverflow.com/a/27481611 # mypy literally", "to improve translation? 
need_trasnslation = {} paragraph_languages = _language_iterator(block_annotations) for i, line in", "# # A----B # | | angle = 360/0 # D----C # #", "GoogleOCRError: error = response.get(\"error\", {}) code = error.get(\"code\") message = error.get(\"message\", \"unknown\") return", "# \\ / # B # # C---D # | | angle =", "isinstance(language, Accent): new_lines = _apply_accents(ctx, lines, language) else: new_lines = await _apply_translation(ctx, lines,", "return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts language for", "# noqa @property def font_size(self) -> int: return max((1, int(1.3333333 * self.height) -", "_apply_accents(ctx, lines, language) else: new_lines = await _apply_translation(ctx, lines, language, block_annotations) # error", "raise AngleUndetectable # # truncate last digit, OCR often returns 1-2 degree tilted", "surrounding box dimensions are known, but i had no success implementing this #", "maybe_annotations: if \"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no text detected\", formatted=False)", "last_symbol.get(\"property\")) is None: continue if (detected_break := symbol_properties.get(\"detectedBreak\")) is None: continue if detected_break[\"type\"]", "src = src.convert(\"RGBA\") for field in fields: cropped = src.crop(field.coords_padded) # NOTE: next", "!= len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}\" ) new_lines =", "right = vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif 270 < angle <= 360: left", "str, block_annotations: Any, ) -> List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\")) is None: raise", "ignore degrees = math.degrees(math.atan2(delta_y, delta_x)) if degrees < 0: degrees += 360 #", "( min((text_im.width, field.width)), min((text_im.height, 
field.height)), ), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result =", "OCR output\"\"\" def extract_language(data: Any) -> Optional[str]: if (properties := data.get(\"property\")) is None:", "PIL.Image, fields: Sequence[TextField]) -> BytesIO: FIELD_CAP = 150 fields = fields[:FIELD_CAP] src =", "response.get(\"error\", {}) code = error.get(\"code\") message = error.get(\"message\", \"unknown\") return cls(code, message) def", "words in the same line can be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language,", "formatted=False) return maybe_annotations def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO: FIELD_CAP = 150", "__future__ import annotations import os import math import itertools from io import BytesIO", "angle = 135 # \\ / # A # # B---C # |", "annotations. \"EOL_SURE_SPACE\" indicates line break matching simple # annotations for word in paragraph[\"words\"]:", "B---C # | | angle = 90 # A---D # # B #", "translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line return new_lines async def ocr_translate( ctx:", "be missing, 1st solution is more reliable if it worked @property def width(self)", "def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO: FIELD_CAP = 150 fields = fields[:FIELD_CAP]", "await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], },", "\"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, ) as r: if r.status != 200: if", "break matching simple # annotations for word in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if", "self.text = full_text self.left: Optional[int] = None self.upper: Optional[int] = None self.right: Optional[int]", "1) https://stackoverflow.com/a/9972699 # text surrounding box 
dimensions are known, but i had no", "# D----A # | | angle = 270 # C----B # # D", "= vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif 180 < angle <= 270: left =", "coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts language for each paragraph in", "upper = 0 if right is None: right = src_size[0] if lower is", "= f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS = { None: \"The", "/ # C # # D----A # | | angle = 270 #", "right, lower) @staticmethod def _get_angle(vertices: _VerticesType) -> int: def get_coords(vertex: _VertexType) -> Tuple[Optional[int],", "# mypy literally does not see previous statement delta_y = y - next_y", "grouping in # full annotations. \"EOL_SURE_SPACE\" indicates line break matching simple # annotations", "if left is None: left = 0 if upper is None: upper =", "be joined # by checking full image description. In description words are combined", "# type: ignore assert False # noqa @property def height(self) -> int: if", "TODO: group by input languages to improve translation? need_trasnslation = {} paragraph_languages =", "really bad happened. 
I have no idea.\", 14: \"This means Google cannot access", "extra images font = FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text(", "str]: src = await image.to_pil_image(ctx) annotations = await ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:]", "angle <= 180: left = vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower", "images font = FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0,", "initialized(self) -> bool: return None not in self.coords def __repr__(self) -> str: return", "ignore this # TEMPORARY: truncate angle to 90 degrees return 90 * round(degrees", "}, ) as r: if r.status != 200: if r.content_type.lower() != \"application/json\": reason", "detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield paragraph_language or block_language async def ocr(ctx: Context, image_url:", "Context, image_url: str) -> Dict[str, Any]: async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600),", "type: ignore max((0, self.upper - self._padding)), # type: ignore min((self._src_width, self.right + self._padding)),", "upper if self.upper is None else min((self.upper, upper)) self.right = right if self.right", "coords_padded(self) -> Tuple[int, int, int, int]: return ( max((0, self.left - self._padding)), #", "None: right = src_size[0] if lower is None: lower = src_size[1] return (left,", "return [ # trocr fully depends on newlines, apply accents to each line", "raise PINKError(\"could not translate anything on image\", formatted=False) result = await ctx.bot.loop.run_in_executor(None, _draw_trocr,", "@property def stroke_width(self) -> int: return max((1, round(self.font_size / 12))) @property 
def initialized(self)", "# \\ / # A # # B---C # | | angle =", "image_url = f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\":", "if r.status != 200: if r.content_type.lower() != \"application/json\": reason = await r.text() if", "stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), ) src.alpha_composite( text_im.resize( ( min((text_im.width, field.width)), min((text_im.height, field.height)), ),", "return (self.left, self.upper, self.right, self.lower) # type: ignore @property def coords_padded(self) -> Tuple[int,", "coords are wrong, debug from here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work", "annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API returns entry for each word", "@staticmethod def _vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int, int], angle: int ) -> Tuple[int,", "180, 360): return self.lower - self.upper # type: ignore if self.angle in (90,", "self.upper - self._padding)), # type: ignore min((self._src_width, self.right + self._padding)), # type: ignore", "lower = vertices[0].get(\"y\") elif 180 < angle <= 270: left = vertices[2].get(\"x\") upper", "if line.casefold() != original_line.casefold(): fields.append(field) if not fields: raise PINKError(\"could not translate anything", "word_annotations[current_word:]: text = word[\"description\"] if remaining_line.startswith(text): current_word += 1 remaining_line = remaining_line[len(text) :].lstrip()", "1)).convert(\"L\") # ).getpixel((0, 0)) # ugly!!! 
src.paste(blurred, field.coords_padded) for field in fields: #", "= right if self.right is None else max((self.right, right)) self.lower = lower if", "math import itertools from io import BytesIO from typing import Any, Dict, List,", "for idx, translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line return new_lines async def", "get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices) x, y", "json = await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post( OCR_API_URL, params={ \"key\":", "had no success implementing this # 2) try to keep track of full", "( max((0, self.left - self._padding)), # type: ignore max((0, self.upper - self._padding)), #", "2)) @property def stroke_width(self) -> int: return max((1, round(self.font_size / 12))) @property def", "need_trasnslation = {} paragraph_languages = _language_iterator(block_annotations) for i, line in enumerate(lines): if next(paragraph_languages)", "str) -> Dict[str, Any]: async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), ) as", "sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for block in blocks: block_language =", "0 if right is None: right = src_size[0] if lower is None: lower", "annotations import os import math import itertools from io import BytesIO from typing", "type: ignore if self.angle in (90, 270): return self.lower - self.upper # type:", "str, src: PIL.Image, padding: int = 3): self.text = full_text self.left: Optional[int] =", "\"nothing to translate on image \" \"(either entire text is in target language", "right)) self.lower = lower if self.lower is None else max((self.lower, lower)) @staticmethod def", "= vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if 
left", "- self.left # type: ignore assert False # noqa @property def font_size(self) ->", "lines.copy() for idx, translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line return new_lines async", "or language is undetected)\", formatted=False, ) translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language )", "language ) translated_lines = translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)}", "in underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}' ) json = await r.json() if", "= f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint := self.KNOWN_HINTS.get(self.code)) is not None: base += f\"\\n\\nHint:", "None self.lower: Optional[int] = None self.angle = 0 self._src_width, self._src_height = src.size self._padding", "else min((self.left, left)) self.upper = upper if self.upper is None else min((self.upper, upper))", "bool: return None not in self.coords def __repr__(self) -> str: return f\"<TextField text='{self.text}'", "style coordinates (left, upper, right, lower).\"\"\" # A - 0 # B -", ":= json[\"responses\"])) == 0: return {} maybe_annotations = responses[0] if \"textAnnotations\" not in", "vertices degrees += 90 * i break else: raise AngleUndetectable # # truncate", "not self.initialized: # Get angle from first word self.angle = self._get_angle(vertices) left, upper,", "next_x - x # type: ignore degrees = math.degrees(math.atan2(delta_y, delta_x)) if degrees <", "image \" \"(either entire text is in target language or language is undetected)\",", "pass class TextField: def __init__(self, full_text: str, src: PIL.Image, padding: int = 3):", "0 <= angle <= 90: left = vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right =", "# type: ignore max((0, self.upper - self._padding)), # type: ignore min((self._src_width, self.right +", "(90, 270): return self.lower 
- self.upper # type: ignore assert False # noqa", "Any, Dict, List, Tuple, Union, Iterator, Optional, Sequence import PIL from PIL import", "\"image\": { \"source\": { \"imageUri\": image_url, } }, } ] }, headers={ \"x-origin\":", "None: continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield paragraph_language or block_language async def", "# | | angle = 180 # B---A # # C # /", "field.coords_padded) for field in fields: # TODO: figure out how to fit text", "last digit, OCR often returns 1-2 degree tilted text, ignore this # TEMPORARY:", "import PIL from PIL import ImageDraw, ImageFont, ImageFilter from pink_accents import Accent from", "<= 270: left = vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower =", "None else max((self.right, right)) self.lower = lower if self.lower is None else max((self.lower,", "newlines, apply accents to each line separately and # replace any newlines with", "# TEMPORARY: truncate angle to 90 degrees return 90 * round(degrees / 90)", "max((0, self.upper - self._padding)), # type: ignore min((self._src_width, self.right + self._padding)), # type:", "text surrounding box dimensions are known, but i had no success implementing this", "return sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for block in blocks: block_language", "class TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass class TextField: def __init__(self, full_text: str, src:", "Any) -> Optional[str]: if (properties := data.get(\"property\")) is None: return None if (languages", "return new_lines async def ocr_translate( ctx: Context, image: StaticImage, language: Union[str, Accent] )", "self.right = right if self.right is None else max((self.right, right)) self.lower = lower", "# A---D # # B # / \\ # A C angle =", "= None self.right: Optional[int] = None self.lower: Optional[int] = None self.angle = 0", "ignore ) # TODO: implement w/h detection ASAP, this is 
temporary # solutions:", "self.angle in (0, 180, 360): return self.right - self.left # type: ignore if", "async with ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [ { \"features\":", "pass class AngleUndetectable(TROCRException): pass class TextField: def __init__(self, full_text: str, src: PIL.Image, padding:", "improve translation? need_trasnslation = {} paragraph_languages = _language_iterator(block_annotations) for i, line in enumerate(lines):", "\"\" current_word = 0 fields = [] for original_line, line in zip(lines, new_lines):", "is a trailing newline. # Coordinates from words in the same line can", "= code self.message = message super().__init__(str(self)) @classmethod def from_response(cls, response: Dict[str, Any]) ->", "/ 90) @property def coords(self) -> Tuple[int, int, int, int]: return (self.left, self.upper,", "in fields: cropped = src.crop(field.coords_padded) # NOTE: next line causes segfaults if coords", "\"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [ { \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": { \"source\":", "paragraph_languages = _language_iterator(block_annotations) for i, line in enumerate(lines): if next(paragraph_languages) is not None:", "this is temporary # solutions: # 1) https://stackoverflow.com/a/9972699 # text surrounding box dimensions", "full coords and just calculate distance # a lot of coordinates might be", "Any]: async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), ) as r: if r.status", "this # 2) try to keep track of full coords and just calculate", "int]) -> None: if not self.initialized: # Get angle from first word self.angle", "PINKError(\"could not translate anything on image\", formatted=False) result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src,", "vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if left is None: left = 0 if upper", 
"A C angle = 45 # \\ / # D if 0 <=", "itertools.cycle(vertices) x, y = get_coords(next(cycle)) for i in range(4): next_x, next_y = get_coords(next(cycle))", "- x # type: ignore degrees = math.degrees(math.atan2(delta_y, delta_x)) if degrees < 0:", "replace any newlines with spaces to make sure text order is preserved accent_cog.apply_accents_to_text(line,", "\\ # B D angle = 135 # \\ / # A #", "@property def height(self) -> int: if self.angle in (0, 180, 360): return self.lower", "/ # D if 0 <= angle <= 90: left = vertices[0].get(\"x\") upper", "min((text_im.width, field.width)), min((text_im.height, field.height)), ), ).rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result = BytesIO()", "# Coordinates from words in the same line can be merged lines =", "self._src_width, self._src_height = src.size self._padding = padding def add_word(self, vertices: _VerticesType, src_size: Tuple[int,", "trailing newline. # Coordinates from words in the same line can be merged", "# # B---C # | | angle = 90 # A---D # #", "fields: # TODO: figure out how to fit text into boxes with Pillow", "# | | angle = 90 # A---D # # B # /", "def _vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int, int], angle: int ) -> Tuple[int, int,", "_draw_trocr, src, fields) stats = f\"Words: {current_word}\\nLines: {len(fields)}\" if notes: stats += f\"\\nNotes:", "lower) @staticmethod def _get_angle(vertices: _VerticesType) -> int: def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]:", "| | angle = 180 # B---A # # C # / \\", "= cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work anymore for some reason, black stroke is", "] for block in blocks: block_language = extract_language(block) for paragraph in block[\"paragraphs\"]: paragraph_language", "`{word}` is undetectable\\n\" else: break if field.initialized: if line.casefold() != original_line.casefold(): fields.append(field) if", "coordinate can be missing if None in 
(x, y, next_x, next_y): x, y", "self.lower) # type: ignore @property def coords_padded(self) -> Tuple[int, int, int, int]: return", "Optional[int], message: str): self.code = code self.message = message super().__init__(str(self)) @classmethod def from_response(cls,", "dependency by not reading it in constants.py # since it is not used", "{len(need_trasnslation)} translated lines, got {len(translated_lines)}\" ) new_lines = lines.copy() for idx, translated_line in", "add_word(self, vertices: _VerticesType, src_size: Tuple[int, int]) -> None: if not self.initialized: # Get", "# # A # / \\ # D B angle = 315 #", "in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line return new_lines async def ocr_translate( ctx: Context,", "src.size) except AngleUndetectable: notes += f\"angle for `{word}` is undetectable\\n\" else: break if", "90 < angle <= 180: left = vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right =", "1 # C - 2 # D - 3 # # A----B #", "indicates line break matching simple # annotations for word in paragraph[\"words\"]: last_symbol =", "self.left = left if self.left is None else min((self.left, left)) self.upper = upper", "None self.right: Optional[int] = None self.lower: Optional[int] = None self.angle = 0 self._src_width,", "is None: left = 0 if upper is None: upper = 0 if", "fields = [] for original_line, line in zip(lines, new_lines): field = TextField(line, src)", "happened with underlying API[{r.status}]: {reason}\" ) json = await r.json() raise PINKError( f\"Error", "Tuple[int, int, int, int]: return (self.left, self.upper, self.right, self.lower) # type: ignore @property", "known, but i had no success implementing this # 2) try to keep", "C # # D----A # | | angle = 270 # C----B #", "self.lower is None else max((self.lower, lower)) @staticmethod def _vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int,", "full annotations. 
\"EOL_SURE_SPACE\" indicates line break matching simple # annotations for word in", "iterator instead of this for word in word_annotations[current_word:]: text = word[\"description\"] if remaining_line.startswith(text):", "-> bool: return None not in self.coords def __repr__(self) -> str: return f\"<TextField", "# # truncate last digit, OCR often returns 1-2 degree tilted text, ignore", "self._padding)), # type: ignore min((self._src_height, self.lower + self._padding)), # type: ignore ) #", "-> int: return max((1, round(self.font_size / 12))) @property def initialized(self) -> bool: return", "line grouping differs between simple annotations and paragraph grouping in # full annotations.", "return vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices) x, y = get_coords(next(cycle)) for i in", "for paragraph in block[\"paragraphs\"]: paragraph_language = extract_language(paragraph) yield paragraph_language or block_language # line", "Sequence import PIL from PIL import ImageDraw, ImageFont, ImageFilter from pink_accents import Accent", "\"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, ) as r: if r.status != 200: if r.content_type.lower()", "some reason, black stroke is good anyway # field.inverted_avg_color = ImageOps.invert( # blurred.resize((1,", "await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines = translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation): raise", "for i in range(4): next_x, next_y = get_coords(next(cycle)) # Any vertex coordinate can", "text into boxes with Pillow without creating # extra images font = FONT.font_variant(size=field.font_size)", "continue if (detected_break := symbol_properties.get(\"detectedBreak\")) is None: continue if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue", "maybe_annotations = responses[0] if \"textAnnotations\" not in maybe_annotations: if \"error\" in maybe_annotations: raise", 
"= src.crop(field.coords_padded) # NOTE: next line causes segfaults if coords are wrong, debug", "not in self.coords def __repr__(self) -> str: return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def", "# D - 3 # # A----B # | | angle = 360/0", "raise PINKError( f\"Something really bad happened with underlying API[{r.status}]: {reason}\" ) json =", "compensate missing vertices degrees += 90 * i break else: raise AngleUndetectable #", "TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass class TextField: def __init__(self, full_text: str, src: PIL.Image,", "self.lower: Optional[int] = None self.angle = 0 self._src_width, self._src_height = src.size self._padding =", "separately and # replace any newlines with spaces to make sure text order", "not see previous statement delta_y = y - next_y # type: ignore delta_x", "int, int]: return (self.left, self.upper, self.right, self.lower) # type: ignore @property def coords_padded(self)", "3 # # A----B # | | angle = 360/0 # D----C #", "import PINKError from .types import StaticImage _VertexType = Dict[str, int] _VerticesType = Tuple[_VertexType,", "45 # \\ / # D if 0 <= angle <= 90: left", "-> Iterator[Optional[str]]: \"\"\"Extracts language for each paragraph in Google OCR output\"\"\" def extract_language(data:", "270: left = vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower = vertices[1].get(\"y\")", "int]: return ( max((0, self.left - self._padding)), # type: ignore max((0, self.upper -", "= await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats = f\"Words: {current_word}\\nLines: {len(fields)}\" if notes:", "worked @property def width(self) -> int: if self.angle in (0, 180, 360): return", "formatted=False, ) translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines = translated.split(\"\\n\") if", "else: new_lines = await _apply_translation(ctx, lines, 
language, block_annotations) # error reporting notes =", "+ self._padding)), # type: ignore min((self._src_height, self.lower + self._padding)), # type: ignore )", "if (languages := properties.get(\"detectedLanguages\")) is None: return None return sorted(languages, key=lambda l: l.get(\"confidence\",", "\"application/json\": reason = await r.text() if reason.count(\"\\n\") > 1: # we got some", "\\ / # C # # D----A # | | angle = 270", "translated = await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines = translated.split(\"\\n\") if len(translated_lines) !=", "from __future__ import annotations import os import math import itertools from io import", "result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats = f\"Words: {current_word}\\nLines: {len(fields)}\" if", "= vertices[1].get(\"y\") elif 270 < angle <= 360: left = vertices[3].get(\"x\") upper =", "import Any, Dict, List, Tuple, Union, Iterator, Optional, Sequence import PIL from PIL", "in (0, 180, 360): return self.lower - self.upper # type: ignore if self.angle", "self.lower - self.upper # type: ignore if self.angle in (90, 270): return self.right", "upper)) self.right = right if self.right is None else max((self.right, right)) self.lower =", "grouping differs between simple annotations and paragraph grouping in # full annotations. 
\"EOL_SURE_SPACE\"", "returns 1-2 degree tilted text, ignore this # TEMPORARY: truncate angle to 90", "in blocks: block_language = extract_language(block) for paragraph in block[\"paragraphs\"]: paragraph_language = extract_language(paragraph) yield", "= 270 # C----B # # D # / \\ # C A", "# extra images font = FONT.font_variant(size=field.font_size) text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), )", "language: Union[str, Accent] ) -> Tuple[BytesIO, str]: src = await image.to_pil_image(ctx) annotations =", "# algo: https://stackoverflow.com/a/27481611 # mypy literally does not see previous statement delta_y =", "current_word += 1 remaining_line = remaining_line[len(text) :].lstrip() # TODO: merge multiple lines into", "{r.status}\\n\" f\"Will try raw URL but it will most likely fail\" ) else:", "int] _VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making", "solution is more reliable if it worked @property def width(self) -> int: if", "class TextField: def __init__(self, full_text: str, src: PIL.Image, padding: int = 3): self.text", "for each word separately, but they can be joined # by checking full", "None else max((self.lower, lower)) @staticmethod def _vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int, int], angle:", "TextField(line, src) remaining_line = original_line # TODO: sane iterator instead of this for", "is None: return None if (languages := properties.get(\"detectedLanguages\")) is None: return None return", "often returns 1-2 degree tilted text, ignore this # TEMPORARY: truncate angle to", "import itertools from io import BytesIO from typing import Any, Dict, List, Tuple,", "OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" # avoid making this a hard dependency by not reading", "(0, 180, 360): return self.lower - self.upper # type: ignore if self.angle 
in", "good anyway # field.inverted_avg_color = ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0)) #", "int, int, int]: return (self.left, self.upper, self.right, self.lower) # type: ignore @property def", "last_symbol = word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\")) is None: continue if (detected_break :=", "B - 1 # C - 2 # D - 3 # #", "# TODO: merge multiple lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes", "/ \\ # C A angle = 225 # \\ / # B", "if self.angle in (90, 270): return self.lower - self.upper # type: ignore assert", "else min((self.upper, upper)) self.right = right if self.right is None else max((self.right, right))", "it will most likely fail\" ) else: json = await r.json() image_url =", ") result = BytesIO() src.save(result, format=\"PNG\") return BytesIO(result.getvalue()) def _apply_accents(ctx: Context, lines: List[str],", "is None: return None return sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for", "f\"angle for `{word}` is undetectable\\n\" else: break if field.initialized: if line.casefold() != original_line.casefold():", "text detected\", formatted=False) return maybe_annotations def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO: FIELD_CAP", "Google cannot access image URL. 
Try using a different one.\", } def __init__(self,", "= error.get(\"code\") message = error.get(\"message\", \"unknown\") return cls(code, message) def __str__(self) -> str:", "expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result = BytesIO() src.save(result, format=\"PNG\") return BytesIO(result.getvalue()) def _apply_accents(ctx:", "annotations for word in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\")) is", "= None self.lower: Optional[int] = None self.angle = 0 self._src_width, self._src_height = src.size", "it worked @property def width(self) -> int: if self.angle in (0, 180, 360):", "proxy: {r.status}\\n\" f\"Will try raw URL but it will most likely fail\" )", "response: Dict[str, Any]) -> GoogleOCRError: error = response.get(\"error\", {}) code = error.get(\"code\") message", "translated_lines): new_lines[idx] = translated_line return new_lines async def ocr_translate( ctx: Context, image: StaticImage,", "# / \\ # D B angle = 315 # \\ / #", "is None: right = src_size[0] if lower is None: lower = src_size[1] return", "API[{r.status}]: {reason}\" ) json = await r.json() raise PINKError( f\"Error in underlying API[{r.status}]:", "= word[\"description\"] if remaining_line.startswith(text): current_word += 1 remaining_line = remaining_line[len(text) :].lstrip() # TODO:", "original_line, line in zip(lines, new_lines): field = TextField(line, src) remaining_line = original_line #", "TODO: implement w/h detection ASAP, this is temporary # solutions: # 1) https://stackoverflow.com/a/9972699", "if reason.count(\"\\n\") > 1: # we got some garbage HTML response reason =", "PIL import ImageDraw, ImageFont, ImageFilter from pink_accents import Accent from pink.context import Context", "words are combined into # lines, lines are separated by newlines, there is", "code self.message = message super().__init__(str(self)) @classmethod def from_response(cls, response: Dict[str, Any]) -> 
GoogleOCRError:", "enumerate(lines): if next(paragraph_languages) is not None: need_trasnslation[i] = line if not need_trasnslation: raise", "coords(self) -> Tuple[int, int, int, int]: return (self.left, self.upper, self.right, self.lower) # type:", "Google OCR API returns entry for each word separately, but they can be", "-> Tuple[int, int, int, int]: return ( max((0, self.left - self._padding)), # type:", "[] for original_line, line in zip(lines, new_lines): field = TextField(line, src) remaining_line =", "StaticImage, language: Union[str, Accent] ) -> Tuple[BytesIO, str]: src = await image.to_pil_image(ctx) annotations", "raise PINKError( \"nothing to translate on image \" \"(either entire text is in", "= vertices[2].get(\"y\") if left is None: left = 0 if upper is None:", "TODO: sane iterator instead of this for word in word_annotations[current_word:]: text = word[\"description\"]", "await ctx.reply( f\"Unable to reach proxy: {r.status}\\n\" f\"Will try raw URL but it", "try to keep track of full coords and just calculate distance # a", "12))) @property def initialized(self) -> bool: return None not in self.coords def __repr__(self)", "f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), ) as r: if r.status != 200: await ctx.reply(", "None: upper = 0 if right is None: right = src_size[0] if lower", "a trailing newline. # Coordinates from words in the same line can be", "itertools from io import BytesIO from typing import Any, Dict, List, Tuple, Union,", "field.initialized: if line.casefold() != original_line.casefold(): fields.append(field) if not fields: raise PINKError(\"could not translate", "on fire, something really bad happened. 
I have no idea.\", 14: \"This means", "upper, right, lower = self._vertices_to_coords( vertices, src_size, self.angle ) self.left = left if", "any newlines with spaces to make sure text order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\",", "(hint := self.KNOWN_HINTS.get(self.code)) is not None: base += f\"\\n\\nHint: {hint}\" return base class", "left = vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if", ") translated_lines = translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)} translated", "f\"Will try raw URL but it will most likely fail\" ) else: json", "ctx: Context, image: StaticImage, language: Union[str, Accent] ) -> Tuple[BytesIO, str]: src =", "if next(paragraph_languages) is not None: need_trasnslation[i] = line if not need_trasnslation: raise PINKError(", "= await ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR", "# 2) try to keep track of full coords and just calculate distance", "_VerticesType, src_size: Tuple[int, int]) -> None: if not self.initialized: # Get angle from", "each word separately, but they can be joined # by checking full image", "\" \") for line in lines ] async def _apply_translation( ctx: Context, lines:", "# D----C # # A # / \\ # D B angle =", "# # B # / \\ # A C angle = 45 #", "cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work anymore for some reason, black stroke is good", "self.coords def __repr__(self) -> str: return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any])", "resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result = BytesIO() src.save(result, format=\"PNG\") return BytesIO(result.getvalue()) def _apply_accents(ctx: Context,", 
"line can be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines = _apply_accents(ctx,", "base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint := self.KNOWN_HINTS.get(self.code)) is not None: base +=", "io import BytesIO from typing import Any, Dict, List, Tuple, Union, Iterator, Optional,", "Iterator, Optional, Sequence import PIL from PIL import ImageDraw, ImageFont, ImageFilter from pink_accents", "idx, translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line return new_lines async def ocr_translate(", "<= 180: left = vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower =", "missing, 1st solution is more reliable if it worked @property def width(self) ->", "i in range(4): next_x, next_y = get_coords(next(cycle)) # Any vertex coordinate can be", "ImageFilter from pink_accents import Accent from pink.context import Context from pink.cogs.utils.errorhandler import PINKError", "__init__(self, code: Optional[int], message: str): self.code = code self.message = message super().__init__(str(self)) @classmethod", "200: if r.content_type.lower() != \"application/json\": reason = await r.text() if reason.count(\"\\n\") > 1:", "f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts language for each", "A # / \\ # D B angle = 315 # \\ /", "= 3): self.text = full_text self.left: Optional[int] = None self.upper: Optional[int] = None", "src, fields) stats = f\"Words: {current_word}\\nLines: {len(fields)}\" if notes: stats += f\"\\nNotes: {notes}\"", "anyway # field.inverted_avg_color = ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0)) # ugly!!!", "= vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if left is None: left = 0 if", "angle <= 360: left = 
vertices[3].get(\"x\") upper = vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower", "spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), ) src.alpha_composite( text_im.resize( ( min((text_im.width, field.width)), min((text_im.height, field.height)),", "stroke is good anyway # field.inverted_avg_color = ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0,", "{}) code = error.get(\"code\") message = error.get(\"message\", \"unknown\") return cls(code, message) def __str__(self)", "= annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API returns entry for each word separately, but", "= Dict[str, int] _VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\" #", "\"unknown error\")}' ) json = await r.json() if len((responses := json[\"responses\"])) == 0:", "self._padding)), # type: ignore max((0, self.upper - self._padding)), # type: ignore min((self._src_width, self.right", "min((self.left, left)) self.upper = upper if self.upper is None else min((self.upper, upper)) self.right", "ocr(ctx, image.url) word_annotations = annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API returns", "as r: if r.status != 200: await ctx.reply( f\"Unable to reach proxy: {r.status}\\n\"", "{len(translated_lines)}\" ) new_lines = lines.copy() for idx, translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] =", "= TextField(line, src) remaining_line = original_line # TODO: sane iterator instead of this", "angle <= 270: left = vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower", "word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\")) is None: continue if (detected_break := symbol_properties.get(\"detectedBreak\")) is", "in zip(lines, new_lines): field = 
TextField(line, src) remaining_line = original_line # TODO: sane", "# type: ignore min((self._src_width, self.right + self._padding)), # type: ignore min((self._src_height, self.lower +", "headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), ) as r: if r.status != 200: await ctx.reply( f\"Unable", "lower = src_size[1] return (left, upper, right, lower) @staticmethod def _get_angle(vertices: _VerticesType) ->", "{} paragraph_languages = _language_iterator(block_annotations) for i, line in enumerate(lines): if next(paragraph_languages) is not", "# type: ignore @property def coords_padded(self) -> Tuple[int, int, int, int]: return (", "self._padding)), # type: ignore ) # TODO: implement w/h detection ASAP, this is", "> 1: # we got some garbage HTML response reason = \"unknown error\"", "self.upper # type: ignore if self.angle in (90, 270): return self.right - self.left", ").rotate(field.angle, expand=True, resample=PIL.Image.BICUBIC), field.coords_padded[:2], ) result = BytesIO() src.save(result, format=\"PNG\") return BytesIO(result.getvalue()) def", "message) def __str__(self) -> str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint := self.KNOWN_HINTS.get(self.code))", "newline. 
# Coordinates from words in the same line can be merged lines", "+= 1 remaining_line = remaining_line[len(text) :].lstrip() # TODO: merge multiple lines into box", "with ctx.session.post( OCR_API_URL, params={ \"key\": os.environ[\"OCR_API_TOKEN\"], }, json={ \"requests\": [ { \"features\": [{\"type\":", "= await r.json() if len((responses := json[\"responses\"])) == 0: return {} maybe_annotations =", "is None else min((self.upper, upper)) self.right = right if self.right is None else", "ignore if self.angle in (90, 270): return self.right - self.left # type: ignore", "# D if 0 <= angle <= 90: left = vertices[0].get(\"x\") upper =", "async def _apply_translation( ctx: Context, lines: List[str], language: str, block_annotations: Any, ) ->", "sane iterator instead of this for word in word_annotations[current_word:]: text = word[\"description\"] if", "between simple annotations and paragraph grouping in # full annotations. \"EOL_SURE_SPACE\" indicates line", "# error reporting notes = \"\" current_word = 0 fields = [] for", "# B # / \\ # A C angle = 45 # \\", "Accent) -> List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No accents cog", "# solutions: # 1) https://stackoverflow.com/a/9972699 # text surrounding box dimensions are known, but", "= vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif 270 < angle <= 360: left =", "in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no text detected\", formatted=False) return maybe_annotations def", "not reading it in constants.py # since it is not used anywhere else", "Optional[int] = None self.lower: Optional[int] = None self.angle = 0 self._src_width, self._src_height =", "lines: List[str], language: str, block_annotations: Any, ) -> List[str]: if (translator_cog := ctx.bot.get_cog(\"Translator\"))", "font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), ) src.alpha_composite( text_im.resize( ( 
min((text_im.width, field.width)), min((text_im.height,", "_get_angle(vertices: _VerticesType) -> int: def get_coords(vertex: _VertexType) -> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\")", "/ \\ # A C angle = 45 # \\ / # D", "# avoid making this a hard dependency by not reading it in constants.py", "field in fields: cropped = src.crop(field.coords_padded) # NOTE: next line causes segfaults if", "225 # \\ / # B # # C---D # | | angle", "# / \\ # C A angle = 225 # \\ / #", "fail\" ) else: json = await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async with ctx.session.post(", "str: return f\"<TextField text='{self.text}' coords={self.coords} angle={self.angle}>\" def _language_iterator(blocks: Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts language", "API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}' ) json = await r.json() if len((responses :=", "translation? need_trasnslation = {} paragraph_languages = _language_iterator(block_annotations) for i, line in enumerate(lines): if", "r.text() if reason.count(\"\\n\") > 1: # we got some garbage HTML response reason", "(0, 0), text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), ) src.alpha_composite( text_im.resize( (", "if self.lower is None else max((self.lower, lower)) @staticmethod def _vertices_to_coords( vertices: _VerticesType, src_size:", "Any]) -> GoogleOCRError: error = response.get(\"error\", {}) code = error.get(\"code\") message = error.get(\"message\",", "len((responses := json[\"responses\"])) == 0: return {} maybe_annotations = responses[0] if \"textAnnotations\" not", "angle = 45 # \\ / # D if 0 <= angle <=", "Sequence[Any]) -> Iterator[Optional[str]]: \"\"\"Extracts language for each paragraph in Google OCR output\"\"\" def", "Tuple[BytesIO, str]: src = await image.to_pil_image(ctx) annotations = await ocr(ctx, image.url) word_annotations =", "from pink_accents import Accent from pink.context 
import Context from pink.cogs.utils.errorhandler import PINKError from", "are separated by newlines, there is a trailing newline. # Coordinates from words", "/ # B # # C---D # | | angle = 180 #", "field in fields: # TODO: figure out how to fit text into boxes", "= full_text self.left: Optional[int] = None self.upper: Optional[int] = None self.right: Optional[int] =", "# C---D # | | angle = 180 # B---A # # C", "import Context from pink.cogs.utils.errorhandler import PINKError from .types import StaticImage _VertexType = Dict[str,", "PINKError(\"no text detected\", formatted=False) return maybe_annotations def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO:", "in the same line can be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent):", "Optional[str]: if (properties := data.get(\"property\")) is None: return None if (languages := properties.get(\"detectedLanguages\"))", "[{\"type\": \"TEXT_DETECTION\"}], \"image\": { \"source\": { \"imageUri\": image_url, } }, } ] },", "src: PIL.Image, padding: int = 3): self.text = full_text self.left: Optional[int] = None", "BytesIO(result.getvalue()) def _apply_accents(ctx: Context, lines: List[str], accent: Accent) -> List[str]: if (accent_cog :=", "URL but it will most likely fail\" ) else: json = await r.json()", "i break else: raise AngleUndetectable # # truncate last digit, OCR often returns", "image_url, } }, } ] }, headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, )", "image_url: str) -> Dict[str, Any]: async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), )", "maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no text detected\", formatted=False) return maybe_annotations def _draw_trocr(src:", "distance # a lot of coordinates might be missing, 1st 
solution is more", "# we got some garbage HTML response reason = \"unknown error\" raise PINKError(", "not work anymore for some reason, black stroke is good anyway # field.inverted_avg_color", "this # TEMPORARY: truncate angle to 90 degrees return 90 * round(degrees /", "return max((1, round(self.font_size / 12))) @property def initialized(self) -> bool: return None not", "json=dict(url=image_url, ttl=3600), ) as r: if r.status != 200: await ctx.reply( f\"Unable to", "Context, lines: List[str], language: str, block_annotations: Any, ) -> List[str]: if (translator_cog :=", "= vertices[0].get(\"y\") right = vertices[1].get(\"x\") lower = vertices[2].get(\"y\") if left is None: left", "| angle = 180 # B---A # # C # / \\ #", "need_trasnslation: raise PINKError( \"nothing to translate on image \" \"(either entire text is", "270): return self.lower - self.upper # type: ignore assert False # noqa @property", "lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines = _apply_accents(ctx, lines, language) else: new_lines", "by newlines, there is a trailing newline. 
# Coordinates from words in the", "as r: if r.status != 200: if r.content_type.lower() != \"application/json\": reason = await", "= vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif 90", "# B # # C---D # | | angle = 180 # B---A", "out how to fit text into boxes with Pillow without creating # extra", "}, } ] }, headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\", }, ) as r:", "block[\"paragraphs\"]: paragraph_language = extract_language(paragraph) yield paragraph_language or block_language # line grouping differs between", "async def ocr(ctx: Context, image_url: str) -> Dict[str, Any]: async with ctx.session.post( f\"{PINK_PROXY}\",", "-> Dict[str, Any]: async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), ) as r:", "1))[-1][ \"languageCode\" ] for block in blocks: block_language = extract_language(block) for paragraph in", "implement w/h detection ASAP, this is temporary # solutions: # 1) https://stackoverflow.com/a/9972699 #", "return None return sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for block in", "return base class TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass class TextField: def __init__(self, full_text:", "or block_language async def ocr(ctx: Context, image_url: str) -> Dict[str, Any]: async with", "next_y # type: ignore delta_x = next_x - x # type: ignore degrees", "description. 
In description words are combined into # lines, lines are separated by", "= 0 self._src_width, self._src_height = src.size self._padding = padding def add_word(self, vertices: _VerticesType,", "underlying API[{r.status}]: {reason}\" ) json = await r.json() raise PINKError( f\"Error in underlying", "_VertexType = Dict[str, int] _VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType] OCR_API_URL = \"https://content-vision.googleapis.com/v1/images:annotate\"", "# C # # D----A # | | angle = 270 # C----B", "\"The world is on fire, something really bad happened. I have no idea.\",", "into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes += f\"angle for `{word}` is", "paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\")) is None: continue if (detected_break", "spaces to make sure text order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for", "new_lines async def ocr_translate( ctx: Context, image: StaticImage, language: Union[str, Accent] ) ->", "return 90 * round(degrees / 90) @property def coords(self) -> Tuple[int, int, int,", "no idea.\", 14: \"This means Google cannot access image URL. 
Try using a", "= vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower = vertices[0].get(\"y\") elif 180", "debug from here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work anymore for some", "Union, Iterator, Optional, Sequence import PIL from PIL import ImageDraw, ImageFont, ImageFilter from", "= next_x - x # type: ignore degrees = math.degrees(math.atan2(delta_y, delta_x)) if degrees", "-> GoogleOCRError: error = response.get(\"error\", {}) code = error.get(\"code\") message = error.get(\"message\", \"unknown\")", "base += f\"\\n\\nHint: {hint}\" return base class TROCRException(Exception): pass class AngleUndetectable(TROCRException): pass class", "else: break if field.initialized: if line.casefold() != original_line.casefold(): fields.append(field) if not fields: raise", "(properties := data.get(\"property\")) is None: return None if (languages := properties.get(\"detectedLanguages\")) is None:", "Tuple[int, int, int, int]: return ( max((0, self.left - self._padding)), # type: ignore", "self._src_height = src.size self._padding = padding def add_word(self, vertices: _VerticesType, src_size: Tuple[int, int])", "ignore min((self._src_height, self.lower + self._padding)), # type: ignore ) # TODO: implement w/h", "type: ignore assert False # noqa @property def height(self) -> int: if self.angle", "AngleUndetectable: notes += f\"angle for `{word}` is undetectable\\n\" else: break if field.initialized: if", "# Google OCR API returns entry for each word separately, but they can", "new_lines = lines.copy() for idx, translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line return", "vertices, src_size, self.angle ) self.left = left if self.left is None else min((self.left,", "line in zip(lines, new_lines): field = TextField(line, src) remaining_line = original_line # TODO:", "FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS = { 
None: \"The world is on", "str: base = f\"**{type(self).__name__}**[{self.code}]: {self.message}\" if (hint := self.KNOWN_HINTS.get(self.code)) is not None: base", "in # full annotations. \"EOL_SURE_SPACE\" indicates line break matching simple # annotations for", "json = await r.json() raise PINKError( f\"Error in underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown", "None return sorted(languages, key=lambda l: l.get(\"confidence\", 1))[-1][ \"languageCode\" ] for block in blocks:", "def initialized(self) -> bool: return None not in self.coords def __repr__(self) -> str:", "self.left: Optional[int] = None self.upper: Optional[int] = None self.right: Optional[int] = None self.lower:", "fit text into boxes with Pillow without creating # extra images font =", "from first word self.angle = self._get_angle(vertices) left, upper, right, lower = self._vertices_to_coords( vertices,", "left = 0 if upper is None: upper = 0 if right is", "= f\"Words: {current_word}\\nLines: {len(fields)}\" if notes: stats += f\"\\nNotes: {notes}\" return result, stats", "[accent]).replace(\"\\n\", \" \") for line in lines ] async def _apply_translation( ctx: Context,", "= { None: \"The world is on fire, something really bad happened. 
I", "180 # B---A # # C # / \\ # B D angle", "90 * i break else: raise AngleUndetectable # # truncate last digit, OCR", "self.lower = lower if self.lower is None else max((self.lower, lower)) @staticmethod def _vertices_to_coords(", "word_annotations = annotations[\"textAnnotations\"][1:] block_annotations = annotations[\"fullTextAnnotation\"][\"pages\"][0][\"blocks\"] # Google OCR API returns entry for", "= vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif 270", "garbage HTML response reason = \"unknown error\" raise PINKError( f\"Something really bad happened", "180: left = vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right = vertices[3].get(\"x\") lower = vertices[0].get(\"y\")", "Optional[int] = None self.angle = 0 self._src_width, self._src_height = src.size self._padding = padding", "reason = \"unknown error\" raise PINKError( f\"Something really bad happened with underlying API[{r.status}]:", "= next_x, next_y continue # algo: https://stackoverflow.com/a/27481611 # mypy literally does not see", "= 315 # \\ / # C # # D----A # | |", "for field in fields: cropped = src.crop(field.coords_padded) # NOTE: next line causes segfaults", "vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices) x, y = get_coords(next(cycle)) for i in range(4):", "# ).getpixel((0, 0)) # ugly!!! src.paste(blurred, field.coords_padded) for field in fields: # TODO:", "URL. 
Try using a different one.\", } def __init__(self, code: Optional[int], message: str):", "y = get_coords(next(cycle)) for i in range(4): next_x, next_y = get_coords(next(cycle)) # Any", "= vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif 90 < angle <= 180: left =", "most likely fail\" ) else: json = await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async", "message: str): self.code = code self.message = message super().__init__(str(self)) @classmethod def from_response(cls, response:", "are wrong, debug from here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work anymore", "text_im = PIL.Image.new( \"RGBA\", size=font.getsize(field.text, stroke_width=field.stroke_width), ) ImageDraw.Draw(text_im).text( (0, 0), text=field.text, font=font, spacing=0,", "[ # trocr fully depends on newlines, apply accents to each line separately", "= _language_iterator(block_annotations) for i, line in enumerate(lines): if next(paragraph_languages) is not None: need_trasnslation[i]", "\"\"\"Returns Pillow style coordinates (left, upper, right, lower).\"\"\" # A - 0 #", "it in constants.py # since it is not used anywhere else now PINK_PROXY", "make sure text order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for line in", "in (x, y, next_x, next_y): x, y = next_x, next_y continue # algo:", "maybe_annotations def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) -> BytesIO: FIELD_CAP = 150 fields =", "from here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) # Does not work anymore for some reason,", "180 < angle <= 270: left = vertices[2].get(\"x\") upper = vertices[3].get(\"y\") right =", "image description. 
In description words are combined into # lines, lines are separated", "# TODO: implement w/h detection ASAP, this is temporary # solutions: # 1)", "# type: ignore min((self._src_height, self.lower + self._padding)), # type: ignore ) # TODO:", "separated by newlines, there is a trailing newline. # Coordinates from words in", "f\"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}\" ) new_lines = lines.copy() for idx, translated_line", "degrees return 90 * round(degrees / 90) @property def coords(self) -> Tuple[int, int,", "lower).\"\"\" # A - 0 # B - 1 # C - 2", "with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, ttl=3600), ) as r: if r.status != 200:", "i had no success implementing this # 2) try to keep track of", "(accent_cog := ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No accents cog loaded\") return [ #", "right if self.right is None else max((self.right, right)) self.lower = lower if self.lower", "track of full coords and just calculate distance # a lot of coordinates", "next line causes segfaults if coords are wrong, debug from here blurred =", ":= ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No accents cog loaded\") return [ # trocr", "= await translator_cog.translate( \"\\n\".join(need_trasnslation.values()), language ) translated_lines = translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation):", "text = word[\"description\"] if remaining_line.startswith(text): current_word += 1 remaining_line = remaining_line[len(text) :].lstrip() #", "angle to 90 degrees return 90 * round(degrees / 90) @property def coords(self)", "self.left # type: ignore assert False # noqa @property def font_size(self) -> int:", "(90, 270): return self.right - self.left # type: ignore assert False # noqa", "= fields[:FIELD_CAP] src = src.convert(\"RGBA\") for field in fields: cropped = src.crop(field.coords_padded) #", "await r.json() raise 
PINKError( f\"Error in underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}' )", "Tuple[int, int]) -> None: if not self.initialized: # Get angle from first word", "is None else max((self.lower, lower)) @staticmethod def _vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int, int],", "ignore delta_x = next_x - x # type: ignore degrees = math.degrees(math.atan2(delta_y, delta_x))", "are known, but i had no success implementing this # 2) try to", "0: degrees += 360 # compensate missing vertices degrees += 90 * i", "new_lines[idx] = translated_line return new_lines async def ocr_translate( ctx: Context, image: StaticImage, language:", "self.upper: Optional[int] = None self.right: Optional[int] = None self.lower: Optional[int] = None self.angle", "be missing if None in (x, y, next_x, next_y): x, y = next_x,", "hard dependency by not reading it in constants.py # since it is not", "each line separately and # replace any newlines with spaces to make sure", "\"requests\": [ { \"features\": [{\"type\": \"TEXT_DETECTION\"}], \"image\": { \"source\": { \"imageUri\": image_url, }", "{ \"imageUri\": image_url, } }, } ] }, headers={ \"x-origin\": \"https://explorer.apis.google.com\", \"x-referer\": \"https://explorer.apis.google.com\",", "error.get(\"code\") message = error.get(\"message\", \"unknown\") return cls(code, message) def __str__(self) -> str: base", "extract_language(data: Any) -> Optional[str]: if (properties := data.get(\"property\")) is None: return None if", "image: StaticImage, language: Union[str, Accent] ) -> Tuple[BytesIO, str]: src = await image.to_pil_image(ctx)", "# since it is not used anywhere else now PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN", "2 # D - 3 # # A----B # | | angle =", ") # TODO: implement w/h detection ASAP, this is temporary # solutions: #", "causes segfaults if coords are wrong, debug from here blurred = cropped.filter(ImageFilter.GaussianBlur(10)) #", "is temporary # solutions: # 1) 
https://stackoverflow.com/a/9972699 # text surrounding box dimensions are", "_VerticesType, src_size: Tuple[int, int], angle: int ) -> Tuple[int, int, int, int]: \"\"\"Returns", "field.add_word(word[\"boundingPoly\"][\"vertices\"], src.size) except AngleUndetectable: notes += f\"angle for `{word}` is undetectable\\n\" else: break", "D angle = 135 # \\ / # A # # B---C #", "Sequence[TextField]) -> BytesIO: FIELD_CAP = 150 fields = fields[:FIELD_CAP] src = src.convert(\"RGBA\") for", "line.casefold() != original_line.casefold(): fields.append(field) if not fields: raise PINKError(\"could not translate anything on", "degree tilted text, ignore this # TEMPORARY: truncate angle to 90 degrees return", "len(translated_lines) != len(need_trasnslation): raise RuntimeError( f\"expected {len(need_trasnslation)} translated lines, got {len(translated_lines)}\" ) new_lines", "-> int: return max((1, int(1.3333333 * self.height) - 2)) @property def stroke_width(self) ->", "sure text order is preserved accent_cog.apply_accents_to_text(line, [accent]).replace(\"\\n\", \" \") for line in lines", "self.right - self.left # type: ignore assert False # noqa @property def font_size(self)", "lot of coordinates might be missing, 1st solution is more reliable if it", "raise PINKError(\"no text detected\", formatted=False) return maybe_annotations def _draw_trocr(src: PIL.Image, fields: Sequence[TextField]) ->", "B # / \\ # A C angle = 45 # \\ /", "# 1) https://stackoverflow.com/a/9972699 # text surrounding box dimensions are known, but i had", "# a lot of coordinates might be missing, 1st solution is more reliable", "None: left = 0 if upper is None: upper = 0 if right", "vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif 270 < angle <= 360: left = vertices[3].get(\"x\")", "lines, got {len(translated_lines)}\" ) new_lines = lines.copy() for idx, translated_line in zip(need_trasnslation.keys(), translated_lines):", "for word in word_annotations[current_word:]: text = 
word[\"description\"] if remaining_line.startswith(text): current_word += 1 remaining_line", "/ 12))) @property def initialized(self) -> bool: return None not in self.coords def", "paragraph_language or block_language async def ocr(ctx: Context, image_url: str) -> Dict[str, Any]: async", "KNOWN_HINTS = { None: \"The world is on fire, something really bad happened.", "since it is not used anywhere else now PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN =", "A---D # # B # / \\ # A C angle = 45", "left = vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif", "\"TEXT_DETECTION\"}], \"image\": { \"source\": { \"imageUri\": image_url, } }, } ] }, headers={", "likely fail\" ) else: json = await r.json() image_url = f\"{PINK_PROXY}/{json['id']}\" async with", "AngleUndetectable # # truncate last digit, OCR often returns 1-2 degree tilted text,", "int, int, int]: return ( max((0, self.left - self._padding)), # type: ignore max((0,", "paragraph_language = extract_language(paragraph) yield paragraph_language or block_language # line grouping differs between simple", "blocks: block_language = extract_language(block) for paragraph in block[\"paragraphs\"]: paragraph_language = extract_language(paragraph) yield paragraph_language", "src_size, self.angle ) self.left = left if self.left is None else min((self.left, left))", "-> List[str]: if (accent_cog := ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No accents cog loaded\")", "vertices[3].get(\"y\") right = vertices[0].get(\"x\") lower = vertices[1].get(\"y\") elif 270 < angle <= 360:", "self.lower + self._padding)), # type: ignore ) # TODO: implement w/h detection ASAP,", "if self.angle in (0, 180, 360): return self.right - self.left # type: ignore", "and # replace any newlines with spaces to make sure text order is", "async with ctx.session.post( f\"{PINK_PROXY}\", headers=dict(authorization=PINK_PROXY_TOKEN), json=dict(url=image_url, 
ttl=3600), ) as r: if r.status !=", "r.status != 200: if r.content_type.lower() != \"application/json\": reason = await r.text() if reason.count(\"\\n\")", "lower = vertices[1].get(\"y\") elif 270 < angle <= 360: left = vertices[3].get(\"x\") upper", "ImageFont.truetype(\"DejaVuSans.ttf\") class GoogleOCRError(PINKError): KNOWN_HINTS = { None: \"The world is on fire, something", "round(degrees / 90) @property def coords(self) -> Tuple[int, int, int, int]: return (self.left,", "_VertexType) -> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices) x, y =", "* self.height) - 2)) @property def stroke_width(self) -> int: return max((1, round(self.font_size /", "\"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else: raise PINKError(\"no text detected\", formatted=False) return maybe_annotations", "elif 90 < angle <= 180: left = vertices[1].get(\"x\") upper = vertices[2].get(\"y\") right", "!= 200: await ctx.reply( f\"Unable to reach proxy: {r.status}\\n\" f\"Will try raw URL", "used anywhere else now PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT =", "from typing import Any, Dict, List, Tuple, Union, Iterator, Optional, Sequence import PIL", "# Does not work anymore for some reason, black stroke is good anyway", "int ) -> Tuple[int, int, int, int]: \"\"\"Returns Pillow style coordinates (left, upper,", "@property def font_size(self) -> int: return max((1, int(1.3333333 * self.height) - 2)) @property", "paragraph_language or block_language # line grouping differs between simple annotations and paragraph grouping", "else max((self.lower, lower)) @staticmethod def _vertices_to_coords( vertices: _VerticesType, src_size: Tuple[int, int], angle: int", "image\", formatted=False) result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats = f\"Words: {current_word}\\nLines:", "360): return self.right 
- self.left # type: ignore if self.angle in (90, 270):", "they can be joined # by checking full image description. In description words", "line causes segfaults if coords are wrong, debug from here blurred = cropped.filter(ImageFilter.GaussianBlur(10))", "delta_y = y - next_y # type: ignore delta_x = next_x - x", "next_x, next_y = get_coords(next(cycle)) # Any vertex coordinate can be missing if None", "# \\ / # C # # D----A # | | angle =", "else now PINK_PROXY = os.environ[\"PINK_PROXY\"] PINK_PROXY_TOKEN = f\"Bearer {os.environ['PINK_PROXY_TOKEN']}\" FONT = ImageFont.truetype(\"DejaVuSans.ttf\") class", "if (accent_cog := ctx.bot.get_cog(\"Accents\")) is None: raise RuntimeError(\"No accents cog loaded\") return [", "same line can be merged lines = annotations[\"fullTextAnnotation\"][\"text\"][:-1].split(\"\\n\") if isinstance(language, Accent): new_lines =", "# # D----A # | | angle = 270 # C----B # #", "270 # C----B # # D # / \\ # C A angle", "remaining_line = remaining_line[len(text) :].lstrip() # TODO: merge multiple lines into box try: field.add_word(word[\"boundingPoly\"][\"vertices\"],", "270): return self.right - self.left # type: ignore assert False # noqa @property", "making this a hard dependency by not reading it in constants.py # since", "text=field.text, font=font, spacing=0, stroke_width=field.stroke_width, stroke_fill=(0, 0, 0), ) src.alpha_composite( text_im.resize( ( min((text_im.width, field.width)),", "self.angle = 0 self._src_width, self._src_height = src.size self._padding = padding def add_word(self, vertices:", "if len((responses := json[\"responses\"])) == 0: return {} maybe_annotations = responses[0] if \"textAnnotations\"", "checking full image description. 
In description words are combined into # lines, lines", "-> Tuple[Optional[int], Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices) x, y = get_coords(next(cycle))", "src.save(result, format=\"PNG\") return BytesIO(result.getvalue()) def _apply_accents(ctx: Context, lines: List[str], accent: Accent) -> List[str]:", "Optional[int]]: return vertex.get(\"x\"), vertex.get(\"y\") cycle = itertools.cycle(vertices) x, y = get_coords(next(cycle)) for i", ") new_lines = lines.copy() for idx, translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx] = translated_line", "data.get(\"property\")) is None: return None if (languages := properties.get(\"detectedLanguages\")) is None: return None", "on image\", formatted=False) result = await ctx.bot.loop.run_in_executor(None, _draw_trocr, src, fields) stats = f\"Words:", "left)) self.upper = upper if self.upper is None else min((self.upper, upper)) self.right =", "word separately, but they can be joined # by checking full image description.", "lower is None: lower = src_size[1] return (left, upper, right, lower) @staticmethod def", "angle = 360/0 # D----C # # A # / \\ # D", "RuntimeError(\"No accents cog loaded\") return [ # trocr fully depends on newlines, apply", "json[\"responses\"])) == 0: return {} maybe_annotations = responses[0] if \"textAnnotations\" not in maybe_annotations:", "word self.angle = self._get_angle(vertices) left, upper, right, lower = self._vertices_to_coords( vertices, src_size, self.angle", "detection ASAP, this is temporary # solutions: # 1) https://stackoverflow.com/a/9972699 # text surrounding", "Any vertex coordinate can be missing if None in (x, y, next_x, next_y):", "line separately and # replace any newlines with spaces to make sure text", "r.content_type.lower() != \"application/json\": reason = await r.text() if reason.count(\"\\n\") > 1: # we", "fields.append(field) if not fields: raise PINKError(\"could not translate anything on 
image\", formatted=False) result", "vertices[0].get(\"x\") upper = vertices[1].get(\"y\") right = vertices[2].get(\"x\") lower = vertices[3].get(\"y\") elif 90 <", "i, line in enumerate(lines): if next(paragraph_languages) is not None: need_trasnslation[i] = line if", "is None else max((self.right, right)) self.lower = lower if self.lower is None else", "for word in paragraph[\"words\"]: last_symbol = word[\"symbols\"][-1] if (symbol_properties := last_symbol.get(\"property\")) is None:", "C - 2 # D - 3 # # A----B # | |", "underlying API[{r.status}]: \" f'{json.get(\"message\", \"unknown error\")}' ) json = await r.json() if len((responses", "literally does not see previous statement delta_y = y - next_y # type:", "# field.inverted_avg_color = ImageOps.invert( # blurred.resize((1, 1)).convert(\"L\") # ).getpixel((0, 0)) # ugly!!! src.paste(blurred,", ".types import StaticImage _VertexType = Dict[str, int] _VerticesType = Tuple[_VertexType, _VertexType, _VertexType, _VertexType]", "self._padding)), # type: ignore min((self._src_width, self.right + self._padding)), # type: ignore min((self._src_height, self.lower", "if detected_break[\"type\"] != \"EOL_SURE_SPACE\": continue yield paragraph_language or block_language async def ocr(ctx: Context,", "# replace any newlines with spaces to make sure text order is preserved", "\"\\n\".join(need_trasnslation.values()), language ) translated_lines = translated.split(\"\\n\") if len(translated_lines) != len(need_trasnslation): raise RuntimeError( f\"expected", "= self._vertices_to_coords( vertices, src_size, self.angle ) self.left = left if self.left is None", "responses[0] if \"textAnnotations\" not in maybe_annotations: if \"error\" in maybe_annotations: raise GoogleOCRError.from_response(maybe_annotations) else:", "got {len(translated_lines)}\" ) new_lines = lines.copy() for idx, translated_line in zip(need_trasnslation.keys(), translated_lines): new_lines[idx]" ]
[ "subprocess import check_call parser = ArgumentParser() parser.add_argument(\"search\", type=str) args = parser.parse_args() check_call( f'grep", "= parser.parse_args() check_call( f'grep {args.search} $(find /tmp/tmpbuild/ -name \"CMakeLists.txt\" -or -name \"*.cmake\")', shell=True,", "import check_call parser = ArgumentParser() parser.add_argument(\"search\", type=str) args = parser.parse_args() check_call( f'grep {args.search}", "ArgumentParser from subprocess import check_call parser = ArgumentParser() parser.add_argument(\"search\", type=str) args = parser.parse_args()", "from argparse import ArgumentParser from subprocess import check_call parser = ArgumentParser() parser.add_argument(\"search\", type=str)", "parser.parse_args() check_call( f'grep {args.search} $(find /tmp/tmpbuild/ -name \"CMakeLists.txt\" -or -name \"*.cmake\")', shell=True, )", "import ArgumentParser from subprocess import check_call parser = ArgumentParser() parser.add_argument(\"search\", type=str) args =", "parser = ArgumentParser() parser.add_argument(\"search\", type=str) args = parser.parse_args() check_call( f'grep {args.search} $(find /tmp/tmpbuild/", "type=str) args = parser.parse_args() check_call( f'grep {args.search} $(find /tmp/tmpbuild/ -name \"CMakeLists.txt\" -or -name", "= ArgumentParser() parser.add_argument(\"search\", type=str) args = parser.parse_args() check_call( f'grep {args.search} $(find /tmp/tmpbuild/ -name", "args = parser.parse_args() check_call( f'grep {args.search} $(find /tmp/tmpbuild/ -name \"CMakeLists.txt\" -or -name \"*.cmake\")',", "#!/usr/bin/env python from argparse import ArgumentParser from subprocess import check_call parser = ArgumentParser()", "check_call parser = ArgumentParser() parser.add_argument(\"search\", type=str) args = parser.parse_args() check_call( f'grep {args.search} $(find", "argparse import ArgumentParser from subprocess import check_call parser = ArgumentParser() parser.add_argument(\"search\", type=str) args", 
"<filename>search.py #!/usr/bin/env python from argparse import ArgumentParser from subprocess import check_call parser =", "from subprocess import check_call parser = ArgumentParser() parser.add_argument(\"search\", type=str) args = parser.parse_args() check_call(", "ArgumentParser() parser.add_argument(\"search\", type=str) args = parser.parse_args() check_call( f'grep {args.search} $(find /tmp/tmpbuild/ -name \"CMakeLists.txt\"", "python from argparse import ArgumentParser from subprocess import check_call parser = ArgumentParser() parser.add_argument(\"search\",", "parser.add_argument(\"search\", type=str) args = parser.parse_args() check_call( f'grep {args.search} $(find /tmp/tmpbuild/ -name \"CMakeLists.txt\" -or" ]
[ "django.apps import AppConfig class DjangoPricesOpenExchangeRatesConfig(AppConfig): name = 'django_prices_openexchangerates' verbose_name = \"Django prices openexchangerates", "from django.apps import AppConfig class DjangoPricesOpenExchangeRatesConfig(AppConfig): name = 'django_prices_openexchangerates' verbose_name = \"Django prices", "import AppConfig class DjangoPricesOpenExchangeRatesConfig(AppConfig): name = 'django_prices_openexchangerates' verbose_name = \"Django prices openexchangerates integration\"" ]
[ "False self.ball_at_bottom_message_sent = False self.ball_positioned = False self.offset = 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation,", "self.current_camera == \"ASUS_CAMERA\" else 15 params.maxArea = 2500 if self.current_camera == \"ASUS_CAMERA\" else", "self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] > 10: if target[0]", "detector = cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints = detector.detect(image_binary)", "!= None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball", "target = None if circles: circles = np.uint16(np.around(circles)) max_r = 0.0 target =", "Camera frame\") elif target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm ==", "= 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription", "keypoint.pt[1] r = keypoint.size / 2.0 circles.append([x, y, r]) target = None if", "= True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\") elif target != None and self.current_camera ==", "String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String,", "import rospy import cv2 import numpy as np import cv_bridge import time from", "queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\",", "target[2], (255, 0, 0), 
1, 8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0,", "(255, 0, 0), 1, 8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255,", "= None if circles: circles = np.uint16(np.around(circles)) max_r = 0.0 target = circles[0]", "2.0 circles.append([x, y, r]) target = None if circles: circles = np.uint16(np.around(circles)) max_r", "def state_change(self, command): if command.data == \"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting to", "self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\") elif target != None and self.current_camera", "False front_camera_x_reference = 0 front_camera_y_reference = 0 move_robot_or_arm = \"\" ball_position = None", "import BallPosition class Detector: current_camera = None camera_subscription = None bridge = None", "= cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity = True params.filterByColor = False params.filterByCircularity =", "= cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9,", "= rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self,", "(lower2, upper2) = ([65, 50, 170], [100, 70, 255]) lower2 = np.array(lower2, dtype", "= 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition,", "False def camera_change(self, command): self.current_camera = command.data rospy.loginfo(\"Detector: current camera changed to %s\",", "\"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected = 
True self.ball_position.x", "= keypoint.pt[1] r = keypoint.size / 2.0 circles.append([x, y, r]) target = None", "changed to %s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera =", "10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width *", "70, 255]) lower2 = np.array(lower2, dtype = \"uint8\") upper2 = np.array(upper2, dtype =", "jupiter.msg import BallPosition class Detector: current_camera = None camera_subscription = None bridge =", "output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) =", "cam emits an upside-down image, so adjust for orientation if target[1] < 10:", "cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255,", "self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] > 10:", "self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation: # the real arm cam emits an upside-down", "y: %d, radius: %d\", target[0], target[1], target[2]) if self.current_camera == \"ASUS_CAMERA\" and self.asus_ballpark(target[0],", "camera_change(self, command): self.current_camera = command.data rospy.loginfo(\"Detector: current camera changed to %s\", self.current_camera) if", "<= x and x <= (image.width * 0.85) if __name__ == \"__main__\": rospy.init_node(\"detector\")", "searching for ball\") def process_image(self, image): if self.state == \"NO_SEARCH\": return image_cv =", "= detector.detect(image_binary) circles = [] for keypoint in 
keypoints: x = keypoint.pt[0] y", "20.0 # Create a detector with the parameters, according to your OpenCV version", "processed_image_publisher = None processed_image_bw_publisher = None offset = 100 wheel_publisher = None state", "ball_position = None def __init__(self): init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\",", "ball\") def process_image(self, image): if self.state == \"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\")", "two cameras have different sensors, so their color rendition varies. Adjust for this", "else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None and self.current_camera", "0, 0), 1, 8, 0) # publish the keypoints and target circle superimposed", "starting to search for ball\") elif command.data == \"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector:", "10) self.ball_position = BallPosition() self.ball_position.detected = False def camera_change(self, command): self.current_camera = command.data", "= ([0, 0, 100], [55, 55, 255]) # dark red lower = np.array(lower,", "real arm cam emits an upside-down image, so adjust for orientation if target[1]", "True params.filterByColor = False params.filterByCircularity = True params.filterByArea = True params.minArea = 30", "\"ASUS_CAMERA\" else True): max_r = circle[2] target = circle if target != None:", "and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected = True self.ball_position.x =", "8, 0) # publish the keypoints and target circle superimposed on the source", "\"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected = True self.ball_position.x = target[0] self.ball_position.y =", "= 
\"SEARCH\" rospy.loginfo(\"Detector: starting to search for ball\") elif command.data == \"NO_SEARCH\": self.state", "= None processed_image_publisher = None processed_image_bw_publisher = None offset = 100 wheel_publisher =", "0) # The two cameras have different sensors, so their color rendition varies.", "circles: circles = np.uint16(np.around(circles)) max_r = 0.0 target = circles[0] for circle in", "elif target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector:", "mask = mask) else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0) (lower,", "1.0 if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 # Create a detector with", "= rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if", "if self.current_camera == \"ASUS_CAMERA\": (lower, upper) = ([0, 0, 100], [55, 55, 255])", "= 10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10) self.ball_position = BallPosition() self.ball_position.detected", "np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image,", "else 15 params.maxArea = 2500 if self.current_camera == \"ASUS_CAMERA\" else 38400 params.minConvexity =", "(target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0) processed_image =", "image, so adjust for orientation if target[1] < 10: if target[0] < image.width", "self.ball_position.detected = True self.ball_position.x = target[0] self.ball_position.y = target[1] self.ball_position.radius = target[2] self.ball_position.img_width", "issue when trying to filter the red colors in the image. 
if self.current_camera", "self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String,", "= rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10) self.ball_position = BallPosition() self.ball_position.detected = False def", "rospy.loginfo(\"Detector: ball is at bottom of Asus Camera frame\") elif target != None", "return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0) # The", "and self.current_camera == \"ASUS_CAMERA\" and abs(target[1] - (image.height)) < (image.height / 10.0) and", "target[2], (255, 0, 0), 1, 8, 0) # publish the keypoints and target", "/ 2.0 circles.append([x, y, r]) target = None if circles: circles = np.uint16(np.around(circles))", "and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected", "String, queue_size = 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10) self.bridge =", "255]) # dark red lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper,", "rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10)", "< image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else:", "= \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask =", "orientation if target[1] < 10: if target[0] 
< image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif", "command): self.current_camera = command.data rospy.loginfo(\"Detector: current camera changed to %s\", self.current_camera) if self.camera_subscription:", "= \"uint8\") upper = np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper)", "Adjust for this issue when trying to filter the red colors in the", "blobs keypoints = detector.detect(image_binary) circles = [] for keypoint in keypoints: x =", "True params.filterByArea = True params.minArea = 30 if self.current_camera == \"ASUS_CAMERA\" else 15", "Image, self.process_image) elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\",", "= cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else: #", "= 1.0 if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 # Create a detector", "if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False", "color rendition varies. 
Adjust for this issue when trying to filter the red", "processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1])", "= np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image,", "dtype = \"uint8\") mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask", "0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm", "dtype = \"uint8\") upper2 = np.array(upper2, dtype = \"uint8\") mask2 = cv2.inRange(blurred_image2, lower2,", "= \"\" ball_position = None def __init__(self): init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String,", "0), 1, 8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)", "self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\") elif target != None and self.current_camera == \"ASUS_CAMERA\" and", "= True params.minArea = 30 if self.current_camera == \"ASUS_CAMERA\" else 15 params.maxArea =", "import Image from std_msgs.msg import String from common import * from jupiter.msg import", "else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self, command): if command.data ==", "params.minCircularity = 0.25 params.maxCircularity = 1.0 if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0", "= 100 wheel_publisher = None state = \"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned =", "False params.filterByConvexity = True params.filterByColor = False 
params.filterByCircularity = True params.filterByArea = True", "= keypoint.size / 2.0 circles.append([x, y, r]) target = None if circles: circles", "return (image.width * 0.65) <= x and x <= (image.width * 0.85) if", "x = keypoint.pt[0] y = keypoint.pt[1] r = keypoint.size / 2.0 circles.append([x, y,", "100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription =", "cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8,", "if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False self.ball_positioned = False", "self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting to search for ball\") elif command.data == \"NO_SEARCH\":", "else True): max_r = circle[2] target = circle if target != None: processed_image_bw", "!= None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center =", "None and self.current_camera == \"ASUS_CAMERA\" and abs(target[1] - (image.height)) < (image.height / 10.0)", "upper2 = np.array(upper2, dtype = \"uint8\") mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange =", "= None state = \"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned = False front_camera_x_reference =", "= \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size", "= mask) else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper)", "self.ball_position.img_height = image.height 
self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def asus_ballpark(self, x, image): return (image.width", "= target[1] self.ball_position.radius = target[2] self.ball_position.img_width = image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state", "= cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints = detector.detect(image_binary) circles = [] for keypoint", "rendition varies. Adjust for this issue when trying to filter the red colors", "self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at bottom of Asus Camera frame\")", "keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2],", "if target != None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)", "to your OpenCV version (2 or 3) ver = (cv2.__version__).split('.') if int(ver[0]) <", "publish the keypoints and target circle superimposed on the source image from the", "cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2, upper2) = ([65, 50, 170], [100, 70,", "on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: rospy.loginfo(\"x: %d, y:", "10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher =", "b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: 
rospy.loginfo(\"x: %d, y: %d, radius:", "= 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10) self.processed_image_bw_publisher", "if self.state == \"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9,", "in the image. if self.current_camera == \"ASUS_CAMERA\": (lower, upper) = ([0, 0, 100],", "image. if self.current_camera == \"ASUS_CAMERA\": (lower, upper) = ([0, 0, 100], [55, 55,", "= 10) self.ball_position = BallPosition() self.ball_position.detected = False def camera_change(self, command): self.current_camera =", "else: if target[1] > 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif", "= \"MOVE_ROBOT\" def state_change(self, command): if command.data == \"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector:", "== \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 # Create a detector with the parameters, according", "self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False self.ball_positioned =", "\"ASUS_CAMERA\" else 15 params.maxArea = 2500 if self.current_camera == \"ASUS_CAMERA\" else 38400 params.minConvexity", "y = keypoint.pt[1] r = keypoint.size / 2.0 circles.append([x, y, r]) target =", "= 2500 if self.current_camera == \"ASUS_CAMERA\" else 38400 params.minConvexity = 0.2 params.maxConvexity =", "\"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask)", "100 wheel_publisher = None state = \"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned = False", "processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255,", 
"self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at bottom", "= cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia = False", "self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def asus_ballpark(self, x, image): return (image.width * 0.65) <=", "not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\") elif target != None", "frame\") elif target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\":", "lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype = \"uint8\") mask", "the real arm cam emits an upside-down image, so adjust for orientation if", "params.filterByInertia = False params.filterByConvexity = True params.filterByColor = False params.filterByCircularity = True params.filterByArea", "self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size =", "Image, queue_size = 10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10) self.ball_position =", "np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image,", "= None def __init__(self): init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String,", "%d\", target[0], target[1], target[2]) if self.current_camera == 
\"ASUS_CAMERA\" and self.asus_ballpark(target[0], image) and not", "lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange,", "> image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else:", "== \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected = True", "10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10) self.processed_image_bw_publisher =", "from common import * from jupiter.msg import BallPosition class Detector: current_camera = None", "target[1] < 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] >", "and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\")", "\"ASUS_CAMERA\" else 38400 params.minConvexity = 0.2 params.maxConvexity = 1.0 params.minCircularity = 0.25 params.maxCircularity", "self.ball_at_bottom_message_sent = False self.ball_positioned = False self.offset = 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"),", "self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm =", "1.0 params.minCircularity = 0.25 params.maxCircularity = 1.0 if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs 
=", "0 move_robot_or_arm = \"\" ball_position = None def __init__(self): init_arguments(self) self.state = \"NO_SEARCH\"", "= 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher", "\"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned = False front_camera_x_reference = 0 front_camera_y_reference = 0", "= np.uint16(np.around(circles)) max_r = 0.0 target = circles[0] for circle in circles: if", "target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation:", "image): if self.state == \"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv,", "self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def", "self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\") elif", "[] for keypoint in keypoints: x = keypoint.pt[0] y = keypoint.pt[1] r =", "None state = \"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned = False front_camera_x_reference = 0", "a detector with the parameters, according to your OpenCV version (2 or 3)", "if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self, command): if", "cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints = detector.detect(image_binary) circles =", "# the real arm cam emits an upside-down image, so adjust for orientation", "= cv2.GaussianBlur(image_cv, (9, 9), 0) # The two cameras have different 
sensors, so", "output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128,", "False ball_positioned = False front_camera_x_reference = 0 front_camera_y_reference = 0 move_robot_or_arm = \"\"", "(thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia", "np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8,", "= rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10)", "target = circle if target != None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0,", "%s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent", "dtype = \"uint8\") upper = np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower,", "rospy.loginfo(\"Detector: current camera changed to %s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera ==", "with the parameters, according to your OpenCV version (2 or 3) ver =", "rospy.loginfo(\"Detector: starting to search for ball\") elif command.data == \"NO_SEARCH\": self.state = \"NO_SEARCH\"", "else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None and self.current_camera == \"ARM_CAMERA\"", "image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: 
ball found\") elif target", "OpenCV version (2 or 3) ver = (cv2.__version__).split('.') if int(ver[0]) < 3: detector", "= (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0) processed_image", "255]) lower2 = np.array(lower2, dtype = \"uint8\") upper2 = np.array(upper2, dtype = \"uint8\")", "and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at bottom of", "= True params.filterByArea = True params.minArea = 30 if self.current_camera == \"ASUS_CAMERA\" else", "0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2],", "cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity = True params.filterByColor = False", "== \"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for ball\") def process_image(self, image):", "= keypoint.pt[0] y = keypoint.pt[1] r = keypoint.size / 2.0 circles.append([x, y, r])", "center, target[2], (255, 0, 0), 1, 8, 0) # publish the keypoints and", "= 0 front_camera_y_reference = 0 move_robot_or_arm = \"\" ball_position = None def __init__(self):", "circles[0] for circle in circles: if circle[2] > max_r and (circle[1] >= (image.height", "superimposed on the source image from the camera and on the b&w image", "!= None and self.current_camera == \"ASUS_CAMERA\" and abs(target[1] - (image.height)) < (image.height /", "(9, 9), 0) # The two cameras have different sensors, so their color", "output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv,", "Asus Camera frame\") elif target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm", "\"ASUS_CAMERA\" and 
self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball", "= None bridge = None processed_image_publisher = None processed_image_bw_publisher = None offset =", "ball_at_middle_X_of_Asus_Camera = False ball_positioned = False front_camera_x_reference = 0 front_camera_y_reference = 0 move_robot_or_arm", "| cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity = True params.filterByColor =", "r = keypoint.size / 2.0 circles.append([x, y, r]) target = None if circles:", "front_camera_x_reference = 0 front_camera_y_reference = 0 move_robot_or_arm = \"\" ball_position = None def", "__init__(self): init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher =", "True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at bottom of Asus Camera frame\") elif target", "cameras have different sensors, so their color rendition varies. 
Adjust for this issue", "self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10) self.state_machine_publisher =", "self.ball_position.radius = target[2] self.ball_position.img_width = image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\"", "= \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask =", "= False self.offset = 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera", "== \"ASUS_CAMERA\" and abs(target[1] - (image.height)) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and", "None bridge = None processed_image_publisher = None processed_image_bw_publisher = None offset = 100", "blurred_image, mask = mask) (lower2, upper2) = ([65, 50, 170], [100, 70, 255])", "colors in the image. 
if self.current_camera == \"ASUS_CAMERA\": (lower, upper) = ([0, 0,", "center, target[2], (255, 0, 0), 1, 8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]),", "target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\")", "rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size", "True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\") elif target != None and self.current_camera == \"ASUS_CAMERA\"", "= np.array(lower2, dtype = \"uint8\") upper2 = np.array(upper2, dtype = \"uint8\") mask2 =", "command.data rospy.loginfo(\"Detector: current camera changed to %s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera", "0) (lower, upper) = ([0, 0, 100], [70, 100, 255]) lower = np.array(lower,", "mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary)", "10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is", "max_r = 0.0 target = circles[0] for circle in circles: if circle[2] >", "else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] > 10: if", "and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation: # the real", "if target[0] < image.width * 0.45: 
self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width * 0.55:", "import cv_bridge import time from sensor_msgs.msg import Image from std_msgs.msg import String from", "\"uint8\") upper = np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output", "camera changed to %s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera", "self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None and", "= cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2, upper2)", "keypoints and target circle superimposed on the source image from the camera and", "self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\",", "\"NO_SEARCH\" def asus_ballpark(self, x, image): return (image.width * 0.65) <= x and x", "source image from the camera and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw,", "numpy as np import cv_bridge import time from sensor_msgs.msg import Image from std_msgs.msg", "= \"uint8\") upper2 = np.array(upper2, dtype = \"uint8\") mask2 = cv2.inRange(blurred_image2, lower2, upper2)", "0.0 target = circles[0] for circle in circles: if circle[2] > max_r and", "if command.data == \"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting to search for ball\")", "255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = 
cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity = True", "mask = mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)", "stopped searching for ball\") def process_image(self, image): if self.state == \"NO_SEARCH\": return image_cv", "r]) target = None if circles: circles = np.uint16(np.around(circles)) max_r = 0.0 target", "if self.current_camera == \"ASUS_CAMERA\" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True", "for orientation if target[1] < 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\")", "2500 if self.current_camera == \"ASUS_CAMERA\" else 38400 params.minConvexity = 0.2 params.maxConvexity = 1.0", "None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation: # the", "cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale =", "blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0) # The two cameras have different sensors,", "cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output = output_light_orange", "target[0], target[1], target[2]) if self.current_camera == \"ASUS_CAMERA\" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera:", "upper = np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange =", "(9, 9), 0) (lower, upper) = ([0, 0, 100], [70, 100, 255]) lower", "queue_size = 10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10) self.ball_position = BallPosition()", "have different sensors, so their color 
rendition varies. Adjust for this issue when", "self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm =", "ball position\") self.ball_position.detected = True self.ball_position.x = target[0] self.ball_position.y = target[1] self.ball_position.radius =", "self.is_simulation: # the real arm cam emits an upside-down image, so adjust for", "if target[1] < 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0]", "keypoints = detector.detect(image_binary) circles = [] for keypoint in keypoints: x = keypoint.pt[0]", "> max_r and (circle[1] >= (image.height * 0.5) if self.current_camera == \"ASUS_CAMERA\" else", "== \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm =", "mask = cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else:", "cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params()", "or 3) ver = (cv2.__version__).split('.') if int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params) else:", "self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False self.ball_positioned = False self.offset", "self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None and self.current_camera == \"ARM_CAMERA\" and", "\"uint8\") upper = np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange", "- (image.height)) < (image.height / 10.0) and 
self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent =", "(image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector:", "1, 8, 0) # publish the keypoints and target circle superimposed on the", "params = cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity = True params.filterByColor = False params.filterByCircularity", "ball\") elif command.data == \"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for ball\")", "55, 255]) # dark red lower = np.array(lower, dtype = \"uint8\") upper =", "String from common import * from jupiter.msg import BallPosition class Detector: current_camera =", "target[2]: rospy.loginfo(\"x: %d, y: %d, radius: %d\", target[0], target[1], target[2]) if self.current_camera ==", "and x <= (image.width * 0.85) if __name__ == \"__main__\": rospy.init_node(\"detector\") detector =", "* 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else:", "self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False self.ball_positioned", "def asus_ballpark(self, x, image): return (image.width * 0.65) <= x and x <=", "\"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else", "False params.filterByCircularity = True params.filterByArea = True params.minArea = 30 if self.current_camera ==", "trying to filter 
the red colors in the image. if self.current_camera == \"ASUS_CAMERA\":", "self.current_camera == \"ASUS_CAMERA\": (lower, upper) = ([0, 0, 100], [55, 55, 255]) #", "50, 170], [100, 70, 255]) lower2 = np.array(lower2, dtype = \"uint8\") upper2 =", ">= (image.height * 0.5) if self.current_camera == \"ASUS_CAMERA\" else True): max_r = circle[2]", "self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size =", "# Create a detector with the parameters, according to your OpenCV version (2", "Create a detector with the parameters, according to your OpenCV version (2 or", "== \"ASUS_CAMERA\" else True): max_r = circle[2] target = circle if target !=", "\"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: rospy.loginfo(\"x: %d, y: %d, radius: %d\", target[0], target[1],", "self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected = True self.ball_position.x = target[0]", "= target[2] self.ball_position.img_width = image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def", "self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0) # The two cameras have", "rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self, command):", "rospy.loginfo(\"x: %d, y: %d, radius: %d\", target[0], target[1], target[2]) if self.current_camera == \"ASUS_CAMERA\"", "\"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0) #", "rospy.loginfo(\"Detector: ball found\") 
elif target != None and self.current_camera == \"ASUS_CAMERA\" and abs(target[1]", "target[1] self.ball_position.radius = target[2] self.ball_position.img_width = image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state =", "= \"uint8\") mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask =", "\"MOVE_ROBOT\": if self.is_simulation: # the real arm cam emits an upside-down image, so", "np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image,", "abs(target[1] - (image.height)) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent", "= [] for keypoint in keypoints: x = keypoint.pt[0] y = keypoint.pt[1] r", "image from the camera and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\"))", "= cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw,", "camera and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: rospy.loginfo(\"x:", "%d, y: %d, radius: %d\", target[0], target[1], target[2]) if self.current_camera == \"ASUS_CAMERA\" and", "0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1,", "self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10) self.ball_position_publisher = 
rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size =", "= cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output =", "self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] > 10: if target[0] < image.width", "# publish the keypoints and target circle superimposed on the source image from", "params.maxArea = 2500 if self.current_camera == \"ASUS_CAMERA\" else 38400 params.minConvexity = 0.2 params.maxConvexity", "detector with the parameters, according to your OpenCV version (2 or 3) ver", "\"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask)", "< 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width", "\"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] > 10: if target[0] < image.width * 0.45:", "elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\"", "= True params.filterByColor = False params.filterByCircularity = True params.filterByArea = True params.minArea =", "if self.current_camera == \"ASUS_CAMERA\" else True): max_r = circle[2] target = circle if", "for circle in circles: if circle[2] > max_r and (circle[1] >= (image.height *", "mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output", "in circles: if circle[2] > max_r and (circle[1] >= (image.height * 0.5) if", "image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: 
self.robot_movement_publisher.publish(\"FORWARD_ARM\")", "dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask", "elif target != None and self.current_camera == \"ASUS_CAMERA\" and abs(target[1] - (image.height)) <", "self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at bottom of Asus Camera frame\") elif target !=", "rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\",", "self.ball_position.img_width = image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def asus_ballpark(self, x,", "params.maxConvexity = 1.0 params.minCircularity = 0.25 params.maxCircularity = 1.0 if self.current_camera == \"FRONT_CAMERA\":", "found\") elif target != None and self.current_camera == \"ASUS_CAMERA\" and abs(target[1] - (image.height))", "in keypoints: x = keypoint.pt[0] y = keypoint.pt[1] r = keypoint.size / 2.0", "upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else: # ARM_CAMERA blurred_image2 =", "process_image(self, image): if self.state == \"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image =", "0), 1, 8, 0) # publish the keypoints and target circle superimposed on", "= BallPosition() self.ball_position.detected = False def camera_change(self, command): self.current_camera = command.data rospy.loginfo(\"Detector: current", "False self.offset = 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera ==", "= False params.filterByConvexity = True 
params.filterByColor = False params.filterByCircularity = True params.filterByArea =", "lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else: # ARM_CAMERA blurred_image2", "std_msgs.msg import String from common import * from jupiter.msg import BallPosition class Detector:", "0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8, 0) # publish", "circles = np.uint16(np.around(circles)) max_r = 0.0 target = circles[0] for circle in circles:", "ball is at bottom of Asus Camera frame\") elif target != None and", "[70, 100, 255]) lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype", "radius: %d\", target[0], target[1], target[2]) if self.current_camera == \"ASUS_CAMERA\" and self.asus_ballpark(target[0], image) and", "< image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else:", "blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper) = ([0, 0, 100], [70,", "elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image)", "== \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation: # the real arm cam", "self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None and self.current_camera ==", "== \"MOVE_ROBOT\": if self.is_simulation: # the real arm cam emits an upside-down image,", "import cv2 import numpy as np import cv_bridge import time from sensor_msgs.msg import", "publishing ball position\") self.ball_position.detected = True self.ball_position.x = target[0] self.ball_position.y = target[1] self.ball_position.radius", "version 
(2 or 3) ver = (cv2.__version__).split('.') if int(ver[0]) < 3: detector =", "True): max_r = circle[2] target = circle if target != None: processed_image_bw =", "# The two cameras have different sensors, so their color rendition varies. Adjust", "arm cam emits an upside-down image, so adjust for orientation if target[1] <", "if self.current_camera == \"ASUS_CAMERA\" else 38400 params.minConvexity = 0.2 params.maxConvexity = 1.0 params.minCircularity", "self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self, command): if command.data", "image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target", "params.filterByColor = False params.filterByCircularity = True params.filterByArea = True params.minArea = 30 if", "cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity = True params.filterByColor = False params.filterByCircularity = True", "different sensors, so their color rendition varies. 
Adjust for this issue when trying", "from std_msgs.msg import String from common import * from jupiter.msg import BallPosition class", "== \"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting to search for ball\") elif command.data", "mask) else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper) =", "to %s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False", "10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10) self.ball_position = BallPosition() self.ball_position.detected =", "target[0] self.ball_position.y = target[1] self.ball_position.radius = target[2] self.ball_position.img_width = image.width self.ball_position.img_height = image.height", "import String from common import * from jupiter.msg import BallPosition class Detector: current_camera", "keypoint.size / 2.0 circles.append([x, y, r]) target = None if circles: circles =", "([0, 0, 100], [70, 100, 255]) lower = np.array(lower, dtype = \"uint8\") upper", "self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] > 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\")", "output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output)", "queue_size = 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10)", "None offset = 100 wheel_publisher = None state = \"\" ball_at_middle_X_of_Asus_Camera = False", "self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False self.ball_positioned = False self.offset = 100 self.camera_subscription", "queue_size = 10) self.ball_position = BallPosition() 
self.ball_position.detected = False def camera_change(self, command): self.current_camera", "* from jupiter.msg import BallPosition class Detector: current_camera = None camera_subscription = None", "self.current_camera == \"ASUS_CAMERA\" else 38400 params.minConvexity = 0.2 params.maxConvexity = 1.0 params.minCircularity =", "cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8, 0) # publish the", "= False ball_positioned = False front_camera_x_reference = 0 front_camera_y_reference = 0 move_robot_or_arm =", "self.current_camera == \"ASUS_CAMERA\" and abs(target[1] - (image.height)) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera", "self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size", "wheel_publisher = None state = \"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned = False front_camera_x_reference", "if target[1] > 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0]", "rospy import cv2 import numpy as np import cv_bridge import time from sensor_msgs.msg", "The two cameras have different sensors, so their color rendition varies. 
Adjust for", "camera_subscription = None bridge = None processed_image_publisher = None processed_image_bw_publisher = None offset", "position\") self.ball_position.detected = True self.ball_position.x = target[0] self.ball_position.y = target[1] self.ball_position.radius = target[2]", "target[1], target[2]) if self.current_camera == \"ASUS_CAMERA\" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera", "= \"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned = False front_camera_x_reference = 0 front_camera_y_reference =", "0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] >", "self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation: # the real arm", "\"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\"", "(image.height * 0.5) if self.current_camera == \"ASUS_CAMERA\" else True): max_r = circle[2] target", "else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper) = ([0,", "\"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for ball\") def process_image(self, image): if self.state == \"NO_SEARCH\":", "= 0.0 target = circles[0] for circle in circles: if circle[2] > max_r", "image): return (image.width * 0.65) <= x and x <= (image.width * 0.85)", "%d, radius: %d\", target[0], target[1], target[2]) if self.current_camera == \"ASUS_CAMERA\" and self.asus_ballpark(target[0], image)", "target = circles[0] for circle in circles: if circle[2] > max_r and (circle[1]", "= 30 if self.current_camera == \"ASUS_CAMERA\" else 15 params.maxArea = 2500 
if self.current_camera", "asus_ballpark(self, x, image): return (image.width * 0.65) <= x and x <= (image.width", "self.ball_position.x = target[0] self.ball_position.y = target[1] self.ball_position.radius = target[2] self.ball_position.img_width = image.width self.ball_position.img_height", "= cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper) = ([0, 0, 100], [70, 100,", "= self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0) # The two cameras", "\"ASUS_CAMERA\": (lower, upper) = ([0, 0, 100], [55, 55, 255]) # dark red", "cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params =", "* 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else:", "self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\"", "BallPosition class Detector: current_camera = None camera_subscription = None bridge = None processed_image_publisher", "upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange,", "== \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False self.ball_positioned = False self.offset =", "self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10) self.ball_position = BallPosition() self.ball_position.detected = False", "state_change(self, command): if command.data == \"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting to search", "= None 
offset = 100 wheel_publisher = None state = \"\" ball_at_middle_X_of_Asus_Camera =", "= command.data rospy.loginfo(\"Detector: current camera changed to %s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if", "bridge = None processed_image_publisher = None processed_image_bw_publisher = None offset = 100 wheel_publisher", "= ([65, 50, 170], [100, 70, 255]) lower2 = np.array(lower2, dtype = \"uint8\")", "command.data == \"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting to search for ball\") elif", "params.minConvexity = 0.2 params.maxConvexity = 1.0 params.minCircularity = 0.25 params.maxCircularity = 1.0 if", "ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper) = ([0, 0, 100],", "self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self, command): if command.data == \"SEARCH\": self.state =", "circle if target != None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0),", "so their color rendition varies. Adjust for this issue when trying to filter", "common import * from jupiter.msg import BallPosition class Detector: current_camera = None camera_subscription", "filter the red colors in the image. 
if self.current_camera == \"ASUS_CAMERA\": (lower, upper)", "self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at bottom of Asus Camera", "circles.append([x, y, r]) target = None if circles: circles = np.uint16(np.around(circles)) max_r =", "target circle superimposed on the source image from the camera and on the", "search for ball\") elif command.data == \"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching", "= None camera_subscription = None bridge = None processed_image_publisher = None processed_image_bw_publisher =", "= cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints = detector.detect(image_binary) circles", "= None processed_image_bw_publisher = None offset = 100 wheel_publisher = None state =", "== \"ASUS_CAMERA\" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector:", "0, 100], [55, 55, 255]) # dark red lower = np.array(lower, dtype =", "for ball\") def process_image(self, image): if self.state == \"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image,", "self.ball_position = BallPosition() self.ball_position.detected = False def camera_change(self, command): self.current_camera = command.data rospy.loginfo(\"Detector:", "# Detect blobs keypoints = detector.detect(image_binary) circles = [] for keypoint in keypoints:", "ver = (cv2.__version__).split('.') if int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params) else: detector =", "> image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" 
self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif", "\"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 # Create a detector with the parameters, according to", "cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else: # ARM_CAMERA", "their color rendition varies. Adjust for this issue when trying to filter the", "detector.detect(image_binary) circles = [] for keypoint in keypoints: x = keypoint.pt[0] y =", "on the source image from the camera and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image,", "def __init__(self): init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher", "offset = 100 wheel_publisher = None state = \"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned", "blurred_image, mask = mask) else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0)", "\"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation: # the real arm cam emits", "(0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0,", "image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\")", "image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia =", "(cv2.__version__).split('.') if int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) #", "for this issue when trying to filter the red colors in the image.", "(lower, 
upper) = ([0, 0, 100], [70, 100, 255]) lower = np.array(lower, dtype", "if self.current_camera == \"ASUS_CAMERA\" else 15 params.maxArea = 2500 if self.current_camera == \"ASUS_CAMERA\"", "= target[0] self.ball_position.y = target[1] self.ball_position.radius = target[2] self.ball_position.img_width = image.width self.ball_position.img_height =", "= False params.filterByCircularity = True params.filterByArea = True params.minArea = 30 if self.current_camera", "self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: rospy.loginfo(\"x: %d, y: %d, radius: %d\", target[0],", "cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size", "not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at bottom of Asus", "the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: rospy.loginfo(\"x: %d, y: %d,", "= \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for ball\") def process_image(self, image): if self.state ==", "= cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale", "self.current_camera == \"ASUS_CAMERA\" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\")", "if target[2]: rospy.loginfo(\"x: %d, y: %d, 
radius: %d\", target[0], target[1], target[2]) if self.current_camera", "import time from sensor_msgs.msg import Image from std_msgs.msg import String from common import", "* 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1]", "keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0, 0), 1,", "0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm", "import * from jupiter.msg import BallPosition class Detector: current_camera = None camera_subscription =", "String, queue_size = 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size =", "\"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for ball\") def process_image(self, image): if", "self.current_camera == \"ASUS_CAMERA\" else True): max_r = circle[2] target = circle if target", "np import cv_bridge import time from sensor_msgs.msg import Image from std_msgs.msg import String", "([0, 0, 100], [55, 55, 255]) # dark red lower = np.array(lower, dtype", "(0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8, 0)", "queue_size = 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10) self.bridge = cv_bridge.CvBridge()", "command.data == \"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for ball\") def process_image(self,", "if self.is_simulation: # the real arm cam emits an upside-down 
image, so adjust", "upside-down image, so adjust for orientation if target[1] < 10: if target[0] <", "np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255,", "= True self.ball_position.x = target[0] self.ball_position.y = target[1] self.ball_position.radius = target[2] self.ball_position.img_width =", "cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper) = ([0, 0, 100], [70, 100, 255])", "params.minDistBetweenBlobs = 20.0 # Create a detector with the parameters, according to your", "state = \"\" ball_at_middle_X_of_Asus_Camera = False ball_positioned = False front_camera_x_reference = 0 front_camera_y_reference", "cv2.GaussianBlur(image_cv, (9, 9), 0) # The two cameras have different sensors, so their", "x <= (image.width * 0.85) if __name__ == \"__main__\": rospy.init_node(\"detector\") detector = Detector()", "100, 255]) lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype =", "0.25 params.maxCircularity = 1.0 if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 # Create", "0) # publish the keypoints and target circle superimposed on the source image", "at bottom of Asus Camera frame\") elif target != None and self.current_camera ==", "rospy.loginfo(\"Detector: stopped searching for ball\") def process_image(self, image): if self.state == \"NO_SEARCH\": return", "np.array(lower2, dtype = \"uint8\") upper2 = np.array(upper2, dtype = \"uint8\") mask2 = cv2.inRange(blurred_image2,", "dark red lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype =", "from jupiter.msg import BallPosition class Detector: current_camera = None camera_subscription = None bridge", "sensors, so their color rendition varies. 
Adjust for this issue when trying to", "= cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2, upper2) = ([65, 50, 170], [100,", "image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)", "self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\":", "circles = [] for keypoint in keypoints: x = keypoint.pt[0] y = keypoint.pt[1]", "for ball\") elif command.data == \"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for", "<= (image.width * 0.85) if __name__ == \"__main__\": rospy.init_node(\"detector\") detector = Detector() rospy.spin()", "= cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0,", "= 1.0 params.minCircularity = 0.25 params.maxCircularity = 1.0 if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs", "params.filterByArea = True params.minArea = 30 if self.current_camera == \"ASUS_CAMERA\" else 15 params.maxArea", "circle[2] > max_r and (circle[1] >= (image.height * 0.5) if self.current_camera == \"ASUS_CAMERA\"", "= False def camera_change(self, command): self.current_camera = command.data rospy.loginfo(\"Detector: current camera changed to", "None camera_subscription = None bridge = None processed_image_publisher = None processed_image_bw_publisher = None", "cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2, upper2) =", "= False self.ball_positioned = False self.offset = 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image,", "False self.ball_positioned = False self.offset = 100 self.camera_subscription = 
rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image)", "cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw, center,", "= \"NO_SEARCH\" def asus_ballpark(self, x, image): return (image.width * 0.65) <= x and", "output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY |", "and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at", "self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: rospy.loginfo(\"x: %d, y: %d, radius: %d\", target[0], target[1], target[2])", "params.filterByConvexity = True params.filterByColor = False params.filterByCircularity = True params.filterByArea = True params.minArea", "0.5) if self.current_camera == \"ASUS_CAMERA\" else True): max_r = circle[2] target = circle", "30 if self.current_camera == \"ASUS_CAMERA\" else 15 params.maxArea = 2500 if self.current_camera ==", "output_light_orange, output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY", "38400 params.minConvexity = 0.2 params.maxConvexity = 1.0 params.minCircularity = 0.25 params.maxCircularity = 1.0", "0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None", "\"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0) # The two cameras have different", "15 params.maxArea = 2500 if 
self.current_camera == \"ASUS_CAMERA\" else 38400 params.minConvexity = 0.2", "cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8, 0) # publish the keypoints", "\"bgr8\")) if target[2]: rospy.loginfo(\"x: %d, y: %d, radius: %d\", target[0], target[1], target[2]) if", "\"uint8\") upper2 = np.array(upper2, dtype = \"uint8\") mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange", "self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected =", "circle in circles: if circle[2] > max_r and (circle[1] >= (image.height * 0.5)", "circle superimposed on the source image from the camera and on the b&w", "= ([0, 0, 100], [70, 100, 255]) lower = np.array(lower, dtype = \"uint8\")", "\"\" ball_position = None def __init__(self): init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change)", "target != None and self.current_camera == \"ASUS_CAMERA\" and abs(target[1] - (image.height)) < (image.height", "self.process_image) elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image,", "= False front_camera_x_reference = 0 front_camera_y_reference = 0 move_robot_or_arm = \"\" ball_position =", "rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10) self.bridge", "rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera == \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation", "= output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) 
image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale,", "= rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10) self.state_machine_publisher = rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10)", "the red colors in the image. if self.current_camera == \"ASUS_CAMERA\": (lower, upper) =", "params.filterByCircularity = True params.filterByArea = True params.minArea = 30 if self.current_camera == \"ASUS_CAMERA\"", "params.maxCircularity = 1.0 if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 # Create a", "9), 0) (lower, upper) = ([0, 0, 100], [70, 100, 255]) lower =", "0, 100], [70, 100, 255]) lower = np.array(lower, dtype = \"uint8\") upper =", "None if circles: circles = np.uint16(np.around(circles)) max_r = 0.0 target = circles[0] for", "and self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation: # the real arm cam emits an", "Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self, command): if command.data == \"SEARCH\": self.state", "10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size", "cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity", "self.ball_position.detected = False def camera_change(self, command): self.current_camera = command.data rospy.loginfo(\"Detector: current camera changed", "100], [70, 100, 255]) lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper,", "0, 0), 1, 8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0),", "and target circle superimposed on the source image from the camera and on", "BallPosition, queue_size = 10) 
self.ball_position = BallPosition() self.ball_position.detected = False def camera_change(self, command):", "an upside-down image, so adjust for orientation if target[1] < 10: if target[0]", "cv2 import numpy as np import cv_bridge import time from sensor_msgs.msg import Image", "to search for ball\") elif command.data == \"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped", "= mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh,", "# dark red lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype", "* 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target !=", "170], [100, 70, 255]) lower2 = np.array(lower2, dtype = \"uint8\") upper2 = np.array(upper2,", "elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\"", "the source image from the camera and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\"))", "(circle[1] >= (image.height * 0.5) if self.current_camera == \"ASUS_CAMERA\" else True): max_r =", "lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2, upper2) = ([65,", "#!/usr/bin/python import roslib import rospy import cv2 import numpy as np import cv_bridge", "dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output = cv2.bitwise_and(blurred_image, blurred_image, mask", "y, r]) target = None if circles: circles = np.uint16(np.around(circles)) max_r = 0.0", "= np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype = \"uint8\") mask =", "= circle if 
target != None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255,", "upper) = ([0, 0, 100], [70, 100, 255]) lower = np.array(lower, dtype =", "from the camera and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if", "target[2]) if self.current_camera == \"ASUS_CAMERA\" and self.asus_ballpark(target[0], image) and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera =", "\"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm ==", "else 38400 params.minConvexity = 0.2 params.maxConvexity = 1.0 params.minCircularity = 0.25 params.maxCircularity =", "= 0.2 params.maxConvexity = 1.0 params.minCircularity = 0.25 params.maxCircularity = 1.0 if self.current_camera", "ball found\") elif target != None and self.current_camera == \"ASUS_CAMERA\" and abs(target[1] -", "image.height self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def asus_ballpark(self, x, image): return (image.width * 0.65)", "self.current_camera == \"ARM_CAMERA\": self.camera_subscription = rospy.Subscriber(\"/Creative_Camera/rgb/image_raw\" if self.is_simulation else \"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm", "0 front_camera_y_reference = 0 move_robot_or_arm = \"\" ball_position = None def __init__(self): init_arguments(self)", "bottom of Asus Camera frame\") elif target != None and self.current_camera == \"ARM_CAMERA\"", "String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size = 10) self.state_machine_publisher", "emits an upside-down image, so 
adjust for orientation if target[1] < 10: if", "upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2, upper2) = ([65, 50,", "the camera and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]:", "from sensor_msgs.msg import Image from std_msgs.msg import String from common import * from", "as np import cv_bridge import time from sensor_msgs.msg import Image from std_msgs.msg import", "if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 # Create a detector with the", "== \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected = True self.ball_position.x = target[0] self.ball_position.y", "time from sensor_msgs.msg import Image from std_msgs.msg import String from common import *", "= mask) (lower2, upper2) = ([65, 50, 170], [100, 70, 255]) lower2 =", "= rospy.Publisher(\"/jupiter/robot_movement/result\", String, queue_size = 10) self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image,", "image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def asus_ballpark(self, x, image): return", "elif target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\": if", "0.2 params.maxConvexity = 1.0 params.minCircularity = 0.25 params.maxCircularity = 1.0 if self.current_camera ==", "target[1] > 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] >", "[100, 70, 255]) lower2 = np.array(lower2, dtype = \"uint8\") upper2 = np.array(upper2, dtype", "if int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) 
# Detect", "move_robot_or_arm = \"\" ball_position = None def __init__(self): init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\",", "self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for ball\") def process_image(self, image): if self.state", "parameters, according to your OpenCV version (2 or 3) ver = (cv2.__version__).split('.') if", "= circles[0] for circle in circles: if circle[2] > max_r and (circle[1] >=", "target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\")", "x and x <= (image.width * 0.85) if __name__ == \"__main__\": rospy.init_node(\"detector\") detector", "rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10) self.ball_position", "else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] > 10: if target[0] <", "self.ball_position.y = target[1] self.ball_position.radius = target[2] self.ball_position.img_width = image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position)", "= cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) (thresh, image_binary) = cv2.threshold(image_grayscale, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params", "varies. 
Adjust for this issue when trying to filter the red colors in", "roslib import rospy import cv2 import numpy as np import cv_bridge import time", "front_camera_y_reference = 0 move_robot_or_arm = \"\" ball_position = None def __init__(self): init_arguments(self) self.state", "0.65) <= x and x <= (image.width * 0.85) if __name__ == \"__main__\":", "= cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image,", "cv_bridge import time from sensor_msgs.msg import Image from std_msgs.msg import String from common", "Detect blobs keypoints = detector.detect(image_binary) circles = [] for keypoint in keypoints: x", "cv2.bitwise_and(blurred_image, blurred_image, mask = mask) else: # ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9),", "keypoints: x = keypoint.pt[0] y = keypoint.pt[1] r = keypoint.size / 2.0 circles.append([x,", "None def __init__(self): init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change)", "1, 8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image,", "100], [55, 55, 255]) # dark red lower = np.array(lower, dtype = \"uint8\")", "lower2 = np.array(lower2, dtype = \"uint8\") upper2 = np.array(upper2, dtype = \"uint8\") mask2", "self.ball_positioned = False self.offset = 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif", "* 0.5) if self.current_camera == \"ASUS_CAMERA\" else True): max_r = circle[2] target =", "\"SEARCH\" rospy.loginfo(\"Detector: starting to search for ball\") elif command.data == \"NO_SEARCH\": self.state =", "== 
\"ASUS_CAMERA\": (lower, upper) = ([0, 0, 100], [55, 55, 255]) # dark", "\"ASUS_CAMERA\" and abs(target[1] - (image.height)) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and not", "and (circle[1] >= (image.height * 0.5) if self.current_camera == \"ASUS_CAMERA\" else True): max_r", "cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0, 0),", "= circle[2] target = circle if target != None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints,", "self.current_camera = command.data rospy.loginfo(\"Detector: current camera changed to %s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister()", "your OpenCV version (2 or 3) ver = (cv2.__version__).split('.') if int(ver[0]) < 3:", "= (cv2.__version__).split('.') if int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params)", "< 3: detector = cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints", "image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: rospy.loginfo(\"x: %d, y: %d, radius: %d\",", "None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing ball position\")", "\"uint8\") mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2, mask = mask2)", "the keypoints and target circle superimposed on the source image from the camera", "self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self, command): if command.data == \"SEARCH\": self.state = \"SEARCH\"", "\"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting to search for 
ball\") elif command.data ==", "def process_image(self, image): if self.state == \"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image", "target[2] self.ball_position.img_width = image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def asus_ballpark(self,", "current camera changed to %s\", self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\":", "self.bridge = cv_bridge.CvBridge() self.processed_image_publisher = rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\",", "target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\")", "is at bottom of Asus Camera frame\") elif target != None and self.current_camera", "255]) lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype = \"uint8\")", "= 20.0 # Create a detector with the parameters, according to your OpenCV", "3: detector = cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints =", "None processed_image_publisher = None processed_image_bw_publisher = None offset = 100 wheel_publisher = None", "if circles: circles = np.uint16(np.around(circles)) max_r = 0.0 target = circles[0] for circle", "self.state == \"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9, 9),", "the parameters, according to your OpenCV version (2 or 3) ver = (cv2.__version__).split('.')", "max_r and (circle[1] >= (image.height * 0.5) if self.current_camera == \"ASUS_CAMERA\" else True):", 
"cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints,", "adjust for orientation if target[1] < 10: if target[0] < image.width * 0.45:", "else: detector = cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints = detector.detect(image_binary) circles = []", "# ARM_CAMERA blurred_image2 = cv2.GaussianBlur(image_cv, (9, 9), 0) (lower, upper) = ([0, 0,", "> 10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width", "x, image): return (image.width * 0.65) <= x and x <= (image.width *", "upper) = ([0, 0, 100], [55, 55, 255]) # dark red lower =", "center = (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0)", "= np.array(upper2, dtype = \"uint8\") mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2,", "def camera_change(self, command): self.current_camera = command.data rospy.loginfo(\"Detector: current camera changed to %s\", self.current_camera)", "\"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent = False self.ball_positioned = False self.offset = 100", "== \"NO_SEARCH\": return image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0)", "None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0],", "to filter the red colors in the image. 
if self.current_camera == \"ASUS_CAMERA\": (lower,", "10: if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width *", "\"/komodo_1/arm_cam_node/image_raw\", Image, self.process_image) self.move_robot_or_arm = \"MOVE_ROBOT\" def state_change(self, command): if command.data == \"SEARCH\":", "when trying to filter the red colors in the image. if self.current_camera ==", "(image.height)) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True", "if target[0] < image.width * 0.45: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") elif target[0] > image.width * 0.55:", "None processed_image_bw_publisher = None offset = 100 wheel_publisher = None state = \"\"", "BallPosition() self.ball_position.detected = False def camera_change(self, command): self.current_camera = command.data rospy.loginfo(\"Detector: current camera", "ball_positioned = False front_camera_x_reference = 0 front_camera_y_reference = 0 move_robot_or_arm = \"\" ball_position", "np.uint16(np.around(circles)) max_r = 0.0 target = circles[0] for circle in circles: if circle[2]", "target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0, 0), 1, 8, 0) processed_image = cv2.drawKeypoints(image_cv,", "Image from std_msgs.msg import String from common import * from jupiter.msg import BallPosition", "= rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10) self.ball_position_publisher = rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10)", "= image.width self.ball_position.img_height = image.height self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def asus_ballpark(self, x, image):", "processed_image_bw_publisher = None offset = 100 wheel_publisher = None state = \"\" ball_at_middle_X_of_Asus_Camera", "< (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera 
and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\")", "== \"ASUS_CAMERA\" else 38400 params.minConvexity = 0.2 params.maxConvexity = 1.0 params.minCircularity = 0.25", "target[0] > image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-LEFT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\")", "blurred_image2, mask = mask2) output = output_light_orange cv2.bitwise_or(output_dark_orange, output_light_orange, output) image_grayscale = cv2.cvtColor(output,", "([65, 50, 170], [100, 70, 255]) lower2 = np.array(lower2, dtype = \"uint8\") upper2", "upper = np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output =", "rospy.Publisher(\"/jupiter/processed_image\", Image, queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10) self.ball_position_publisher", "for keypoint in keypoints: x = keypoint.pt[0] y = keypoint.pt[1] r = keypoint.size", "this issue when trying to filter the red colors in the image. 
if", "red lower = np.array(lower, dtype = \"uint8\") upper = np.array(upper, dtype = \"uint8\")", "init_arguments(self) self.state = \"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\",", "= image.height self.ball_position_publisher.publish(self.ball_position) self.state = \"NO_SEARCH\" def asus_ballpark(self, x, image): return (image.width *", "image_cv = self.bridge.imgmsg_to_cv2(image, \"bgr8\") blurred_image = cv2.GaussianBlur(image_cv, (9, 9), 0) # The two", "max_r = circle[2] target = circle if target != None: processed_image_bw = cv2.drawKeypoints(image_binary,", "(lower, upper) = ([0, 0, 100], [55, 55, 255]) # dark red lower", "target != None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center", "255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center, target[2], (255, 0, 0), 1, 8, 0) #", "\"MOVE_ROBOT\" def state_change(self, command): if command.data == \"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting", "self.state = \"NO_SEARCH\" def asus_ballpark(self, x, image): return (image.width * 0.65) <= x", "(image.width * 0.65) <= x and x <= (image.width * 0.85) if __name__", "Image, queue_size = 10) self.processed_image_bw_publisher = rospy.Publisher(\"/jupiter/processed_image_bw\", Image, queue_size = 10) self.ball_position_publisher =", "self.offset = 100 self.camera_subscription = rospy.Subscriber(adjust_namespace(self.is_simulation, \"/Asus_Camera/rgb/image_raw\"), Image, self.process_image) elif self.current_camera == \"ARM_CAMERA\":", "keypoint in keypoints: x = keypoint.pt[0] y = keypoint.pt[1] r = keypoint.size /", "== \"ASUS_CAMERA\" else 15 params.maxArea = 2500 if self.current_camera == \"ASUS_CAMERA\" 
else 38400", "output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2, upper2) = ([65, 50, 170],", "if circle[2] > max_r and (circle[1] >= (image.height * 0.5) if self.current_camera ==", "True params.minArea = 30 if self.current_camera == \"ASUS_CAMERA\" else 15 params.maxArea = 2500", "red colors in the image. if self.current_camera == \"ASUS_CAMERA\": (lower, upper) = ([0,", "sensor_msgs.msg import Image from std_msgs.msg import String from common import * from jupiter.msg", "9), 0) # The two cameras have different sensors, so their color rendition", "mask = mask) (lower2, upper2) = ([65, 50, 170], [100, 70, 255]) lower2", "/ 10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent: self.ball_at_bottom_message_sent = True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball", "so adjust for orientation if target[1] < 10: if target[0] < image.width *", "self.current_camera) if self.camera_subscription: self.camera_subscription.unregister() if self.current_camera == \"ASUS_CAMERA\": self.ball_at_middle_X_of_Asus_Camera = False self.ball_at_bottom_message_sent =", "128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity =", "upper2) = ([65, 50, 170], [100, 70, 255]) lower2 = np.array(lower2, dtype =", "self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 # Create a detector with the parameters,", "self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\") elif target != None and", "!= None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ROBOT\": if self.is_simulation: #", "cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints = detector.detect(image_binary) circles = [] for keypoint in", 
"elif command.data == \"NO_SEARCH\": self.state = \"NO_SEARCH\" rospy.loginfo(\"Detector: stopped searching for ball\") def", "= 0 move_robot_or_arm = \"\" ball_position = None def __init__(self): init_arguments(self) self.state =", "= 0.25 params.maxCircularity = 1.0 if self.current_camera == \"FRONT_CAMERA\": params.minDistBetweenBlobs = 20.0 #", "the image. if self.current_camera == \"ASUS_CAMERA\": (lower, upper) = ([0, 0, 100], [55,", "(2 or 3) ver = (cv2.__version__).split('.') if int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params)", "= \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") elif target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm", "(255, 0, 0), 1, 8, 0) # publish the keypoints and target circle", "and not self.ball_at_middle_X_of_Asus_Camera: self.ball_at_middle_X_of_Asus_Camera = True self.robot_movement_publisher.publish(\"STOP-BALL_FOUND\") rospy.loginfo(\"Detector: ball found\") elif target !=", "= False self.ball_at_bottom_message_sent = False self.ball_positioned = False self.offset = 100 self.camera_subscription =", "according to your OpenCV version (2 or 3) ver = (cv2.__version__).split('.') if int(ver[0])", "* 0.65) <= x and x <= (image.width * 0.85) if __name__ ==", "mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image, blurred_image, mask = mask) (lower2,", "8, 0) processed_image = cv2.drawKeypoints(image_cv, keypoints, np.array([]), (0, 255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) cv2.circle(processed_image, center,", "3) ver = (cv2.__version__).split('.') if int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params) else: detector", "import numpy as np import cv_bridge import time from sensor_msgs.msg import Image from", "circle[2] target = circle if target != None: processed_image_bw = cv2.drawKeypoints(image_binary, keypoints, np.array([]),", "\"NO_SEARCH\" rospy.Subscriber(\"/jupiter/detector/current_camera\", 
String, self.camera_change) rospy.Subscriber(\"/jupiter/detector/state_change\", String, self.state_change) self.robot_movement_publisher = rospy.Publisher(\"/jupiter/robot_movement/command\", String, queue_size =", "= \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if target[1] > 10: if target[0] < image.width *", "class Detector: current_camera = None camera_subscription = None bridge = None processed_image_publisher =", "Detector: current_camera = None camera_subscription = None bridge = None processed_image_publisher = None", "command): if command.data == \"SEARCH\": self.state = \"SEARCH\" rospy.loginfo(\"Detector: starting to search for", "circles: if circle[2] > max_r and (circle[1] >= (image.height * 0.5) if self.current_camera", "keypoint.pt[0] y = keypoint.pt[1] r = keypoint.size / 2.0 circles.append([x, y, r]) target", "True self.ball_position.x = target[0] self.ball_position.y = target[1] self.ball_position.radius = target[2] self.ball_position.img_width = image.width", "and on the b&w image self.processed_image_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image, \"bgr8\")) self.processed_image_bw_publisher.publish(self.bridge.cv2_to_imgmsg(processed_image_bw, \"bgr8\")) if target[2]: rospy.loginfo(\"x: %d,", "int(ver[0]) < 3: detector = cv2.SimpleBlobDetector(params) else: detector = cv2.SimpleBlobDetector_create(params) # Detect blobs", "and abs(target[1] - (image.height)) < (image.height / 10.0) and self.ball_at_middle_X_of_Asus_Camera and not self.ball_at_bottom_message_sent:", "target != None and self.current_camera == \"ARM_CAMERA\" and self.move_robot_or_arm == \"MOVE_ARM\": rospy.loginfo(\"Detector: publishing", "detector = cv2.SimpleBlobDetector_create(params) # Detect blobs keypoints = detector.detect(image_binary) circles = [] for", "current_camera = None camera_subscription = None bridge = None processed_image_publisher = None processed_image_bw_publisher", "mask) (lower2, upper2) = ([65, 50, 170], 
[100, 70, 255]) lower2 = np.array(lower2,", "params.minArea = 30 if self.current_camera == \"ASUS_CAMERA\" else 15 params.maxArea = 2500 if", "rospy.loginfo(\"Detector: publishing ball position\") self.ball_position.detected = True self.ball_position.x = target[0] self.ball_position.y = target[1]", "255, 0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) center = (target[0], target[1]) cv2.circle(processed_image_bw, center, target[2], (255, 0, 0),", "rospy.Publisher(\"/jupiter/ball_position\", BallPosition, queue_size = 10) self.ball_position = BallPosition() self.ball_position.detected = False def camera_change(self,", "cv2.THRESH_BINARY | cv2.THRESH_OTSU) params = cv2.SimpleBlobDetector_Params() params.filterByInertia = False params.filterByConvexity = True params.filterByColor", "image.width * 0.55: self.robot_movement_publisher.publish(\"FORWARD-RIGHT\") else: self.robot_movement_publisher.publish(\"FORWARD_ARM\") else: self.move_robot_or_arm = \"MOVE_ARM\" self.robot_movement_publisher.publish(\"STOP-READY_TO_GRAB\") else: if", "[55, 55, 255]) # dark red lower = np.array(lower, dtype = \"uint8\") upper", "= True self.robot_movement_publisher.publish(\"STOP-BALL_AT_BOTTOM_OF_FRAME\") rospy.loginfo(\"Detector: ball is at bottom of Asus Camera frame\") elif", "of Asus Camera frame\") elif target != None and self.current_camera == \"ARM_CAMERA\" and", "np.array(upper2, dtype = \"uint8\") mask2 = cv2.inRange(blurred_image2, lower2, upper2) output_light_orange = cv2.bitwise_and(blurred_image2, blurred_image2,", "= np.array(upper, dtype = \"uint8\") mask = cv2.inRange(blurred_image, lower, upper) output_dark_orange = cv2.bitwise_and(blurred_image,", "import roslib import rospy import cv2 import numpy as np import cv_bridge import" ]
[ "ans = [] for i in range(0, len(nums)): soln = 0 for j", "range(0, len(nums)): soln = 0 for j in range(0, len(nums)): if(nums[j] < nums[i]", "< nums[i] and j != i): soln += 1 ans. append(soln) return ans", "j in range(0, len(nums)): if(nums[j] < nums[i] and j != i): soln +=", "soln = 0 for j in range(0, len(nums)): if(nums[j] < nums[i] and j", "[] for i in range(0, len(nums)): soln = 0 for j in range(0,", "nums: List[int]) -> List[int]: ans = [] for i in range(0, len(nums)): soln", "def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]: ans = [] for i in range(0,", "len(nums)): if(nums[j] < nums[i] and j != i): soln += 1 ans. append(soln)", "Solution: def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]: ans = [] for i in", "in range(0, len(nums)): soln = 0 for j in range(0, len(nums)): if(nums[j] <", "if(nums[j] < nums[i] and j != i): soln += 1 ans. append(soln) return", "<filename>LeetCode/1365_How_Many_Numbers_Are_Smaller_Than_the_Current_Number.py<gh_stars>100-1000 class Solution: def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]: ans = [] for", "class Solution: def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]: ans = [] for i", "len(nums)): soln = 0 for j in range(0, len(nums)): if(nums[j] < nums[i] and", "= 0 for j in range(0, len(nums)): if(nums[j] < nums[i] and j !=", "in range(0, len(nums)): if(nums[j] < nums[i] and j != i): soln += 1", "0 for j in range(0, len(nums)): if(nums[j] < nums[i] and j != i):", "= [] for i in range(0, len(nums)): soln = 0 for j in", "smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]: ans = [] for i in range(0, len(nums)):", "i in range(0, len(nums)): soln = 0 for j in range(0, len(nums)): if(nums[j]", "-> List[int]: ans = [] for i in range(0, len(nums)): soln = 0", "range(0, len(nums)): if(nums[j] < nums[i] and j != i): soln += 1 ans.", "for j in range(0, len(nums)): if(nums[j] < nums[i] and j != i): soln", "List[int]: ans = [] for i in range(0, len(nums)): 
soln = 0 for", "List[int]) -> List[int]: ans = [] for i in range(0, len(nums)): soln =", "for i in range(0, len(nums)): soln = 0 for j in range(0, len(nums)):" ]
[ "'csv' list_of_files = [i for i in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out = \"coupons.csv\"", "os import chdir import glob import pandas as pdlib # Produce a single", "print(list_of_files) file_out = \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note' csv_merge = open(file_out, 'w') csv_merge.write(csv_header) csv_merge.write('\\n')", "combining all files def produceOneCSV(list_of_files,csv_merge): for file in list_of_files: csv_in = open(file) for", "library \"\"\" from os import chdir import glob import pandas as pdlib #", "produceOneCSV(list_of_files,csv_merge): for file in list_of_files: csv_in = open(file) for line in csv_in: csv_merge.write(line)", "in the working dir chdir(\"./csv_data\") extension = 'csv' list_of_files = [i for i", "Script: Combine/Merge multiple CSV files using the Pandas library \"\"\" from os import", "all CSV files in the working dir chdir(\"./csv_data\") extension = 'csv' list_of_files =", "extension = 'csv' list_of_files = [i for i in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out", "dir chdir(\"./csv_data\") extension = 'csv' list_of_files = [i for i in glob.glob('*.{}'.format(extension))] #", "i in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out = \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note' csv_merge =", "# Produce a single CSV after combining all files def produceOneCSV(list_of_files,csv_merge): for file", "file_out = \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note' csv_merge = open(file_out, 'w') csv_merge.write(csv_header) csv_merge.write('\\n') produceOneCSV(list_of_files,csv_merge)", "pandas as pdlib # Produce a single CSV after combining all files def", "working dir chdir(\"./csv_data\") extension = 'csv' list_of_files = [i for i in glob.glob('*.{}'.format(extension))]", "= [i for 
i in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out = \"coupons.csv\" csv_header =", "CSV after combining all files def produceOneCSV(list_of_files,csv_merge): for file in list_of_files: csv_in =", "file in list_of_files: csv_in = open(file) for line in csv_in: csv_merge.write(line) csv_in.close() #", "CSV files using the Pandas library \"\"\" from os import chdir import glob", "files in the working dir chdir(\"./csv_data\") extension = 'csv' list_of_files = [i for", "for i in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out = \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note' csv_merge", "<filename>merger.py \"\"\" Python Script: Combine/Merge multiple CSV files using the Pandas library \"\"\"", "using the Pandas library \"\"\" from os import chdir import glob import pandas", "for file in list_of_files: csv_in = open(file) for line in csv_in: csv_merge.write(line) csv_in.close()", "[i for i in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out = \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note'", "csv_in = open(file) for line in csv_in: csv_merge.write(line) csv_in.close() # List all CSV", "= open(file) for line in csv_in: csv_merge.write(line) csv_in.close() # List all CSV files", "after combining all files def produceOneCSV(list_of_files,csv_merge): for file in list_of_files: csv_in = open(file)", "= \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note' csv_merge = open(file_out, 'w') csv_merge.write(csv_header) csv_merge.write('\\n') produceOneCSV(list_of_files,csv_merge) csv_merge.close()", "the Pandas library \"\"\" from os import chdir import glob import pandas as", "# print(list_of_files) file_out = \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note' csv_merge = open(file_out, 'w') 
csv_merge.write(csv_header)", "Python Script: Combine/Merge multiple CSV files using the Pandas library \"\"\" from os", "import chdir import glob import pandas as pdlib # Produce a single CSV", "a single CSV after combining all files def produceOneCSV(list_of_files,csv_merge): for file in list_of_files:", "csv_in.close() # List all CSV files in the working dir chdir(\"./csv_data\") extension =", "from os import chdir import glob import pandas as pdlib # Produce a", "CSV files in the working dir chdir(\"./csv_data\") extension = 'csv' list_of_files = [i", "chdir(\"./csv_data\") extension = 'csv' list_of_files = [i for i in glob.glob('*.{}'.format(extension))] # print(list_of_files)", "pdlib # Produce a single CSV after combining all files def produceOneCSV(list_of_files,csv_merge): for", "in csv_in: csv_merge.write(line) csv_in.close() # List all CSV files in the working dir", "line in csv_in: csv_merge.write(line) csv_in.close() # List all CSV files in the working", "Produce a single CSV after combining all files def produceOneCSV(list_of_files,csv_merge): for file in", "csv_in: csv_merge.write(line) csv_in.close() # List all CSV files in the working dir chdir(\"./csv_data\")", "import pandas as pdlib # Produce a single CSV after combining all files", "\"\"\" from os import chdir import glob import pandas as pdlib # Produce", "csv_merge.write(line) csv_in.close() # List all CSV files in the working dir chdir(\"./csv_data\") extension", "chdir import glob import pandas as pdlib # Produce a single CSV after", "# List all CSV files in the working dir chdir(\"./csv_data\") extension = 'csv'", "Pandas library \"\"\" from os import chdir import glob import pandas as pdlib", "\"\"\" Python Script: Combine/Merge multiple CSV files using the Pandas library \"\"\" from", "glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out = \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note' csv_merge = open(file_out, 
'w')", "as pdlib # Produce a single CSV after combining all files def produceOneCSV(list_of_files,csv_merge):", "open(file) for line in csv_in: csv_merge.write(line) csv_in.close() # List all CSV files in", "def produceOneCSV(list_of_files,csv_merge): for file in list_of_files: csv_in = open(file) for line in csv_in:", "list_of_files = [i for i in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out = \"coupons.csv\" csv_header", "single CSV after combining all files def produceOneCSV(list_of_files,csv_merge): for file in list_of_files: csv_in", "List all CSV files in the working dir chdir(\"./csv_data\") extension = 'csv' list_of_files", "in list_of_files: csv_in = open(file) for line in csv_in: csv_merge.write(line) csv_in.close() # List", "for line in csv_in: csv_merge.write(line) csv_in.close() # List all CSV files in the", "import glob import pandas as pdlib # Produce a single CSV after combining", "all files def produceOneCSV(list_of_files,csv_merge): for file in list_of_files: csv_in = open(file) for line", "the working dir chdir(\"./csv_data\") extension = 'csv' list_of_files = [i for i in", "glob import pandas as pdlib # Produce a single CSV after combining all", "= 'csv' list_of_files = [i for i in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out =", "in glob.glob('*.{}'.format(extension))] # print(list_of_files) file_out = \"coupons.csv\" csv_header = 'store,title,short_title,code,short_desc,description,expiry_date,expire_note' csv_merge = open(file_out,", "files def produceOneCSV(list_of_files,csv_merge): for file in list_of_files: csv_in = open(file) for line in", "list_of_files: csv_in = open(file) for line in csv_in: csv_merge.write(line) csv_in.close() # List all", "Combine/Merge multiple CSV files using the Pandas library \"\"\" from os import chdir", "multiple CSV files using the Pandas library \"\"\" from os import chdir import", "files using the Pandas library \"\"\" from os import chdir import glob import" ]
[ "2020 <NAME> # # Permission is hereby granted, free of charge, to any", "'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self):", "to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of", "is hereby granted, free of charge, to any person obtaining a # copy", "'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1',", "AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None: self.app_dir_files = [ 'AppDir/lib/',", "# copy of this software and associated documentation files (the \"Software\"), # to", "do so, subject to the following conditions: # # The above copyright notice", "the Software. 
import unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) ->", "files (the \"Software\"), # to deal in the Software without restriction, including without", "and to permit persons to whom the Software is # furnished to do", "DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None: self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so',", "-> None: self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so',", "self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl = DynamicLoader('AppDir', ['/path/to/file', 'path/to/shared_lib.so', 'path/to/shared_lib.so.1']) self.assertEqual(dl._list_libs(), ['path/to/shared_lib.so', 'path/to/shared_lib.so.1'])", "the following conditions: # # The above copyright notice and this permission notice", "'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl", "self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl = DynamicLoader('AppDir', ['/path/to/file', 'path/to/shared_lib.so', 
'path/to/shared_lib.so.1']) self.assertEqual(dl._list_libs(), ['path/to/shared_lib.so',", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "Software without restriction, including without limitation the # rights to use, copy, modify,", "permit persons to whom the Software is # furnished to do so, subject", "copy of this software and associated documentation files (the \"Software\"), # to deal", "modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and", "without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense,", "the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or #", "DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl = DynamicLoader('AppDir', ['/path/to/file', 'path/to/shared_lib.so', 'path/to/shared_lib.so.1']) self.assertEqual(dl._list_libs(),", "charge, to any person obtaining a # copy of this software and associated", "substantial portions of the Software. 
import unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase):", "self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0',", "'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def", "a # copy of this software and associated documentation files (the \"Software\"), #", "[ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1',", "above copyright notice and this permission notice shall be included in # all", "or substantial portions of the Software. 
import unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class", "'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl", "copies of the Software, and to permit persons to whom the Software is", "# The above copyright notice and this permission notice shall be included in", "of charge, to any person obtaining a # copy of this software and", "to do so, subject to the following conditions: # # The above copyright", "persons to whom the Software is # furnished to do so, subject to", "publish, distribute, sublicense, and/or # sell copies of the Software, and to permit", "<NAME> # # Permission is hereby granted, free of charge, to any person", "conditions: # # The above copyright notice and this permission notice shall be", "'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(),", "'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl =", "to permit persons to whom the Software is # furnished to do so,", "associated documentation files (the \"Software\"), # to deal in the Software without restriction,", "and associated documentation files (the \"Software\"), # to deal in the Software without", "def 
test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl = DynamicLoader('AppDir',", "in the Software without restriction, including without limitation the # rights to use,", "the Software without restriction, including without limitation the # rights to use, copy,", "person obtaining a # copy of this software and associated documentation files (the", "# Copyright 2020 <NAME> # # Permission is hereby granted, free of charge,", "# to deal in the Software without restriction, including without limitation the #", "notice and this permission notice shall be included in # all copies or", "whom the Software is # furnished to do so, subject to the following", "of the Software. import unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self)", "'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files)", "included in # all copies or substantial portions of the Software. 
import unittest", "import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None: self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1',", "# # Permission is hereby granted, free of charge, to any person obtaining", "documentation files (the \"Software\"), # to deal in the Software without restriction, including", "and this permission notice shall be included in # all copies or substantial", "DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None: self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu',", "so, subject to the following conditions: # # The above copyright notice and", "be included in # all copies or substantial portions of the Software. import", "Copyright 2020 <NAME> # # Permission is hereby granted, free of charge, to", "def setUp(self) -> None: self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2',", "is # furnished to do so, subject to the following conditions: # #", "class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None: self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so',", "# sell copies of the Software, and to permit persons to whom the", "to the following conditions: # # The above copyright notice and this permission", "unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None: self.app_dir_files =", "to deal in the Software without restriction, including without limitation the # rights", "distribute, sublicense, and/or # sell copies of the Software, and to permit persons", "obtaining a # copy of this 
software and associated documentation files (the \"Software\"),", "from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None: self.app_dir_files = [", "following conditions: # # The above copyright notice and this permission notice shall", "None: self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0',", "of the Software, and to permit persons to whom the Software is #", "'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1',", "permission notice shall be included in # all copies or substantial portions of", "restriction, including without limitation the # rights to use, copy, modify, merge, publish,", "free of charge, to any person obtaining a # copy of this software", "granted, free of charge, to any person obtaining a # copy of this", "this software and associated documentation files (the \"Software\"), # to deal in the", "sell copies of the Software, and to permit persons to whom the Software", "'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 
'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ]", "dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl = DynamicLoader('AppDir', ['/path/to/file', 'path/to/shared_lib.so',", "The above copyright notice and this permission notice shall be included in #", "# # The above copyright notice and this permission notice shall be included", "(the \"Software\"), # to deal in the Software without restriction, including without limitation", "sublicense, and/or # sell copies of the Software, and to permit persons to", "use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the", "and/or # sell copies of the Software, and to permit persons to whom", "'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so')", "copyright notice and this permission notice shall be included in # all copies", "] def test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl =", "Permission is hereby granted, free of charge, to any person obtaining a #", "\"Software\"), # to deal in the Software without restriction, including without limitation 
the", "Software is # furnished to do so, subject to the following conditions: #", "Software, and to permit persons to whom the Software is # furnished to", "= [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1',", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies", "'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so', 'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self):", "in # all copies or substantial portions of the Software. 
import unittest from", "import unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None: self.app_dir_files", "'AppDir/lib/aarch64-linux-gnu/ld-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libpthread.so.0', 'AppDir/lib/aarch64-linux-gnu/libacl.so.1.1.0', 'AppDir/lib/aarch64-linux-gnu/libcrypt.so.1', 'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl = DynamicLoader('AppDir',", "copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software,", "portions of the Software. import unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def", "including without limitation the # rights to use, copy, modify, merge, publish, distribute,", "to any person obtaining a # copy of this software and associated documentation", "without restriction, including without limitation the # rights to use, copy, modify, merge,", "Software. import unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader class DynamicLoaderTestCase(unittest.TestCase): def setUp(self) -> None:", "shall be included in # all copies or substantial portions of the Software.", "hereby granted, free of charge, to any person obtaining a # copy of", "setUp(self) -> None: self.app_dir_files = [ 'AppDir/lib/', 'AppDir/lib/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu', 'AppDir/lib/aarch64-linux-gnu/libpthread-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_hesiod-2.27.so', 'AppDir/lib/aarch64-linux-gnu/libnss_nis.so.2', 'AppDir/lib/aarch64-linux-gnu/libmemusage.so',", "# all copies or substantial portions of the Software. 
import unittest from AppImageBuilder.app_dir.runtimes.classic", "notice shall be included in # all copies or substantial portions of the", "to whom the Software is # furnished to do so, subject to the", "deal in the Software without restriction, including without limitation the # rights to", "furnished to do so, subject to the following conditions: # # The above", "any person obtaining a # copy of this software and associated documentation files", "software and associated documentation files (the \"Software\"), # to deal in the Software", "limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell", "# Permission is hereby granted, free of charge, to any person obtaining a", "this permission notice shall be included in # all copies or substantial portions", "test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl = DynamicLoader('AppDir', ['/path/to/file',", "all copies or substantial portions of the Software. 
import unittest from AppImageBuilder.app_dir.runtimes.classic import", "= DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def test_list_libs(self): dl = DynamicLoader('AppDir', ['/path/to/file', 'path/to/shared_lib.so', 'path/to/shared_lib.so.1'])", "merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to", "'AppDir/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1', 'AppDir/lib/aarch64-linux-gnu/libutil.so.1', 'AppDir/lib/aarch64-linux-gnu/libnsl.so.1', ] def test_get_binary_path(self): dl = DynamicLoader('AppDir', self.app_dir_files) self.assertEqual(dl.get_binary_path(), 'lib/aarch64-linux-gnu/ld-2.27.so') def", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "of this software and associated documentation files (the \"Software\"), # to deal in", "copies or substantial portions of the Software. import unittest from AppImageBuilder.app_dir.runtimes.classic import DynamicLoader" ]
[ "run an operation. We're subclassing from :mod:`django.db.IntegrityError` so that it is automatically rolled-back", "when a wallet has insufficient balance to run an operation. We're subclassing from", "django.db import IntegrityError class InsufficientBalance(IntegrityError): \"\"\"Raised when a wallet has insufficient balance to", "a wallet has insufficient balance to run an operation. We're subclassing from :mod:`django.db.IntegrityError`", "has insufficient balance to run an operation. We're subclassing from :mod:`django.db.IntegrityError` so that", "from :mod:`django.db.IntegrityError` so that it is automatically rolled-back during django's transaction lifecycle. \"\"\"", "balance to run an operation. We're subclassing from :mod:`django.db.IntegrityError` so that it is", "class InsufficientBalance(IntegrityError): \"\"\"Raised when a wallet has insufficient balance to run an operation.", "insufficient balance to run an operation. We're subclassing from :mod:`django.db.IntegrityError` so that it", "wallet has insufficient balance to run an operation. We're subclassing from :mod:`django.db.IntegrityError` so", "IntegrityError class InsufficientBalance(IntegrityError): \"\"\"Raised when a wallet has insufficient balance to run an", "from django.db import IntegrityError class InsufficientBalance(IntegrityError): \"\"\"Raised when a wallet has insufficient balance", "<reponame>iesteban/bitcoin_bazaar_backend<filename>wallet/errors.py from django.db import IntegrityError class InsufficientBalance(IntegrityError): \"\"\"Raised when a wallet has insufficient", "InsufficientBalance(IntegrityError): \"\"\"Raised when a wallet has insufficient balance to run an operation. We're", "\"\"\"Raised when a wallet has insufficient balance to run an operation. We're subclassing", "import IntegrityError class InsufficientBalance(IntegrityError): \"\"\"Raised when a wallet has insufficient balance to run", "to run an operation. 
We're subclassing from :mod:`django.db.IntegrityError` so that it is automatically", "an operation. We're subclassing from :mod:`django.db.IntegrityError` so that it is automatically rolled-back during", "We're subclassing from :mod:`django.db.IntegrityError` so that it is automatically rolled-back during django's transaction", "subclassing from :mod:`django.db.IntegrityError` so that it is automatically rolled-back during django's transaction lifecycle.", "operation. We're subclassing from :mod:`django.db.IntegrityError` so that it is automatically rolled-back during django's" ]
[ "with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks = {} for", "legends = [] for value in plot['values']: plt.plot(history.history[value]) legend = value.replace('_', ' ').title()", "callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise", "+= track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] = validate_use return train_tracks, validate_tracks", "in tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def print_track_information(training_tracks, validation_tracks):", "tracks: if track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks):", "+ '/' + checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name ==", "flat_tracks += tracks return flat_tracks def print_tag_track_info(infos): for k in infos: tracks =", "= {} validate_tracks = {} for tag in tag_tracks.keys(): if tag in CLASSES:", "in tracks]) print(f'{k}: {len(tracks)} tracks with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks =", "def print_track_information(training_tracks, validation_tracks): details = f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\\n'", "i, plot in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends = [] for value", "as f: def summary_print(s): print(s, file=f) f.write('\\nTraining configuration:\\n' + training_config_text + '\\n') f.write('\\nModel", "elif callback_name == 'lr_callback': return 
tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else:", "f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train Validate') for", "type {callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 * len(plots))) plt_position = len(plots)", "{fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks = {} for tag", "{config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback':", "tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc() pass return tracks def tracks_by_tag(tracks): tag_tracks =", "return flat_tracks def print_tag_track_info(infos): for k in infos: tracks = infos[k] fcount =", "tensorflow as tf import traceback from support.data_model import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks", "= {t: [] for t in CLASSES} for track in tracks: if track.tag", "= tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn): if", "= tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn): if tp != 0: precision", "legend) value = 'val_' + value plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0)", "model_config_text + '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for t", "plots, save_directory): 
plt.figure(figsize=(8, 6 * len(plots))) plt_position = len(plots) * 100 + 11", "t in tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def print_track_information(training_tracks,", "/ (tp + fn) fscore = 2. * precision * recall / (precision", "def tracks_by_tag(tracks): tag_tracks = {t: [] for t in CLASSES} for track in", "def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 * len(plots))) plt_position = len(plots) * 100", "print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x)", "0.0, 0.0 def build_callback(config, save_directory): callback_name = config['name'] config_copy = config.copy() del config_copy['name']", "training_config_text + '\\n') f.write('\\nModel configuration:\\n' + model_config_text + '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True)", "json import matplotlib.pyplot as plt import numpy as np import pickle import tensorflow", "return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return", "Validate') for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x =", "return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type", "vcount = 0 train_use = [] validate_use = [] for track_info in tracks:", "def compute_scores(tp, fp, fn): if tp != 0: precision = tp / (tp", "+ model_config_text + '\\n') 
print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for", "tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy)", "CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use = [] validate_use =", "infos[k] fcount = np.sum([t.frame_count for t in tracks]) print(f'{k}: {len(tracks)} tracks with {fcount}", "numpy as np import pickle import tensorflow as tf import traceback from support.data_model", "precision = tp / (tp + fp) recall = tp / (tp +", "if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use", "').title() legends.append('Training ' + legend) value = 'val_' + value plt.plot(history.history[value]) legends.append('Validation '", "{all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train Validate') for key in", "dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp,", "callback_name = config['name'] config_copy = config.copy() del config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename", "tag in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use = []", "tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks in tag_tracks.values(): flat_tracks", "print(' Train Validate') for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} 
{frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x):", "track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks =", "return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn): if tp != 0: precision = tp", "def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s):", "= tp / (tp + fp) recall = tp / (tp + fn)", "tracks = [] with open(path, 'rb') as f: try: while True: tracks.append(pickle.load(f)) except", "try: while True: tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc() pass return tracks def", "build_callback(config, save_directory): callback_name = config['name'] config_copy = config.copy() del config_copy['name'] if callback_name ==", "elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type {callback_name}') def", "fcount = np.sum([t.frame_count for t in tracks]) print(f'{k}: {len(tracks)} tracks with {fcount} frames')", "t in tracks]) print(f'{k}: {len(tracks)} tracks with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks", "tag in tag_tracks.keys(): if tag in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount =", "track in tracks: if track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks", "f.write('\\nModel configuration:\\n' + model_config_text + '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return", "checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory + '/' + checkpoint_filename print(f'saving 
checkpoints to", "fscore = 2. * precision * recall / (precision + recall) return precision,", "vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag]", "= TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks in", "+ fn) fscore = 2. * precision * recall / (precision + recall)", "def summary_print(s): print(s, file=f) f.write('\\nTraining configuration:\\n' + training_config_text + '\\n') f.write('\\nModel configuration:\\n' +", "def print_tag_track_info(infos): for k in infos: tracks = infos[k] fcount = np.sum([t.frame_count for", "train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f:", "raise Exception(f'Unknown callback type {callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 * len(plots)))", "frames, validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train Validate') for key in CLASSES:", "else: return 0.0, 0.0, 0.0 def build_callback(config, save_directory): callback_name = config['name'] config_copy =", "[] for track_info in tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count", "' ').title() legends.append('Training ' + legend) value = 'val_' + value plt.plot(history.history[value]) legends.append('Validation", "x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn): if tp != 0:", "+ i) plt.title(plot['title']) legends = [] for value in plot['values']: plt.plot(history.history[value]) legend =", "tag_tracks = {t: [] for t in CLASSES} for track in tracks: if", "from support.data_model import TAG_CLASS_MAP, 
CLASSES def load_raw_tracks(path): tracks = [] with open(path, 'rb')", "* recall / (precision + recall) return precision, recall, fscore else: return 0.0,", "track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks", "recall / (precision + recall) return precision, recall, fscore else: return 0.0, 0.0,", "'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory + '/' + checkpoint_filename print(f'saving checkpoints", "tp / (tp + fn) fscore = 2. * precision * recall /", "kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn): if tp !=", "vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] = validate_use return train_tracks,", "= validate_use return train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt',", "model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s): print(s, file=f) f.write('\\nTraining", "[] for tracks in tag_tracks.values(): flat_tracks += tracks return flat_tracks def print_tag_track_info(infos): for", "int(np.sum([t.frame_count for t in tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES]))", "tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks in tag_tracks.values(): flat_tracks += tracks", "for t in tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def", "except Exception as e: traceback.print_exc() pass return tracks def tracks_by_tag(tracks): tag_tracks = {t:", "config['name'] config_copy = 
config.copy() del config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath']", "fp) recall = tp / (tp + fn) fscore = 2. * precision", "matplotlib.pyplot as plt import numpy as np import pickle import tensorflow as tf", "plt.plot(history.history[value]) legend = value.replace('_', ' ').title() legends.append('Training ' + legend) value = 'val_'", "as plt import numpy as np import pickle import tensorflow as tf import", "int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def print_track_information(training_tracks, validation_tracks): details = f'\\nTraining with {all_frame_counts(training_tracks)}", "def load_raw_tracks(path): tracks = [] with open(path, 'rb') as f: try: while True:", "if callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory + '/' +", "value plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0) plt.ylabel(plot['y-label']) plt.xlabel('Epoch') plt.legend(legends, loc=plot['caption-loc'], framealpha=.5)", "config_copy['filepath'] config_copy['filepath'] = save_directory + '/' + checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}') return", "+ 11 for i, plot in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends =", "tracks with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks = {}", "x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp,", "train_tracks[tag] = train_use validate_tracks[tag] = validate_use return train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text,", "plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0) 
plt.ylabel(plot['y-label']) plt.xlabel('Epoch') plt.legend(legends, loc=plot['caption-loc'], framealpha=.5) plt.savefig(f'{save_directory}/history.png')", "print(s, file=f) f.write('\\nTraining configuration:\\n' + training_config_text + '\\n') f.write('\\nModel configuration:\\n' + model_config_text +", "validate_tracks[tag] = validate_use return train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with", "100 + 11 for i, plot in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends", "tracks def tracks_by_tag(tracks): tag_tracks = {t: [] for t in CLASSES} for track", "= [] for track_info in tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount +=", "fn) fscore = 2. * precision * recall / (precision + recall) return", "tp != 0: precision = tp / (tp + fp) recall = tp", "print(f'saving checkpoints to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif", "= 'val_' + value plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0) plt.ylabel(plot['y-label']) plt.xlabel('Epoch')", "= f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train Validate')", "[] with open(path, 'rb') as f: try: while True: tracks.append(pickle.load(f)) except Exception as", "validate_use return train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w')", "+ value plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0) plt.ylabel(plot['y-label']) plt.xlabel('Epoch') plt.legend(legends, 
loc=plot['caption-loc'],", "validate_tracks = {} for tag in tag_tracks.keys(): if tag in CLASSES: tracks =", "del config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory +", "value in plot['values']: plt.plot(history.history[value]) legend = value.replace('_', ' ').title() legends.append('Training ' + legend)", "pass return tracks def tracks_by_tag(tracks): tag_tracks = {t: [] for t in CLASSES}", "frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks = {} for tag in", "tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn): if tp != 0: precision =", "= 2. * precision * recall / (precision + recall) return precision, recall,", "in CLASSES} for track in tracks: if track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag]", "print_tag_track_info(infos): for k in infos: tracks = infos[k] fcount = np.sum([t.frame_count for t", "= [] validate_use = [] for track_info in tracks: if vcount < validate_frame_counts[tag]:", "return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def print_track_information(training_tracks, validation_tracks): details = f'\\nTraining with", "flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks in tag_tracks.values(): flat_tracks += tracks return flat_tracks", "validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] = validate_use", "f: try: while True: tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc() pass return tracks", "tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type {callback_name}')", "= tp / (tp + fn) fscore = 2. 
* precision * recall", "for k in infos: tracks = infos[k] fcount = np.sum([t.frame_count for t in", "def flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks in tag_tracks.values(): flat_tracks += tracks return", "'\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for t in tracks]))", "tracks_by_tag(tracks): tag_tracks = {t: [] for t in CLASSES} for track in tracks:", "as np import pickle import tensorflow as tf import traceback from support.data_model import", "print(details) print(' Train Validate') for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n,", "validation_tracks): details = f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print('", "'/' + checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback':", "callback type {callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 * len(plots))) plt_position =", "t in CLASSES])) def print_track_information(training_tracks, validation_tracks): details = f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating", "[] validate_use = [] for track_info in tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info)", "training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s): print(s, file=f)", "import numpy as np import pickle import tensorflow as tf import traceback from", "(tp + fn) fscore = 2. 
* precision * recall / (precision +", "in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x", "train_tracks = {} validate_tracks = {} for tag in tag_tracks.keys(): if tag in", "while True: tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc() pass return tracks def tracks_by_tag(tracks):", "with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train Validate') for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7}", "tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type {callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6", "first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s): print(s,", "= tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use = [] validate_use = [] for", "t in CLASSES} for track in tracks: if track.tag in TAG_CLASS_MAP: track.tag =", "for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n,", "/ (tp + fp) recall = tp / (tp + fn) fscore =", "CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x =", "load_raw_tracks(path): tracks = [] with open(path, 'rb') as f: try: while True: tracks.append(pickle.load(f))", "= save_directory + '/' + checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif", "recall = tp / (tp + fn) fscore = 2. 
* precision *", "== 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown", "{frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return", "+ recall) return precision, recall, fscore else: return 0.0, 0.0, 0.0 def build_callback(config,", "* 100 + 11 for i, plot in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title'])", "checkpoints to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name", "import pickle import tensorflow as tf import traceback from support.data_model import TAG_CLASS_MAP, CLASSES", "== 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory + '/' + checkpoint_filename print(f'saving", "f.write('\\nTraining configuration:\\n' + training_config_text + '\\n') f.write('\\nModel configuration:\\n' + model_config_text + '\\n') print(model.summary(print_fn=summary_print))", "else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] = validate_use return train_tracks, validate_tracks def first_time_model(model,", "configuration:\\n' + model_config_text + '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count", "'w') as f: def summary_print(s): print(s, file=f) f.write('\\nTraining configuration:\\n' + training_config_text + '\\n')", "config_copy['filepath'] = save_directory + '/' + checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}') 
return tf.keras.callbacks.ModelCheckpoint(**config_copy)", "legend = value.replace('_', ' ').title() legends.append('Training ' + legend) value = 'val_' +", "all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def print_track_information(training_tracks, validation_tracks): details = f'\\nTraining", "in tag_tracks.keys(): if tag in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount = 0", "x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn):", "config_copy = config.copy() del config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath']", "[] for t in CLASSES} for track in tracks: if track.tag in TAG_CLASS_MAP:", "0.0, 0.0, 0.0 def build_callback(config, save_directory): callback_name = config['name'] config_copy = config.copy() del", "import matplotlib.pyplot as plt import numpy as np import pickle import tensorflow as", "< validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] =", "return 0.0, 0.0, 0.0 def build_callback(config, save_directory): callback_name = config['name'] config_copy = config.copy()", "* precision * recall / (precision + recall) return precision, recall, fscore else:", "{all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train Validate') for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}')", "= value.replace('_', ' ').title() legends.append('Training ' + legend) value = 'val_' + value", "in CLASSES])) def print_track_information(training_tracks, validation_tracks): details = f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating with", "support.data_model 
import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks = [] with open(path, 'rb') as", "True: tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc() pass return tracks def tracks_by_tag(tracks): tag_tracks", "split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks = {} for tag in tag_tracks.keys(): if", "return int(np.sum([t.frame_count for t in tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in", "+ '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for t in", "!= 0: precision = tp / (tp + fp) recall = tp /", "open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s): print(s, file=f) f.write('\\nTraining configuration:\\n' + training_config_text +", "'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type {callback_name}') def draw_figures(history, plots, save_directory):", "print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s): print(s, file=f) f.write('\\nTraining configuration:\\n' +", "if tag in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use =", "value.replace('_', ' ').title() legends.append('Training ' + legend) value = 'val_' + value plt.plot(history.history[value])", "= config_copy['filepath'] config_copy['filepath'] = save_directory + '/' + checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}')", "if tp != 0: precision = tp / (tp + fp) recall =", "in tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag]", "{frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, 
kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x)", "save_directory + '/' + checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name", "k in infos: tracks = infos[k] fcount = np.sum([t.frame_count for t in tracks])", "' + legend) value = 'val_' + value plt.plot(history.history[value]) legends.append('Validation ' + legend)", "plt.subplot(plt_position + i) plt.title(plot['title']) legends = [] for value in plot['values']: plt.plot(history.history[value]) legend", "plt import numpy as np import pickle import tensorflow as tf import traceback", "traceback from support.data_model import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks = [] with open(path,", "tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn): if tp != 0: precision = tp /", "tracks]) print(f'{k}: {len(tracks)} tracks with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {}", "track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] = validate_use return train_tracks, validate_tracks def", "validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train Validate') for key in CLASSES: print(f'{key:12}", "return precision, recall, fscore else: return 0.0, 0.0, 0.0 def build_callback(config, save_directory): callback_name", "infos: tracks = infos[k] fcount = np.sum([t.frame_count for t in tracks]) print(f'{k}: {len(tracks)}", "'\\n') f.write('\\nModel configuration:\\n' + model_config_text + '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks):", "frame_count(tracks): return int(np.sum([t.frame_count for t in tracks])) def all_frame_counts(tag_tracks): return 
int(np.sum([frame_count(tag_tracks[t]) for t", "Exception(f'Unknown callback type {callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 * len(plots))) plt_position", "to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name ==", "validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f: def", "summary_print(s): print(s, file=f) f.write('\\nTraining configuration:\\n' + training_config_text + '\\n') f.write('\\nModel configuration:\\n' + model_config_text", "recall) return precision, recall, fscore else: return 0.0, 0.0, 0.0 def build_callback(config, save_directory):", "CLASSES])) def print_track_information(training_tracks, validation_tracks): details = f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)}", "tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def compute_scores(tp, fp, fn): if tp", "np.sum([t.frame_count for t in tracks]) print(f'{k}: {len(tracks)} tracks with {fcount} frames') def split_training_validation(tag_tracks,", "config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory + '/'", "def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks = {} for tag in tag_tracks.keys():", "f: def summary_print(s): print(s, file=f) f.write('\\nTraining configuration:\\n' + training_config_text + '\\n') f.write('\\nModel configuration:\\n'", "0 train_use = [] validate_use = [] for track_info in tracks: if vcount", "tracks = infos[k] fcount = np.sum([t.frame_count 
for t in tracks]) print(f'{k}: {len(tracks)} tracks", "import traceback from support.data_model import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks = [] with", "TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = [] for", "flat_tracks def print_tag_track_info(infos): for k in infos: tracks = infos[k] fcount = np.sum([t.frame_count", "CLASSES} for track in tracks: if track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track)", "for i, plot in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends = [] for", "tag_tracks.keys(): if tag in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use", "details = f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train", "checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy)", "+ '\\n') f.write('\\nModel configuration:\\n' + model_config_text + '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def", "configuration:\\n' + training_config_text + '\\n') f.write('\\nModel configuration:\\n' + model_config_text + '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model,", "as e: traceback.print_exc() pass return tracks def tracks_by_tag(tracks): tag_tracks = {t: [] for", "tag_tracks.values(): flat_tracks += tracks return flat_tracks def print_tag_track_info(infos): for k in infos: tracks", "legends.append('Training ' + legend) value = 'val_' + value plt.plot(history.history[value]) legends.append('Validation ' +", "def 
frame_count(tracks): return int(np.sum([t.frame_count for t in tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for", "return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks in tag_tracks.values(): flat_tracks +=", "= len(plots) * 100 + 11 for i, plot in enumerate(plots): plt.subplot(plt_position +", "Train Validate') for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x", "11 for i, plot in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends = []", "config.copy() del config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory", "fscore else: return 0.0, 0.0, 0.0 def build_callback(config, save_directory): callback_name = config['name'] config_copy", "= [] with open(path, 'rb') as f: try: while True: tracks.append(pickle.load(f)) except Exception", "for track in tracks: if track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return", "{callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 * len(plots))) plt_position = len(plots) *", "in infos: tracks = infos[k] fcount = np.sum([t.frame_count for t in tracks]) print(f'{k}:", "for t in tracks]) print(f'{k}: {len(tracks)} tracks with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts):", "flat_tracks = [] for tracks in tag_tracks.values(): flat_tracks += tracks return flat_tracks def", "traceback.print_exc() pass return tracks def tracks_by_tag(tracks): tag_tracks = {t: [] for t in", "track_info in tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info)", "in tracks: if track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] 
tag_tracks[track.tag].append(track) return tag_tracks def", "+= tracks return flat_tracks def print_tag_track_info(infos): for k in infos: tracks = infos[k]", "Exception as e: traceback.print_exc() pass return tracks def tracks_by_tag(tracks): tag_tracks = {t: []", "in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use = [] validate_use", "plt_position = len(plots) * 100 + 11 for i, plot in enumerate(plots): plt.subplot(plt_position", "in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = []", "* len(plots))) plt_position = len(plots) * 100 + 11 for i, plot in", "return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type {callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8,", "= {} for tag in tag_tracks.keys(): if tag in CLASSES: tracks = tag_tracks[tag]", "tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for t in tracks])) def all_frame_counts(tag_tracks):", "validate_frame_counts): train_tracks = {} validate_tracks = {} for tag in tag_tracks.keys(): if tag", "validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] = validate_use return", "in plot['values']: plt.plot(history.history[value]) legend = value.replace('_', ' ').title() legends.append('Training ' + legend) value", "train_use.append(track_info) train_tracks[tag] = train_use validate_tracks[tag] = validate_use return train_tracks, validate_tracks def first_time_model(model, training_config_text,", "TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks = [] with open(path, 'rb') as f: try:", "print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, 
to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for t in tracks])) def", "save_directory): callback_name = config['name'] config_copy = config.copy() del config_copy['name'] if callback_name == 'checkpoint_callback':", "plot['values']: plt.plot(history.history[value]) legend = value.replace('_', ' ').title() legends.append('Training ' + legend) value =", "tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def print_track_information(training_tracks, validation_tracks): details", "(tp + fp) recall = tp / (tp + fn) fscore = 2.", "value = 'val_' + value plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0) plt.ylabel(plot['y-label'])", "e: traceback.print_exc() pass return tracks def tracks_by_tag(tracks): tag_tracks = {t: [] for t", "for t in CLASSES} for track in tracks: if track.tag in TAG_CLASS_MAP: track.tag", "/ (precision + recall) return precision, recall, fscore else: return 0.0, 0.0, 0.0", "to_file=f'{save_directory}/model.png', show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for t in tracks])) def all_frame_counts(tag_tracks): return", "= infos[k] fcount = np.sum([t.frame_count for t in tracks]) print(f'{k}: {len(tracks)} tracks with", "in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends = [] for value in plot['values']:", "callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] = save_directory + '/' + checkpoint_filename", "tp / (tp + fp) recall = tp / (tp + fn) fscore", "train_use validate_tracks[tag] = validate_use return train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary())", "= [] for tracks in tag_tracks.values(): flat_tracks += tracks return flat_tracks def 
print_tag_track_info(infos):", "len(plots) * 100 + 11 for i, plot in enumerate(plots): plt.subplot(plt_position + i)", "print(f'{k}: {len(tracks)} tracks with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks", "precision * recall / (precision + recall) return precision, recall, fscore else: return", "import json import matplotlib.pyplot as plt import numpy as np import pickle import", "0: precision = tp / (tp + fp) recall = tp / (tp", "{len(tracks)} tracks with {fcount} frames') def split_training_validation(tag_tracks, validate_frame_counts): train_tracks = {} validate_tracks =", "+ fp) recall = tp / (tp + fn) fscore = 2. *", "import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks = [] with open(path, 'rb') as f:", "show_shapes=True) def frame_count(tracks): return int(np.sum([t.frame_count for t in tracks])) def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t])", "key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x)", "def dense_norm_relu(n, x): x = tf.keras.layers.Dense(n, kernel_initializer='he_normal')(x) x = tf.keras.layers.BatchNormalization()(x) return tf.keras.layers.Activation(\"relu\")(x) def", "for value in plot['values']: plt.plot(history.history[value]) legend = value.replace('_', ' ').title() legends.append('Training ' +", "print_track_information(training_tracks, validation_tracks): details = f'\\nTraining with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details)", "import tensorflow as tf import traceback from support.data_model import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path):", "[] for value in plot['values']: plt.plot(history.history[value]) legend = value.replace('_', ' ').title() legends.append('Training '", 
"validate_use = [] for track_info in tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount", "= [] for value in plot['values']: plt.plot(history.history[value]) legend = value.replace('_', ' ').title() legends.append('Training", "= config['name'] config_copy = config.copy() del config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename =", "tracks in tag_tracks.values(): flat_tracks += tracks return flat_tracks def print_tag_track_info(infos): for k in", "def all_frame_counts(tag_tracks): return int(np.sum([frame_count(tag_tracks[t]) for t in CLASSES])) def print_track_information(training_tracks, validation_tracks): details =", "plot in enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends = [] for value in", "def build_callback(config, save_directory): callback_name = config['name'] config_copy = config.copy() del config_copy['name'] if callback_name", "{} validate_tracks = {} for tag in tag_tracks.keys(): if tag in CLASSES: tracks", "with open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s): print(s, file=f) f.write('\\nTraining configuration:\\n' + training_config_text", "'rb') as f: try: while True: tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc() pass", "else: raise Exception(f'Unknown callback type {callback_name}') def draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 *", "+ checkpoint_filename print(f'saving checkpoints to {config_copy[\"filepath\"]}') return tf.keras.callbacks.ModelCheckpoint(**config_copy) elif callback_name == 'lr_callback': return", "for tracks in tag_tracks.values(): flat_tracks += tracks return flat_tracks def print_tag_track_info(infos): for k", "recall, fscore else: return 0.0, 0.0, 0.0 def build_callback(config, save_directory): callback_name = config['name']", "save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as f: def summary_print(s): print(s, 
file=f) f.write('\\nTraining configuration:\\n'", "return train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory): print(model.summary()) with open(f'{save_directory}/model.txt', 'w') as", "tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use = [] validate_use = []", "len(plots))) plt_position = len(plots) * 100 + 11 for i, plot in enumerate(plots):", "callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type {callback_name}') def draw_figures(history,", "for t in CLASSES])) def print_track_information(training_tracks, validation_tracks): details = f'\\nTraining with {all_frame_counts(training_tracks)} frames,", "= np.sum([t.frame_count for t in tracks]) print(f'{k}: {len(tracks)} tracks with {fcount} frames') def", "'val_' + value plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0) plt.ylabel(plot['y-label']) plt.xlabel('Epoch') plt.legend(legends,", "== 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback type {callback_name}') def draw_figures(history, plots,", "= 0 train_use = [] validate_use = [] for track_info in tracks: if", "draw_figures(history, plots, save_directory): plt.figure(figsize=(8, 6 * len(plots))) plt_position = len(plots) * 100 +", "tag_tracks[tag] np.random.shuffle(tracks) vcount = 0 train_use = [] validate_use = [] for track_info", "tf import traceback from support.data_model import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks = []", "in tag_tracks.values(): flat_tracks += tracks return flat_tracks def print_tag_track_info(infos): for k in infos:", "(precision + recall) return precision, recall, fscore else: return 0.0, 0.0, 0.0 def", "precision, recall, fscore else: return 0.0, 0.0, 0.0 def build_callback(config, save_directory): callback_name =", "as tf 
import traceback from support.data_model import TAG_CLASS_MAP, CLASSES def load_raw_tracks(path): tracks =", "{t: [] for t in CLASSES} for track in tracks: if track.tag in", "with open(path, 'rb') as f: try: while True: tracks.append(pickle.load(f)) except Exception as e:", "TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks = [] for tracks in tag_tracks.values():", "return tracks def tracks_by_tag(tracks): tag_tracks = {t: [] for t in CLASSES} for", "{} for tag in tag_tracks.keys(): if tag in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks)", "frames:\\n' print(details) print(' Train Validate') for key in CLASSES: print(f'{key:12} {frame_count(training_tracks[key]):>7} {frame_count(validation_tracks[key]):>7}') def", "plt.figure(figsize=(8, 6 * len(plots))) plt_position = len(plots) * 100 + 11 for i,", "pickle import tensorflow as tf import traceback from support.data_model import TAG_CLASS_MAP, CLASSES def", "train_use = [] validate_use = [] for track_info in tracks: if vcount <", "+ legend) value = 'val_' + value plt.plot(history.history[value]) legends.append('Validation ' + legend) plt.xlim(left=1)", "tracks return flat_tracks def print_tag_track_info(infos): for k in infos: tracks = infos[k] fcount", "np.random.shuffle(tracks) vcount = 0 train_use = [] validate_use = [] for track_info in", "as f: try: while True: tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc() pass return", "save_directory): plt.figure(figsize=(8, 6 * len(plots))) plt_position = len(plots) * 100 + 11 for", "= config.copy() del config_copy['name'] if callback_name == 'checkpoint_callback': checkpoint_filename = config_copy['filepath'] config_copy['filepath'] =", "legends.append('Validation ' + legend) plt.xlim(left=1) plt.ylim(0.0,1.0) plt.ylabel(plot['y-label']) plt.xlabel('Epoch') plt.legend(legends, loc=plot['caption-loc'], framealpha=.5) 
plt.savefig(f'{save_directory}/history.png') plt.close()", "CLASSES def load_raw_tracks(path): tracks = [] with open(path, 'rb') as f: try: while", "= train_use validate_tracks[tag] = validate_use return train_tracks, validate_tracks def first_time_model(model, training_config_text, model_config_text, save_directory):", "for track_info in tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else:", "fp, fn): if tp != 0: precision = tp / (tp + fp)", "if track.tag in TAG_CLASS_MAP: track.tag = TAG_CLASS_MAP[track.tag] tag_tracks[track.tag].append(track) return tag_tracks def flatten_tag_tracks(tag_tracks): flat_tracks", "np import pickle import tensorflow as tf import traceback from support.data_model import TAG_CLASS_MAP,", "with {all_frame_counts(training_tracks)} frames, validating with {all_frame_counts(validation_tracks)} frames:\\n' print(details) print(' Train Validate') for key", "fn): if tp != 0: precision = tp / (tp + fp) recall", "'lr_callback': return tf.keras.callbacks.ReduceLROnPlateau(**config_copy) elif callback_name == 'stopping_callback': return tf.keras.callbacks.EarlyStopping(**config_copy) else: raise Exception(f'Unknown callback", "for tag in tag_tracks.keys(): if tag in CLASSES: tracks = tag_tracks[tag] np.random.shuffle(tracks) vcount", "enumerate(plots): plt.subplot(plt_position + i) plt.title(plot['title']) legends = [] for value in plot['values']: plt.plot(history.history[value])", "tracks: if vcount < validate_frame_counts[tag]: validate_use.append(track_info) vcount += track_info.frame_count else: train_use.append(track_info) train_tracks[tag] =", "plt.title(plot['title']) legends = [] for value in plot['values']: plt.plot(history.history[value]) legend = value.replace('_', '", "6 * len(plots))) plt_position = len(plots) * 100 + 11 for i, plot", "open(path, 'rb') as f: try: while True: tracks.append(pickle.load(f)) except Exception as e: traceback.print_exc()", "i) 
plt.title(plot['title']) legends = [] for value in plot['values']: plt.plot(history.history[value]) legend = value.replace('_',", "2. * precision * recall / (precision + recall) return precision, recall, fscore", "0.0 def build_callback(config, save_directory): callback_name = config['name'] config_copy = config.copy() del config_copy['name'] if", "compute_scores(tp, fp, fn): if tp != 0: precision = tp / (tp +", "+ training_config_text + '\\n') f.write('\\nModel configuration:\\n' + model_config_text + '\\n') print(model.summary(print_fn=summary_print)) tf.keras.utils.plot_model(model, to_file=f'{save_directory}/model.png',", "file=f) f.write('\\nTraining configuration:\\n' + training_config_text + '\\n') f.write('\\nModel configuration:\\n' + model_config_text + '\\n')" ]
[ "import pytest from pytest_bdd import scenarios, scenario @scenario( feature_name='features/lighting.feature', scenario_name='The lights are controlled',", "import scenarios, scenario @scenario( feature_name='features/lighting.feature', scenario_name='The lights are controlled', example_converters=dict( light_id=str, light_begin_state=str, light_function=str,", "from pytest_bdd import scenarios, scenario @scenario( feature_name='features/lighting.feature', scenario_name='The lights are controlled', example_converters=dict( light_id=str,", "scenario_name='The lights are controlled', example_converters=dict( light_id=str, light_begin_state=str, light_function=str, light_final_state=str )) def test_turn_on_the_lights(): pass", "@scenario( feature_name='features/lighting.feature', scenario_name='The lights are controlled', example_converters=dict( light_id=str, light_begin_state=str, light_function=str, light_final_state=str )) def", "pytest_bdd import scenarios, scenario @scenario( feature_name='features/lighting.feature', scenario_name='The lights are controlled', example_converters=dict( light_id=str, light_begin_state=str,", "scenario @scenario( feature_name='features/lighting.feature', scenario_name='The lights are controlled', example_converters=dict( light_id=str, light_begin_state=str, light_function=str, light_final_state=str ))", "feature_name='features/lighting.feature', scenario_name='The lights are controlled', example_converters=dict( light_id=str, light_begin_state=str, light_function=str, light_final_state=str )) def test_turn_on_the_lights():", "scenarios, scenario @scenario( feature_name='features/lighting.feature', scenario_name='The lights are controlled', example_converters=dict( light_id=str, light_begin_state=str, light_function=str, light_final_state=str", "pytest from pytest_bdd import scenarios, scenario @scenario( feature_name='features/lighting.feature', scenario_name='The lights are controlled', example_converters=dict(" ]
[ "PIL import Image import os, pprint old_directory = 'old' new_directory = 'new' new_origin", "'old' new_directory = 'new' new_origin = (36, 32) for file in os.listdir(old_directory): filename", "= img.crop( ( new_origin[0], new_origin[1], 675 + new_origin[0], 976 + new_origin[1], ) )", "= img.size[0] height = img.size[1] if height != 1040: print(file) continue cropped_img =", "file) img = Image.open(filename) width = img.size[0] height = img.size[1] if height !=", "= 'new' new_origin = (36, 32) for file in os.listdir(old_directory): filename = \"{}/{}\".format(old_directory,", "file in os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file) img = Image.open(filename) width = img.size[0]", "from PIL import Image import os, pprint old_directory = 'old' new_directory = 'new'", "continue cropped_img = img.crop( ( new_origin[0], new_origin[1], 675 + new_origin[0], 976 + new_origin[1],", "new_origin[0], new_origin[1], 675 + new_origin[0], 976 + new_origin[1], ) ) save_location = \"{}/{}\".format(new_directory,", "new_origin = (36, 32) for file in os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file) img", "pprint old_directory = 'old' new_directory = 'new' new_origin = (36, 32) for file", "1040: print(file) continue cropped_img = img.crop( ( new_origin[0], new_origin[1], 675 + new_origin[0], 976", "os, pprint old_directory = 'old' new_directory = 'new' new_origin = (36, 32) for", "= \"{}/{}\".format(old_directory, file) img = Image.open(filename) width = img.size[0] height = img.size[1] if", "img.crop( ( new_origin[0], new_origin[1], 675 + new_origin[0], 976 + new_origin[1], ) ) save_location", "os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file) img = Image.open(filename) width = img.size[0] height =", "img = Image.open(filename) width = img.size[0] height = img.size[1] if height != 1040:", "import Image import os, pprint old_directory = 'old' new_directory = 'new' new_origin =", "for file in 
os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file) img = Image.open(filename) width =", "if height != 1040: print(file) continue cropped_img = img.crop( ( new_origin[0], new_origin[1], 675", "new_origin[1], 675 + new_origin[0], 976 + new_origin[1], ) ) save_location = \"{}/{}\".format(new_directory, file)", "img.size[0] height = img.size[1] if height != 1040: print(file) continue cropped_img = img.crop(", "= Image.open(filename) width = img.size[0] height = img.size[1] if height != 1040: print(file)", "height != 1040: print(file) continue cropped_img = img.crop( ( new_origin[0], new_origin[1], 675 +", "Image.open(filename) width = img.size[0] height = img.size[1] if height != 1040: print(file) continue", "!= 1040: print(file) continue cropped_img = img.crop( ( new_origin[0], new_origin[1], 675 + new_origin[0],", "( new_origin[0], new_origin[1], 675 + new_origin[0], 976 + new_origin[1], ) ) save_location =", "= img.size[1] if height != 1040: print(file) continue cropped_img = img.crop( ( new_origin[0],", "'new' new_origin = (36, 32) for file in os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file)", "import os, pprint old_directory = 'old' new_directory = 'new' new_origin = (36, 32)", "= (36, 32) for file in os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file) img =", "in os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file) img = Image.open(filename) width = img.size[0] height", "= 'old' new_directory = 'new' new_origin = (36, 32) for file in os.listdir(old_directory):", "\"{}/{}\".format(old_directory, file) img = Image.open(filename) width = img.size[0] height = img.size[1] if height", "675 + new_origin[0], 976 + new_origin[1], ) ) save_location = \"{}/{}\".format(new_directory, file) cropped_img.save(save_location)", "new_directory = 'new' new_origin = (36, 32) for file in os.listdir(old_directory): filename =", "height = img.size[1] if height != 1040: print(file) 
continue cropped_img = img.crop( (", "Image import os, pprint old_directory = 'old' new_directory = 'new' new_origin = (36,", "old_directory = 'old' new_directory = 'new' new_origin = (36, 32) for file in", "width = img.size[0] height = img.size[1] if height != 1040: print(file) continue cropped_img", "print(file) continue cropped_img = img.crop( ( new_origin[0], new_origin[1], 675 + new_origin[0], 976 +", "(36, 32) for file in os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file) img = Image.open(filename)", "img.size[1] if height != 1040: print(file) continue cropped_img = img.crop( ( new_origin[0], new_origin[1],", "32) for file in os.listdir(old_directory): filename = \"{}/{}\".format(old_directory, file) img = Image.open(filename) width", "cropped_img = img.crop( ( new_origin[0], new_origin[1], 675 + new_origin[0], 976 + new_origin[1], )", "filename = \"{}/{}\".format(old_directory, file) img = Image.open(filename) width = img.size[0] height = img.size[1]" ]
[ "yy = hstack(y) self.XX = XX self.yy = yy self.model = model def", "import Clair import matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is a type of machine", "C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold,", "= None self.yy = None self.model = None def runModel(self, data): \"\"\"Run backtesting.", "self.buyStats() > 50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50: prnt = f\"{red}{self.buyStats()}%{end}\"", "from clairvoyant import Clair import matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is a type", "that represent learning features. :param trainStart: A datetime as a string that should", "buy. Default 0.65. :param sellThreshold: Defines the confidence level at which Clair will", ":param prediction: Value of 1 or -1 representing an up or down performance.", "= Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0],", ":param trainEnd: A datetime as a string that should be consistent with the", "else: prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output", "Periods: {self.periods}\") print(f\"Total Price Increases: {self.increases}\") print(f\"Total Price Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print", "print(\"Error: Please run model before visualizing\") return X, y = self.XX, self.yy X", "Defines the end date for model testing. :param buyThreshold: Defines the confidence level", "for model training. :param trainEnd: A datetime as a string that should be", "visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output a visualization of the backtesting results. 
The diagram", "self.yy = None self.model = None def runModel(self, data): \"\"\"Run backtesting. :param data:", "1 or -1 representing an up or down performance. :param performance: A positive", "print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\")", "is run. Warning: may result in a lot of output. \"\"\" def __init__(", "import vstack, hstack from pytz import timezone from clairvoyant import Clair import matplotlib", "self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X) yy = hstack(y) self.XX = XX self.yy", "if self.buyStats() > 50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50: prnt =", "\"\"\"Backtest is a type of machine learning classifier. The purpose of ``Backtest`` is", "RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1, 1, 1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()])", "= 0 self.correctBuys = 0 self.totalSells = 0 self.correctSells = 0 self.increases =", "timezone associated with the datetime parameters. Default UTC. :ivar debug: A boolean value", "if len(self.variables) != 2: print(\"Error: Plotting is restricted to 2 dimensions\") return if", "\"\"\"Increment the sell count.\"\"\" self.totalSells += 1 if self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self,", "if data from the testing period should be used to continue training the", "yy self.model = model def buyLogic(self, *args, **kwargs): \"\"\"Increment the buy count.\"\"\" self.totalBuys", "performance > 0: self.increases += 1 if prediction == 1: self.correctBuys += 1", "'\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i = 1 for var in self.variables: print(f\"X{i}: {var}\") i", "The color intensity represents the distribution of probability. 
\"\"\" import matplotlib.pyplot as plt", "model before displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0 for stock in", "A boolean value that determines if debug strings will be printed as backtesting", "self.totalBuys = 0 self.correctBuys = 0 self.totalSells = 0 self.correctSells = 0 self.increases", "None or self.model is None): print(\"Error: Please run model before visualizing\") return X,", "X, y = self.learn(data) self.execute(data, model, X, y) # Save for vizualization purposes", "start date for model testing. :param testEnd: A datetime as a string that", "for false positives. See scikit-learn documentation for more details. Default 1. :param gamma:", "Please run model before displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0 for", "correct and incorrect buys and sells. :param prediction: Value of 1 or -1", "sklearn.preprocessing import StandardScaler from numpy import vstack, hstack from pytz import timezone from", "width=5, height=5, stepsize=0.02): \"\"\"Output a visualization of the backtesting results. 
The diagram overlays", "{self.increases}\") print(f\"Total Price Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print the collected backtesting statistics.\"\"\" bld,", "displayStats(self): \"\"\"Print the collected backtesting statistics.\"\"\" bld, gre, red, end = '\\033[1m', '\\033[92m',", "numpy import meshgrid, arange, c_ from sklearn.preprocessing import StandardScaler from numpy import vstack,", "vstack, hstack from pytz import timezone from clairvoyant import Clair import matplotlib matplotlib.use('Agg')", "*args, **kwargs): \"\"\"Increment the buy count.\"\"\" self.totalBuys += 1 if self.debug: super().buyLogic(*args, **kwargs)", "prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50: prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt =", "stepsize), arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width, height)) cm = plt.cm.RdBu RedBlue = ListedColormap(['#FF312E',", "confidence level at which Clair will will recommend a buy. Default 0.65. :param", "to continue training the model during the testing phase. Default False. :param tz:", "Default False. :param tz: The timezone associated with the datetime parameters. Default UTC.", "parameter. Defines the end date for model testing. :param buyThreshold: Defines the confidence", "= f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50: prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\"", "observed performance. 
\"\"\" self.periods += 1 if performance > 0: self.increases += 1", "classifications while providing a quick and easy way to vary parameters for rapid", "import matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is a type of machine learning classifier.", "= 0 self.periods = 0 def buyStats(self): \"\"\"Return the collected buy statistics.\"\"\" try:", "Clair import matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is a type of machine learning", "or self.model is None): print(\"Error: Please run model before visualizing\") return X, y", "False # Visualize self.XX = None self.yy = None self.model = None def", "Stats self.stocks = [] self.dates = [] self.totalBuys = 0 self.correctBuys = 0", "for model training. :param testStart: A datetime as a string that should be", "False. :param tz: The timezone associated with the datetime parameters. Default UTC. :ivar", "and sells. :param prediction: Value of 1 or -1 representing an up or", "+= 1 if self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs) def clearStats(self): \"\"\"Reset all collected", "at which Clair will recommend a sell. Default 0.65. :param C: A penalty", "run model before displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0 for stock", "a string that should be consistent with the ``tz`` parameter. Defines the start", "visualizing\") return X, y = self.XX, self.yy X = StandardScaler().fit_transform(X) self.model.fit(X, y) x_min,", "Default UTC. :ivar debug: A boolean value that determines if debug strings will", "Training: {self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\") print(f\"Total Price Increases: {self.increases}\") print(f\"Total Price Decreases:", "of learned classifications while providing a quick and easy way to vary parameters", "of the backtesting results. 
The diagram overlays training and testing observations on top", "var in self.variables: print(f\"X{i}: {var}\") i += 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold:", "or negative value representing the actual observed performance. \"\"\" self.periods += 1 if", "debug: A boolean value that determines if debug strings will be printed as", "import meshgrid, arange, c_ from sklearn.preprocessing import StandardScaler from numpy import vstack, hstack", "[] self.dates = [] self.totalBuys = 0 self.correctBuys = 0 self.totalSells = 0", "self.variables: print(f\"X{i}: {var}\") i += 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C:", "= plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1, 1, 1) Z =", "**kwargs): \"\"\"Increment the buy count.\"\"\" self.totalBuys += 1 if self.debug: super().buyLogic(*args, **kwargs) def", "a sell. Default 0.65. :param C: A penalty parameter for false positives. See", "Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:, 1], c=y, cmap=RedBlue) Axes.set_xlim(xx.min(),", "def displayStats(self): \"\"\"Print the collected backtesting statistics.\"\"\" bld, gre, red, end = '\\033[1m',", "also provides some convenience functions for visualizing collected statistics. :param variables: A list", "provides some convenience functions for visualizing collected statistics. 
:param variables: A list of", "ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1, 1, 1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z =", "trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz ) # Stats self.stocks", "and testing parameters.\"\"\" bld, end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i = 1 for", "import ListedColormap if len(self.variables) != 2: print(\"Error: Plotting is restricted to 2 dimensions\")", "sell. Default 0.65. :param C: A penalty parameter for false positives. See scikit-learn", "f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\") if self.sellStats() > 50: prnt =", "on top of a color coded representation of learned recommendations. The color intensity", "testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz ) # Stats self.stocks =", "self.increases += 1 if prediction == 1: self.correctBuys += 1 elif performance <", "Default 10. :param continuedTraining: Determine if data from the testing period should be", "f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt = None", "collected buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return float(0) def sellStats(self):", "height)) cm = plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1, 1, 1)", "0 self.correctBuys = 0 self.totalSells = 0 self.correctSells = 0 self.increases = 0", "and test phases. 
\"\"\" # Learn and execute model, X, y = self.learn(data)", "bld, gre, red, end = '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates) == 0:", "self.sellStats() < 50: prnt = f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\")", "< 50: prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total", "learning features. :param trainStart: A datetime as a string that should be consistent", "parameter. Defines the start date for model testing. :param testEnd: A datetime as", "test those on historical stock data. \"\"\" from numpy import meshgrid, arange, c_", "C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz ) # Stats self.stocks = [] self.dates = []", "value that determines if debug strings will be printed as backtesting is run.", "all collected statistics.\"\"\" self.dates = [] self.totalBuys = 0 self.correctBuys = 0 self.totalSells", "**kwargs) def sellLogic(self, *args, **kwargs): \"\"\"Increment the sell count.\"\"\" self.totalSells += 1 if", "{prnt}\") print(f\"Total Sells: {self.totalSells}\") if self.sellStats() > 50: prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats()", "of stock data that includes observations in both the training and test phases.", "learned classifications while providing a quick and easy way to vary parameters for", "The kernel coefficient for machine learning. See scikit-learn documentation for more details. Default", "Increases: {self.increases}\") print(f\"Total Price Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print the collected backtesting statistics.\"\"\"", "consistent with the ``tz`` parameter. Defines the start date for model training. 
:param", "prediction == 1: self.correctBuys += 1 elif performance < 0: self.decreases += 1", "- 0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min() - 0.5,", "bld, end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i = 1 for var in self.variables:", "Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\")", "Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total", "if self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self, *args, **kwargs): \"\"\"Increment the sell count.\"\"\" self.totalSells", "consistent with the ``tz`` parameter. Defines the end date for model training. :param", "plt.subplot(1, 1, 1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1]", "except ZeroDivisionError: return float(0) def displayConditions(self): \"\"\"Print the learning and testing parameters.\"\"\" bld,", "Price Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print the collected backtesting statistics.\"\"\" bld, gre, red,", "probability. \"\"\" import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap if len(self.variables) !=", "+= 1 elif performance < 0: self.decreases += 1 if prediction == -1:", "print(\"Error: Plotting is restricted to 2 dimensions\") return if (self.XX is None or", "to 2 dimensions\") return if (self.XX is None or self.yy is None or", "self.periods = 0 self.debug = False # Visualize self.XX = None self.yy =", "-1 representing an up or down performance. :param performance: A positive or negative", "the ``tz`` parameter. Defines the end date for model training. 
:param testStart: A", "1].max() + 0.5 xx, yy = meshgrid( arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize)", "self.correctBuys += 1 elif performance < 0: self.decreases += 1 if prediction ==", "+= 1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt = None if self.buyStats() > 50: prnt", "= meshgrid( arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width, height)) cm =", "print(\"Error: Please run model before displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0", "count.\"\"\" self.totalSells += 1 if self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction, performance, *args,", "down performance. :param performance: A positive or negative value representing the actual observed", "= plt.subplot(1, 1, 1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) stock =", "level at which Clair will will recommend a buy. Default 0.65. :param sellThreshold:", "value representing the actual observed performance. \"\"\" self.periods += 1 if performance >", "or down performance. :param performance: A positive or negative value representing the actual", "if debug strings will be printed as backtesting is run. Warning: may result", "> 0: self.increases += 1 if prediction == 1: self.correctBuys += 1 elif", "trainStart: A datetime as a string that should be consistent with the ``tz``", "should be consistent with the ``tz`` parameter. 
Defines the end date for model", "execute model, X, y = self.learn(data) self.execute(data, model, X, y) # Save for", "self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs) def clearStats(self): \"\"\"Reset all collected statistics.\"\"\" self.dates =", "alpha=0.75) Axes.scatter(X[:, 0], X[:, 1], c=y, cmap=RedBlue) Axes.set_xlim(xx.min(), xx.max()) Axes.set_ylim(yy.min(), yy.max()) plt.savefig(stock+'.svg', format='svg')", "if performance > 0: self.increases += 1 if prediction == 1: self.correctBuys +=", "tz=tz ) # Stats self.stocks = [] self.dates = [] self.totalBuys = 0", "color coded representation of learned recommendations. The color intensity represents the distribution of", "a visualization of the backtesting results. The diagram overlays training and testing observations", "super().nextPeriodLogic(prediction, performance, *args, **kwargs) def clearStats(self): \"\"\"Reset all collected statistics.\"\"\" self.dates = []", "return print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0 for stock in self.stocks: print(f'{stock} | ',", "{prnt}\") def visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output a visualization of the backtesting results.", "convenience functions for visualizing collected statistics. :param variables: A list of columns that", "1].min() - 0.5, X[:, 1].max() + 0.5 xx, yy = meshgrid( arange(x_min, x_max,", "matplotlib.colors import ListedColormap if len(self.variables) != 2: print(\"Error: Plotting is restricted to 2", "2) except ZeroDivisionError: return float(0) def displayConditions(self): \"\"\"Print the learning and testing parameters.\"\"\"", "of probability. \"\"\" import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap if len(self.variables)", "various parameterizations. 
This module provides classes that allow clients to experiment with different", "trainEnd: A datetime as a string that should be consistent with the ``tz``", "X, y) # Save for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ])", "1 if self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self, *args, **kwargs): \"\"\"Increment the sell count.\"\"\"", ":param C: A penalty parameter for false positives. See scikit-learn documentation for more", "trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables,", "len(self.variables) != 2: print(\"Error: Plotting is restricted to 2 dimensions\") return if (self.XX", "restricted to 2 dimensions\") return if (self.XX is None or self.yy is None", "Defines the start date for model training. :param trainEnd: A datetime as a", "prnt = None if self.buyStats() > 50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() <", "negative value representing the actual observed performance. 
\"\"\" self.periods += 1 if performance", "return X, y = self.XX, self.yy X = StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max", "= 0 self.totalSells = 0 self.correctSells = 0 self.increases = 0 self.decreases =", "vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X) yy =", "{self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\") print(f\"Total", "2: print(\"Error: Plotting is restricted to 2 dimensions\") return if (self.XX is None", "'\\033[0m' if len(self.dates) == 0: print(\"Error: Please run model before displaying stats\") return", "confidence level at which Clair will recommend a sell. Default 0.65. :param C:", "cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:, 1], c=y, cmap=RedBlue) Axes.set_xlim(xx.min(), xx.max()) Axes.set_ylim(yy.min(), yy.max()) plt.savefig(stock+'.svg',", "intensity represents the distribution of probability. \"\"\" import matplotlib.pyplot as plt from matplotlib.colors", "None self.yy = None self.model = None def runModel(self, data): \"\"\"Run backtesting. :param", "a type of machine learning classifier. The purpose of ``Backtest`` is to collect", "buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables, trainStart, trainEnd, testStart, testEnd,", "\"\"\" self.periods += 1 if performance > 0: self.increases += 1 if prediction", "determines if debug strings will be printed as backtesting is run. 
Warning: may", "cm = plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1, 1, 1) Z", "*args, **kwargs): \"\"\"Increment the sell count.\"\"\" self.totalSells += 1 if self.debug: super().sellLogic(*args, **kwargs)", "= XX self.yy = yy self.model = model def buyLogic(self, *args, **kwargs): \"\"\"Increment", "self.XX = XX self.yy = yy self.model = model def buyLogic(self, *args, **kwargs):", "data. \"\"\" from numpy import meshgrid, arange, c_ from sklearn.preprocessing import StandardScaler from", "observations in both the training and test phases. \"\"\" # Learn and execute", "the ``tz`` parameter. Defines the start date for model training. :param trainEnd: A", "1 elif performance < 0: self.decreases += 1 if prediction == -1: self.correctSells", "self.increases = 0 self.decreases = 0 self.periods = 0 def buyStats(self): \"\"\"Return the", "as plt from matplotlib.colors import ListedColormap if len(self.variables) != 2: print(\"Error: Plotting is", "self.model.fit(X, y) x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5", "a buy. Default 0.65. :param sellThreshold: Defines the confidence level at which Clair", ":param tz: The timezone associated with the datetime parameters. Default UTC. :ivar debug:", "machine learning classifier. The purpose of ``Backtest`` is to collect statistics on the", "\"\"\"Print the collected backtesting statistics.\"\"\" bld, gre, red, end = '\\033[1m', '\\033[92m', '\\033[91m',", "prediction: Value of 1 or -1 representing an up or down performance. :param", "continue training the model during the testing phase. Default False. :param tz: The", "Value of 1 or -1 representing an up or down performance. 
:param performance:", "y = self.learn(data) self.execute(data, model, X, y) # Save for vizualization purposes self.dates.append([", "+= 1 if performance > 0: self.increases += 1 if prediction == 1:", "self.correctSells = 0 self.increases = 0 self.decreases = 0 self.periods = 0 self.debug", "clearStats(self): \"\"\"Reset all collected statistics.\"\"\" self.dates = [] self.totalBuys = 0 self.correctBuys =", "X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min()", "Accuracy: {prnt}\") def visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output a visualization of the backtesting", "that should be consistent with the ``tz`` parameter. Defines the end date for", "self.model is None): print(\"Error: Please run model before visualizing\") return X, y =", "self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X) yy = hstack(y) self.XX = XX", "model, X, y = self.learn(data) self.execute(data, model, X, y) # Save for vizualization", "for model testing. :param testEnd: A datetime as a string that should be", "statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return float(0) def displayConditions(self): \"\"\"Print the", "stock data that includes observations in both the training and test phases. \"\"\"", "\"\"\" # Learn and execute model, X, y = self.learn(data) self.execute(data, model, X,", "backtesting statistics.\"\"\" bld, gre, red, end = '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates)", ":param sellThreshold: Defines the confidence level at which Clair will recommend a sell.", "self.decreases += 1 if prediction == -1: self.correctSells += 1 if self.debug: super().nextPeriodLogic(prediction,", "with the ``tz`` parameter. Defines the start date for model training. :param trainEnd:", "that includes observations in both the training and test phases. \"\"\" # Learn", "learning. 
See scikit-learn documentation for more details. Default 10. :param continuedTraining: Determine if", "parameters. Default UTC. :ivar debug: A boolean value that determines if debug strings", "the learning and testing parameters.\"\"\" bld, end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i =", "with different machine learning parameterizations and test those on historical stock data. \"\"\"", "documentation for more details. Default 10. :param continuedTraining: Determine if data from the", "self.debug = False # Visualize self.XX = None self.yy = None self.model =", "self.model = model def buyLogic(self, *args, **kwargs): \"\"\"Increment the buy count.\"\"\" self.totalBuys +=", "<reponame>uclatommy/Clairvoyant \"\"\"Backtest provides a way of exploring and testing various parameterizations. This module", "f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50: prnt = f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%' print(f\"Sell", "1 for var in self.variables: print(f\"X{i}: {var}\") i += 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\")", "prediction == -1: self.correctSells += 1 if self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs) def", "statistics.\"\"\" bld, gre, red, end = '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates) ==", "the ``tz`` parameter. Defines the start date for model testing. :param testEnd: A", "features. :param trainStart: A datetime as a string that should be consistent with", "> 50: prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50: prnt = f'{red}{self.sellStats()}%{end}' else:", "tz=timezone('UTC') ): super().__init__( variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining,", ":param testStart: A datetime as a string that should be consistent with the", "visualization of the backtesting results. 
The diagram overlays training and testing observations on", "0 self.decreases = 0 self.periods = 0 self.debug = False # Visualize self.XX", "and test those on historical stock data. \"\"\" from numpy import meshgrid, arange,", "self.stocks: print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal Buys:", "Default 0.65. :param sellThreshold: Defines the confidence level at which Clair will recommend", "[] self.totalBuys = 0 self.correctBuys = 0 self.totalSells = 0 self.correctSells = 0", "count.\"\"\" self.totalBuys += 1 if self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self, *args, **kwargs): \"\"\"Increment", "provides classes that allow clients to experiment with different machine learning parameterizations and", "stepsize=0.02): \"\"\"Output a visualization of the backtesting results. The diagram overlays training and", "with the ``tz`` parameter. Defines the start date for model testing. :param testEnd:", "and testing observations on top of a color coded representation of learned recommendations.", "\"\"\"Run backtesting. :param data: A ``History`` of stock data that includes observations in", "sellLogic(self, *args, **kwargs): \"\"\"Increment the sell count.\"\"\" self.totalSells += 1 if self.debug: super().sellLogic(*args,", "performance. \"\"\" self.periods += 1 if performance > 0: self.increases += 1 if", "ZeroDivisionError: return float(0) def sellStats(self): \"\"\"Return the collected sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100,", "1 if self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction, performance, *args, **kwargs): \"\"\"Collect statistics", "i = 1 for var in self.variables: print(f\"X{i}: {var}\") i += 1 print(f\"Buy", "functions for visualizing collected statistics. 
:param variables: A list of columns that represent", "Buys: {self.totalBuys}\") prnt = None if self.buyStats() > 50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif", "displayConditions(self): \"\"\"Print the learning and testing parameters.\"\"\" bld, end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}')", "a string that should be consistent with the ``tz`` parameter. Defines the end", "len(self.dates) == 0: print(\"Error: Please run model before displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\")", "self.dates = [] self.totalBuys = 0 self.correctBuys = 0 self.totalSells = 0 self.correctSells", "0 self.debug = False # Visualize self.XX = None self.yy = None self.model", "50: prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells:", "self.periods = 0 def buyStats(self): \"\"\"Return the collected buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100,", "None): print(\"Error: Please run model before visualizing\") return X, y = self.XX, self.yy", "an up or down performance. :param performance: A positive or negative value representing", "'#6E8894']) Axes = plt.subplot(1, 1, 1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape)", "buy count.\"\"\" self.totalBuys += 1 if self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self, *args, **kwargs):", "self.buyStats() < 50: prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\")", "for visualizing collected statistics. :param variables: A list of columns that represent learning", "Defines the confidence level at which Clair will recommend a sell. Default 0.65.", "should be consistent with the ``tz`` parameter. Defines the start date for model", "date for model testing. 
:param buyThreshold: Defines the confidence level at which Clair", "to experiment with different machine learning parameterizations and test those on historical stock", "from matplotlib.colors import ListedColormap if len(self.variables) != 2: print(\"Error: Plotting is restricted to", "x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max =", "print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0 for stock in self.stocks: print(f'{stock} | ', f\"Training:", "0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 xx,", "strings will be printed as backtesting is run. Warning: may result in a", "during the testing phase. Default False. :param tz: The timezone associated with the", "= 0 self.decreases = 0 self.periods = 0 self.debug = False # Visualize", "\"\"\"Output a visualization of the backtesting results. The diagram overlays training and testing", "sell count.\"\"\" self.totalSells += 1 if self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction, performance,", "*args, **kwargs): \"\"\"Collect statistics on correct and incorrect buys and sells. :param prediction:", "0 self.correctSells = 0 self.increases = 0 self.decreases = 0 self.periods = 0", "StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() +", "super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction, performance, *args, **kwargs): \"\"\"Collect statistics on correct and", "prediction, performance, *args, **kwargs): \"\"\"Collect statistics on correct and incorrect buys and sells.", "= '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i = 1 for var in self.variables: print(f\"X{i}: {var}\")", "{self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\") print(f\"Total Price Increases:", "X[:, 1].max() + 0.5 xx, yy = meshgrid( arange(x_min, x_max, stepsize), arange(y_min, y_max,", "for machine learning. 
See scikit-learn documentation for more details. Default 10. :param continuedTraining:", "0: self.decreases += 1 if prediction == -1: self.correctSells += 1 if self.debug:", "0.65. :param C: A penalty parameter for false positives. See scikit-learn documentation for", "1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training:", "print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\") if self.sellStats() > 50: prnt = f'{gre}{self.sellStats()}%{end}'", "= [] self.dates = [] self.totalBuys = 0 self.correctBuys = 0 self.totalSells =", "0.5, X[:, 1].max() + 0.5 xx, yy = meshgrid( arange(x_min, x_max, stepsize), arange(y_min,", "1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt = None if self.buyStats() > 50: prnt =", "hstack from pytz import timezone from clairvoyant import Clair import matplotlib matplotlib.use('Agg') class", "= X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:,", "1 if self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs) def clearStats(self): \"\"\"Reset all collected statistics.\"\"\"", "dimensions\") return if (self.XX is None or self.yy is None or self.model is", "Clair will will recommend a buy. Default 0.65. :param sellThreshold: Defines the confidence", "used to continue training the model during the testing phase. Default False. :param", "matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is a type of machine learning classifier. The purpose", "``tz`` parameter. Defines the end date for model testing. :param buyThreshold: Defines the", "+= 1 if self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction, performance, *args, **kwargs): \"\"\"Collect", "machine learning. See scikit-learn documentation for more details. Default 10. :param continuedTraining: Determine", "and easy way to vary parameters for rapid experimentation. 
Backtest also provides some", "# Save for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX =", "debug strings will be printed as backtesting is run. Warning: may result in", "): super().__init__( variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz", "before displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0 for stock in self.stocks:", "self.model = None def runModel(self, data): \"\"\"Run backtesting. :param data: A ``History`` of", "experimentation. Backtest also provides some convenience functions for visualizing collected statistics. :param variables:", "= None if self.buyStats() > 50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50:", "buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz ) # Stats self.stocks = [] self.dates", "< 0: self.decreases += 1 if prediction == -1: self.correctSells += 1 if", "and testing various parameterizations. This module provides classes that allow clients to experiment", "self.totalBuys += 1 if self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self, *args, **kwargs): \"\"\"Increment the", "XX self.yy = yy self.model = model def buyLogic(self, *args, **kwargs): \"\"\"Increment the", "collected sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return float(0) def displayConditions(self):", "provides a way of exploring and testing various parameterizations. This module provides classes", "for more details. Default 1. 
:param gamma: The kernel coefficient for machine learning.", "from sklearn.preprocessing import StandardScaler from numpy import vstack, hstack from pytz import timezone", "``History`` of stock data that includes observations in both the training and test", "associated with the datetime parameters. Default UTC. :ivar debug: A boolean value that", "Learn and execute model, X, y = self.learn(data) self.execute(data, model, X, y) #", "representing the actual observed performance. \"\"\" self.periods += 1 if performance > 0:", "y = self.XX, self.yy X = StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max = X[:,", "def sellLogic(self, *args, **kwargs): \"\"\"Increment the sell count.\"\"\" self.totalSells += 1 if self.debug:", "y) # Save for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX", "self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm,", "string that should be consistent with the ``tz`` parameter. Defines the end date", "run. Warning: may result in a lot of output. \"\"\" def __init__( self,", "to collect statistics on the performance of learned classifications while providing a quick", "if self.sellStats() > 50: prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50: prnt =", "positives. See scikit-learn documentation for more details. Default 1. :param gamma: The kernel", "result in a lot of output. 
\"\"\" def __init__( self, variables, trainStart, trainEnd,", "0 self.totalSells = 0 self.correctSells = 0 self.increases = 0 self.decreases = 0", ") plt.figure(figsize=(width, height)) cm = plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1,", "Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:, 1], c=y, cmap=RedBlue) Axes.set_xlim(xx.min(), xx.max()) Axes.set_ylim(yy.min(), yy.max())", "= f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\") if self.sellStats() > 50: prnt", "buyStats(self): \"\"\"Return the collected buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return", "meshgrid, arange, c_ from sklearn.preprocessing import StandardScaler from numpy import vstack, hstack from", "stock data. \"\"\" from numpy import meshgrid, arange, c_ from sklearn.preprocessing import StandardScaler", "+ 0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5", "X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max()", "buyThreshold: Defines the confidence level at which Clair will will recommend a buy.", "parameters for rapid experimentation. 
Backtest also provides some convenience functions for visualizing collected", "self.yy = yy self.model = model def buyLogic(self, *args, **kwargs): \"\"\"Increment the buy", "X, y = self.XX, self.yy X = StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max =", "continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma,", "**kwargs) def nextPeriodLogic(self, prediction, performance, *args, **kwargs): \"\"\"Collect statistics on correct and incorrect", "data: A ``History`` of stock data that includes observations in both the training", "sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz ) # Stats self.stocks = [] self.dates =", "print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing Periods:", "def buyStats(self): \"\"\"Return the collected buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError:", "print(f\"Total Price Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print the collected backtesting statistics.\"\"\" bld, gre,", "import StandardScaler from numpy import vstack, hstack from pytz import timezone from clairvoyant", "self.increases = 0 self.decreases = 0 self.periods = 0 self.debug = False #", "model during the testing phase. Default False. :param tz: The timezone associated with", "# Learn and execute model, X, y = self.learn(data) self.execute(data, model, X, y)", "1: self.correctBuys += 1 elif performance < 0: self.decreases += 1 if prediction", "gamma=gamma, continuedTraining=continuedTraining, tz=tz ) # Stats self.stocks = [] self.dates = [] self.totalBuys", "test phases. 
\"\"\" # Learn and execute model, X, y = self.learn(data) self.execute(data,", "= vstack(X) yy = hstack(y) self.XX = XX self.yy = yy self.model =", "that allow clients to experiment with different machine learning parameterizations and test those", "a quick and easy way to vary parameters for rapid experimentation. Backtest also", "Clair will recommend a sell. Default 0.65. :param C: A penalty parameter for", "yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:, 1], c=y, cmap=RedBlue) Axes.set_xlim(xx.min(), xx.max()) Axes.set_ylim(yy.min(),", "sellThreshold: Defines the confidence level at which Clair will recommend a sell. Default", "the performance of learned classifications while providing a quick and easy way to", "the sell count.\"\"\" self.totalSells += 1 if self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction,", "try: return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return float(0) def displayConditions(self): \"\"\"Print the learning", "actual observed performance. \"\"\" self.periods += 1 if performance > 0: self.increases +=", "model testing. :param testEnd: A datetime as a string that should be consistent", "variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__(", "phases. \"\"\" # Learn and execute model, X, y = self.learn(data) self.execute(data, model,", "= hstack(y) self.XX = XX self.yy = yy self.model = model def buyLogic(self,", "'\\033[0m' print(f'{bld}Conditions{end}') i = 1 for var in self.variables: print(f\"X{i}: {var}\") i +=", "Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:, 1], c=y, cmap=RedBlue) Axes.set_xlim(xx.min(), xx.max())", "``tz`` parameter. Defines the start date for model testing. 
:param testEnd: A datetime", "**kwargs): \"\"\"Increment the sell count.\"\"\" self.totalSells += 1 if self.debug: super().sellLogic(*args, **kwargs) def", "__init__( self, variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC')", "prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\") if self.sellStats() > 50:", "type of machine learning classifier. The purpose of ``Backtest`` is to collect statistics", "the end date for model testing. :param buyThreshold: Defines the confidence level at", "of 1 or -1 representing an up or down performance. :param performance: A", "continuedTraining: Determine if data from the testing period should be used to continue", "10. :param continuedTraining: Determine if data from the testing period should be used", "with the ``tz`` parameter. Defines the end date for model testing. :param buyThreshold:", "= X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 xx, yy = meshgrid(", "the ``tz`` parameter. Defines the end date for model testing. :param buyThreshold: Defines", "# Visualize self.XX = None self.yy = None self.model = None def runModel(self,", "0].max() + 0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() +", "of machine learning classifier. The purpose of ``Backtest`` is to collect statistics on", "arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width, height)) cm = plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894'])", "and incorrect buys and sells. :param prediction: Value of 1 or -1 representing", "in both the training and test phases. \"\"\" # Learn and execute model,", "parameterizations and test those on historical stock data. 
\"\"\" from numpy import meshgrid,", "arange, c_ from sklearn.preprocessing import StandardScaler from numpy import vstack, hstack from pytz", "from numpy import meshgrid, arange, c_ from sklearn.preprocessing import StandardScaler from numpy import", "scikit-learn documentation for more details. Default 1. :param gamma: The kernel coefficient for", "< 50: prnt = f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def", "plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1, 1, 1) Z = self.model.decision_function(c_[xx.ravel(),", "{self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt = None if", "float(0) def displayConditions(self): \"\"\"Print the learning and testing parameters.\"\"\" bld, end = '\\033[1m',", "from numpy import vstack, hstack from pytz import timezone from clairvoyant import Clair", "y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 xx, yy =", "Backtest(Clair): \"\"\"Backtest is a type of machine learning classifier. The purpose of ``Backtest``", "performance, *args, **kwargs) def clearStats(self): \"\"\"Reset all collected statistics.\"\"\" self.dates = [] self.totalBuys", "gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C,", "{var}\") i += 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma:", "print(f\"\\nTotal Buys: {self.totalBuys}\") prnt = None if self.buyStats() > 50: prnt = f\"{gre}{self.buyStats()}%{end}\"", "The purpose of ``Backtest`` is to collect statistics on the performance of learned", "classifier. The purpose of ``Backtest`` is to collect statistics on the performance of", "with the datetime parameters. Default UTC. 
:ivar debug: A boolean value that determines", "= 0 self.increases = 0 self.decreases = 0 self.periods = 0 def buyStats(self):", "= 0 def buyStats(self): \"\"\"Return the collected buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2)", "0: print(\"Error: Please run model before displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i =", "up or down performance. :param performance: A positive or negative value representing the", "self.correctSells = 0 self.increases = 0 self.decreases = 0 self.periods = 0 def", "color intensity represents the distribution of probability. \"\"\" import matplotlib.pyplot as plt from", "Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print the collected backtesting statistics.\"\"\" bld, gre, red, end", "testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz ) # Stats self.stocks = []", "self.decreases = 0 self.periods = 0 def buyStats(self): \"\"\"Return the collected buy statistics.\"\"\"", "1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx,", "from pytz import timezone from clairvoyant import Clair import matplotlib matplotlib.use('Agg') class Backtest(Clair):", "for more details. Default 10. :param continuedTraining: Determine if data from the testing", "clients to experiment with different machine learning parameterizations and test those on historical", "matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is a type of machine learning classifier. 
The", "return float(0) def displayConditions(self): \"\"\"Print the learning and testing parameters.\"\"\" bld, end =", "Price Increases: {self.increases}\") print(f\"Total Price Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print the collected backtesting", "stepsize) ) plt.figure(figsize=(width, height)) cm = plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes =", "print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal Buys: {self.totalBuys}\")", "self.execute(data, model, X, y) # Save for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'),", "collect statistics on the performance of learned classifications while providing a quick and", ") # Stats self.stocks = [] self.dates = [] self.totalBuys = 0 self.correctBuys", "50: prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50: prnt = f'{red}{self.sellStats()}%{end}' else: prnt", "way to vary parameters for rapid experimentation. Backtest also provides some convenience functions", "phase. Default False. :param tz: The timezone associated with the datetime parameters. Default", "prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output a", "ListedColormap if len(self.variables) != 2: print(\"Error: Plotting is restricted to 2 dimensions\") return", "will be printed as backtesting is run. Warning: may result in a lot", "A list of columns that represent learning features. :param trainStart: A datetime as", "details. Default 1. :param gamma: The kernel coefficient for machine learning. See scikit-learn", "machine learning parameterizations and test those on historical stock data. \"\"\" from numpy", "the end date for model training. :param testStart: A datetime as a string", "model training. 
:param testStart: A datetime as a string that should be consistent", ":param trainStart: A datetime as a string that should be consistent with the", ":param testEnd: A datetime as a string that should be consistent with the", "historical stock data. \"\"\" from numpy import meshgrid, arange, c_ from sklearn.preprocessing import", "with the ``tz`` parameter. Defines the end date for model training. :param testStart:", "-1: self.correctSells += 1 if self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs) def clearStats(self): \"\"\"Reset", "elif self.buyStats() < 50: prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy:", "self.stocks = [] self.dates = [] self.totalBuys = 0 self.correctBuys = 0 self.totalSells", "module provides classes that allow clients to experiment with different machine learning parameterizations", "vary parameters for rapid experimentation. Backtest also provides some convenience functions for visualizing", "sells. :param prediction: Value of 1 or -1 representing an up or down", "end = '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates) == 0: print(\"Error: Please run", "that should be consistent with the ``tz`` parameter. Defines the start date for", "print(f\"Total Sells: {self.totalSells}\") if self.sellStats() > 50: prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats() <", "{self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt = None if self.buyStats() >", "0 self.increases = 0 self.decreases = 0 self.periods = 0 self.debug = False", ":param data: A ``History`` of stock data that includes observations in both the", "+= 1 if prediction == -1: self.correctSells += 1 if self.debug: super().nextPeriodLogic(prediction, performance,", "0.5 xx, yy = meshgrid( arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width,", "backtesting is run. Warning: may result in a lot of output. 
\"\"\" def", "date for model testing. :param testEnd: A datetime as a string that should", "training the model during the testing phase. Default False. :param tz: The timezone", "hstack(y) self.XX = XX self.yy = yy self.model = model def buyLogic(self, *args,", "the distribution of probability. \"\"\" import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap", "the buy count.\"\"\" self.totalBuys += 1 if self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self, *args,", "def runModel(self, data): \"\"\"Run backtesting. :param data: A ``History`` of stock data that", "purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X) yy = hstack(y)", "datetime parameters. Default UTC. :ivar debug: A boolean value that determines if debug", "X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 xx, yy = meshgrid( arange(x_min,", "= '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates) == 0: print(\"Error: Please run model", "collected statistics. :param variables: A list of columns that represent learning features. :param", "lot of output. 
\"\"\" def __init__( self, variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65,", "testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables, trainStart, trainEnd,", "self.yy is None or self.model is None): print(\"Error: Please run model before visualizing\")", "Z = Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:,", "the collected sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return float(0) def", "+= 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued", "date for model training. :param trainEnd: A datetime as a string that should", "{self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\") print(f\"Total Price Increases: {self.increases}\") print(f\"Total Price Decreases: {self.decreases}\")", "= 0 self.decreases = 0 self.periods = 0 def buyStats(self): \"\"\"Return the collected", "learning classifier. The purpose of ``Backtest`` is to collect statistics on the performance", "of output. 
\"\"\" def __init__( self, variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65,", "meshgrid( arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width, height)) cm = plt.cm.RdBu", "trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz ) # Stats", "self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X) yy = hstack(y) self.XX", "the confidence level at which Clair will will recommend a buy. Default 0.65.", "coefficient for machine learning. See scikit-learn documentation for more details. Default 10. :param", "Warning: may result in a lot of output. \"\"\" def __init__( self, variables,", "def sellStats(self): \"\"\"Return the collected sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError:", "``Backtest`` is to collect statistics on the performance of learned classifications while providing", "``tz`` parameter. Defines the start date for model training. :param trainEnd: A datetime", "def visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output a visualization of the backtesting results. The", "UTC. :ivar debug: A boolean value that determines if debug strings will be", "prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50: prnt = f'{red}{self.sellStats()}%{end}' else: prnt =", "stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:, 1],", "(self.XX is None or self.yy is None or self.model is None): print(\"Error: Please", "parameterizations. This module provides classes that allow clients to experiment with different machine", "will recommend a sell. Default 0.65. 
:param C: A penalty parameter for false", "== 0: print(\"Error: Please run model before displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i", "classes that allow clients to experiment with different machine learning parameterizations and test", "be consistent with the ``tz`` parameter. Defines the start date for model training.", "be used to continue training the model during the testing phase. Default False.", "runModel(self, data): \"\"\"Run backtesting. :param data: A ``History`` of stock data that includes", "both the training and test phases. \"\"\" # Learn and execute model, X,", "+= 1 if prediction == 1: self.correctBuys += 1 elif performance < 0:", "self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction, performance, *args, **kwargs): \"\"\"Collect statistics on correct", "{self.periods}\") print(f\"Total Price Increases: {self.increases}\") print(f\"Total Price Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print the", "', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt =", "= 1 for var in self.variables: print(f\"X{i}: {var}\") i += 1 print(f\"Buy Threshold:", "parameter. Defines the end date for model training. 
:param testStart: A datetime as", "*args, **kwargs) def clearStats(self): \"\"\"Reset all collected statistics.\"\"\" self.dates = [] self.totalBuys =", "y_max, stepsize) ) plt.figure(figsize=(width, height)) cm = plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes", "while providing a quick and easy way to vary parameters for rapid experimentation.", "return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return float(0) def sellStats(self): \"\"\"Return the collected sell", "performance of learned classifications while providing a quick and easy way to vary", "sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return float(0) def displayConditions(self): \"\"\"Print", "0 for stock in self.stocks: print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i", "A penalty parameter for false positives. See scikit-learn documentation for more details. Default", "f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output a visualization of", "return if (self.XX is None or self.yy is None or self.model is None):", "= self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z,", "C: A penalty parameter for false positives. See scikit-learn documentation for more details.", "be consistent with the ``tz`` parameter. Defines the end date for model training.", "| ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt", "consistent with the ``tz`` parameter. Defines the end date for model testing. 
:param", "is restricted to 2 dimensions\") return if (self.XX is None or self.yy is", "0 self.periods = 0 self.debug = False # Visualize self.XX = None self.yy", "scikit-learn documentation for more details. Default 10. :param continuedTraining: Determine if data from", "x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max", "gamma: The kernel coefficient for machine learning. See scikit-learn documentation for more details.", "statistics on the performance of learned classifications while providing a quick and easy", "more details. Default 1. :param gamma: The kernel coefficient for machine learning. See", "if len(self.dates) == 0: print(\"Error: Please run model before displaying stats\") return print(f'{bld}Stats{end}')", "testEnd: A datetime as a string that should be consistent with the ``tz``", "is a type of machine learning classifier. The purpose of ``Backtest`` is to", "rapid experimentation. Backtest also provides some convenience functions for visualizing collected statistics. :param", "print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\") print(f\"Total Price", "print(\"Stock(s):\") i = 0 for stock in self.stocks: print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\",", "!= 2: print(\"Error: Plotting is restricted to 2 dimensions\") return if (self.XX is", "a way of exploring and testing various parameterizations. This module provides classes that", "2) except ZeroDivisionError: return float(0) def sellStats(self): \"\"\"Return the collected sell statistics.\"\"\" try:", "'\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates) == 0: print(\"Error: Please run model before displaying", "**kwargs) def clearStats(self): \"\"\"Reset all collected statistics.\"\"\" self.dates = [] self.totalBuys = 0", "recommend a buy. Default 0.65. 
:param sellThreshold: Defines the confidence level at which", "is None): print(\"Error: Please run model before visualizing\") return X, y = self.XX,", "plt from matplotlib.colors import ListedColormap if len(self.variables) != 2: print(\"Error: Plotting is restricted", "if (self.XX is None or self.yy is None or self.model is None): print(\"Error:", "a lot of output. \"\"\" def __init__( self, variables, trainStart, trainEnd, testStart, testEnd,", "parameters.\"\"\" bld, end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i = 1 for var in", "sellStats(self): \"\"\"Return the collected sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return", "model training. :param trainEnd: A datetime as a string that should be consistent", "0.65. :param sellThreshold: Defines the confidence level at which Clair will recommend a", "1. :param gamma: The kernel coefficient for machine learning. See scikit-learn documentation for", "self.XX = None self.yy = None self.model = None def runModel(self, data): \"\"\"Run", "model def buyLogic(self, *args, **kwargs): \"\"\"Increment the buy count.\"\"\" self.totalBuys += 1 if", "diagram overlays training and testing observations on top of a color coded representation", "testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables, trainStart, trainEnd, testStart,", "= f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self, width=5, height=5,", "performance < 0: self.decreases += 1 if prediction == -1: self.correctSells += 1", "x_max, stepsize), arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width, height)) cm = plt.cm.RdBu RedBlue =", ":param buyThreshold: Defines the confidence level at which Clair will will recommend a", "represents the distribution of probability. 
\"\"\" import matplotlib.pyplot as plt from matplotlib.colors import", "start date for model training. :param trainEnd: A datetime as a string that", "as a string that should be consistent with the ``tz`` parameter. Defines the", "will recommend a buy. Default 0.65. :param sellThreshold: Defines the confidence level at", "0: self.increases += 1 if prediction == 1: self.correctBuys += 1 elif performance", "learned recommendations. The color intensity represents the distribution of probability. \"\"\" import matplotlib.pyplot", "the datetime parameters. Default UTC. :ivar debug: A boolean value that determines if", "\"\"\"Collect statistics on correct and incorrect buys and sells. :param prediction: Value of", "way of exploring and testing various parameterizations. This module provides classes that allow", "the collected buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return float(0) def", "0 self.increases = 0 self.decreases = 0 self.periods = 0 def buyStats(self): \"\"\"Return", "{self.totalBuys}\") prnt = None if self.buyStats() > 50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats()", "learning and testing parameters.\"\"\" bld, end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i = 1", "def nextPeriodLogic(self, prediction, performance, *args, **kwargs): \"\"\"Collect statistics on correct and incorrect buys", "# Stats self.stocks = [] self.dates = [] self.totalBuys = 0 self.correctBuys =", "Default 0.65. :param C: A penalty parameter for false positives. See scikit-learn documentation", "print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output a visualization of the", "purpose of ``Backtest`` is to collect statistics on the performance of learned classifications", "A positive or negative value representing the actual observed performance. 
\"\"\" self.periods +=", "plt.figure(figsize=(width, height)) cm = plt.cm.RdBu RedBlue = ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1, 1,", "performance: A positive or negative value representing the actual observed performance. \"\"\" self.periods", "**kwargs): \"\"\"Collect statistics on correct and incorrect buys and sells. :param prediction: Value", "== 1: self.correctBuys += 1 elif performance < 0: self.decreases += 1 if", "consistent with the ``tz`` parameter. Defines the start date for model testing. :param", "data from the testing period should be used to continue training the model", "model testing. :param buyThreshold: Defines the confidence level at which Clair will will", "columns that represent learning features. :param trainStart: A datetime as a string that", "self.learn(data) self.execute(data, model, X, y) # Save for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'),", "\"\"\"Increment the buy count.\"\"\" self.totalBuys += 1 if self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self,", "Sells: {self.totalSells}\") if self.sellStats() > 50: prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50:", "Defines the start date for model testing. :param testEnd: A datetime as a", "level at which Clair will recommend a sell. Default 0.65. :param C: A", "different machine learning parameterizations and test those on historical stock data. \"\"\" from", "self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X) yy = hstack(y) self.XX = XX self.yy =", "Defines the end date for model training. :param testStart: A datetime as a", "float(0) def sellStats(self): \"\"\"Return the collected sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2) except", "50: prnt = f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self,", "for model testing. 
:param buyThreshold: Defines the confidence level at which Clair will", "def clearStats(self): \"\"\"Reset all collected statistics.\"\"\" self.dates = [] self.totalBuys = 0 self.correctBuys", "visualizing collected statistics. :param variables: A list of columns that represent learning features.", "on correct and incorrect buys and sells. :param prediction: Value of 1 or", "1 if performance > 0: self.increases += 1 if prediction == 1: self.correctBuys", "== -1: self.correctSells += 1 if self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs) def clearStats(self):", "round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return float(0) def displayConditions(self): \"\"\"Print the learning and testing", "print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\") print(f\"Total Price Increases: {self.increases}\") print(f\"Total Price", "f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50: prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\" print(f\"Buy", "boolean value that determines if debug strings will be printed as backtesting is", "self.periods += 1 if performance > 0: self.increases += 1 if prediction ==", "print(f\"X{i}: {var}\") i += 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\")", "quick and easy way to vary parameters for rapid experimentation. Backtest also provides", "This module provides classes that allow clients to experiment with different machine learning", "xx, yy = meshgrid( arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width, height))", "y) x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min,", "= None def runModel(self, data): \"\"\"Run backtesting. 
:param data: A ``History`` of stock", "0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min() -", "sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold,", "elif self.sellStats() < 50: prnt = f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy:", "print(f\"Total Price Increases: {self.increases}\") print(f\"Total Price Decreases: {self.decreases}\") def displayStats(self): \"\"\"Print the collected", "testStart: A datetime as a string that should be consistent with the ``tz``", "arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width, height)) cm = plt.cm.RdBu RedBlue", "= ListedColormap(['#FF312E', '#6E8894']) Axes = plt.subplot(1, 1, 1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z", "which Clair will will recommend a buy. Default 0.65. :param sellThreshold: Defines the", "See scikit-learn documentation for more details. Default 10. :param continuedTraining: Determine if data", "= f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50: prnt = f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%'", "clairvoyant import Clair import matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is a type of", "observations on top of a color coded representation of learned recommendations. The color", "the backtesting results. The diagram overlays training and testing observations on top of", "gre, red, end = '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates) == 0: print(\"Error:", "includes observations in both the training and test phases. \"\"\" # Learn and", "testing. 
:param testEnd: A datetime as a string that should be consistent with", "Save for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X)", "= 0 self.increases = 0 self.decreases = 0 self.periods = 0 self.debug =", "i = 0 for stock in self.stocks: print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing:", "data that includes observations in both the training and test phases. \"\"\" #", "yy = meshgrid( arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize) ) plt.figure(figsize=(width, height)) cm", "representing an up or down performance. :param performance: A positive or negative value", "{self.totalSells}\") if self.sellStats() > 50: prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50: prnt", "self.decreases = 0 self.periods = 0 self.debug = False # Visualize self.XX =", "except ZeroDivisionError: return float(0) def sellStats(self): \"\"\"Return the collected sell statistics.\"\"\" try: return", "Testing Periods: {self.periods}\") print(f\"Total Price Increases: {self.increases}\") print(f\"Total Price Decreases: {self.decreases}\") def displayStats(self):", "or self.yy is None or self.model is None): print(\"Error: Please run model before", "Axes = plt.subplot(1, 1, 1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) stock", "may result in a lot of output. \"\"\" def __init__( self, variables, trainStart,", "= self.XX, self.yy X = StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max = X[:, 0].min()", "string that should be consistent with the ``tz`` parameter. Defines the start date", "buys and sells. 
:param prediction: Value of 1 or -1 representing an up", "print(f'{bld}Conditions{end}') i = 1 for var in self.variables: print(f\"X{i}: {var}\") i += 1", "timezone from clairvoyant import Clair import matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is a", "the testing period should be used to continue training the model during the", "= [] self.totalBuys = 0 self.correctBuys = 0 self.totalSells = 0 self.correctSells =", "def buyLogic(self, *args, **kwargs): \"\"\"Increment the buy count.\"\"\" self.totalBuys += 1 if self.debug:", "'\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates) == 0: print(\"Error: Please run model before", "The timezone associated with the datetime parameters. Default UTC. :ivar debug: A boolean", "variables: A list of columns that represent learning features. :param trainStart: A datetime", "Please run model before visualizing\") return X, y = self.XX, self.yy X =", "be consistent with the ``tz`` parameter. Defines the start date for model testing.", "allow clients to experiment with different machine learning parameterizations and test those on", ":param variables: A list of columns that represent learning features. :param trainStart: A", "= 0 for stock in self.stocks: print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\")", "list of columns that represent learning features. :param trainStart: A datetime as a", "for rapid experimentation. Backtest also provides some convenience functions for visualizing collected statistics.", "None self.model = None def runModel(self, data): \"\"\"Run backtesting. 
:param data: A ``History``", "self.XX, self.yy X = StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max = X[:, 0].min() -", "\"\"\" from numpy import meshgrid, arange, c_ from sklearn.preprocessing import StandardScaler from numpy", "= f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\") if", "pytz import timezone from clairvoyant import Clair import matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest", "numpy import vstack, hstack from pytz import timezone from clairvoyant import Clair import", "= self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:, 1], c=y,", "and execute model, X, y = self.learn(data) self.execute(data, model, X, y) # Save", "import timezone from clairvoyant import Clair import matplotlib matplotlib.use('Agg') class Backtest(Clair): \"\"\"Backtest is", "\"\"\"Return the collected buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return float(0)", "distribution of probability. \"\"\" import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap if", "tz: The timezone associated with the datetime parameters. Default UTC. :ivar debug: A", "of learned recommendations. The color intensity represents the distribution of probability. \"\"\" import", "0 def buyStats(self): \"\"\"Return the collected buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except", "A ``History`` of stock data that includes observations in both the training and", "prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\")", "end date for model testing. :param buyThreshold: Defines the confidence level at which", "parameter. Defines the start date for model training. 
:param trainEnd: A datetime as", "the collected backtesting statistics.\"\"\" bld, gre, red, end = '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m'", "= self.learn(data) self.execute(data, model, X, y) # Save for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'),", "the actual observed performance. \"\"\" self.periods += 1 if performance > 0: self.increases", "red, end = '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if len(self.dates) == 0: print(\"Error: Please", "easy way to vary parameters for rapid experimentation. Backtest also provides some convenience", "f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt = None if self.buyStats()", "output. \"\"\" def __init__( self, variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1,", "import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap if len(self.variables) != 2: print(\"Error:", "on historical stock data. \"\"\" from numpy import meshgrid, arange, c_ from sklearn.preprocessing", "represent learning features. :param trainStart: A datetime as a string that should be", "printed as backtesting is run. Warning: may result in a lot of output.", "matplotlib.pyplot as plt from matplotlib.colors import ListedColormap if len(self.variables) != 2: print(\"Error: Plotting", "{self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing", "self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:, 1], c=y, cmap=RedBlue)", "of exploring and testing various parameterizations. 
This module provides classes that allow clients", "print(f\"Total Testing Periods: {self.periods}\") print(f\"Total Price Increases: {self.increases}\") print(f\"Total Price Decreases: {self.decreases}\") def", "will will recommend a buy. Default 0.65. :param sellThreshold: Defines the confidence level", "collected statistics.\"\"\" self.dates = [] self.totalBuys = 0 self.correctBuys = 0 self.totalSells =", "Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75) Axes.scatter(X[:, 0], X[:,", "is None or self.yy is None or self.model is None): print(\"Error: Please run", "before visualizing\") return X, y = self.XX, self.yy X = StandardScaler().fit_transform(X) self.model.fit(X, y)", "date for model training. :param testStart: A datetime as a string that should", "should be used to continue training the model during the testing phase. Default", "1 if prediction == 1: self.correctBuys += 1 elif performance < 0: self.decreases", "print(f\"gamma: {self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\") print(f\"Total Price Increases: {self.increases}\")", "\"\"\" def __init__( self, variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10,", "i += 1 print(f\"\\nTotal Buys: {self.totalBuys}\") prnt = None if self.buyStats() > 50:", "testing phase. Default False. :param tz: The timezone associated with the datetime parameters.", "for var in self.variables: print(f\"X{i}: {var}\") i += 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell", "Visualize self.XX = None self.yy = None self.model = None def runModel(self, data):", "testing. :param buyThreshold: Defines the confidence level at which Clair will will recommend", "displaying stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0 for stock in self.stocks: print(f'{stock}", "details. Default 10. 
:param continuedTraining: Determine if data from the testing period should", "if prediction == 1: self.correctBuys += 1 elif performance < 0: self.decreases +=", "super().__init__( variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz )", "statistics. :param variables: A list of columns that represent learning features. :param trainStart:", "training and testing observations on top of a color coded representation of learned", "datetime as a string that should be consistent with the ``tz`` parameter. Defines", "be printed as backtesting is run. Warning: may result in a lot of", "the confidence level at which Clair will recommend a sell. Default 0.65. :param", "period should be used to continue training the model during the testing phase.", "nextPeriodLogic(self, prediction, performance, *args, **kwargs): \"\"\"Collect statistics on correct and incorrect buys and", "\"\"\"Reset all collected statistics.\"\"\" self.dates = [] self.totalBuys = 0 self.correctBuys = 0", "round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return float(0) def sellStats(self): \"\"\"Return the collected sell statistics.\"\"\"", "the model during the testing phase. Default False. 
:param tz: The timezone associated", "from the testing period should be used to continue training the model during", ":param continuedTraining: Determine if data from the testing period should be used to", "{self.gamma}\") print(f\"Continued Training: {self.continuedTraining}\") print(f\"Total Testing Periods: {self.periods}\") print(f\"Total Price Increases: {self.increases}\") print(f\"Total", "self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X) yy = hstack(y) self.XX =", "\"\"\" import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap if len(self.variables) != 2:", "\"\"\"Backtest provides a way of exploring and testing various parameterizations. This module provides", "model before visualizing\") return X, y = self.XX, self.yy X = StandardScaler().fit_transform(X) self.model.fit(X,", "stats\") return print(f'{bld}Stats{end}') print(\"Stock(s):\") i = 0 for stock in self.stocks: print(f'{stock} |", "coded representation of learned recommendations. The color intensity represents the distribution of probability.", "to vary parameters for rapid experimentation. Backtest also provides some convenience functions for", "variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=buyThreshold, sellThreshold=sellThreshold, C=C, gamma=gamma, continuedTraining=continuedTraining, tz=tz ) #", "def displayConditions(self): \"\"\"Print the learning and testing parameters.\"\"\" bld, end = '\\033[1m', '\\033[0m'", "50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50: prnt = f\"{red}{self.buyStats()}%{end}\" else: prnt", "positive or negative value representing the actual observed performance. \"\"\" self.periods += 1", "def __init__( self, variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False,", "the testing phase. Default False. 
:param tz: The timezone associated with the datetime", "statistics.\"\"\" self.dates = [] self.totalBuys = 0 self.correctBuys = 0 self.totalSells = 0", "buyLogic(self, *args, **kwargs): \"\"\"Increment the buy count.\"\"\" self.totalBuys += 1 if self.debug: super().buyLogic(*args,", "self.totalSells += 1 if self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction, performance, *args, **kwargs):", "continuedTraining=continuedTraining, tz=tz ) # Stats self.stocks = [] self.dates = [] self.totalBuys =", "self, variables, trainStart, trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ):", "\"\"\"Print the learning and testing parameters.\"\"\" bld, end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i", "Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy,", "Backtest also provides some convenience functions for visualizing collected statistics. :param variables: A", "height=5, stepsize=0.02): \"\"\"Output a visualization of the backtesting results. The diagram overlays training", "which Clair will recommend a sell. Default 0.65. :param C: A penalty parameter", "model, X, y) # Save for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y')", "those on historical stock data. \"\"\" from numpy import meshgrid, arange, c_ from", "0 self.decreases = 0 self.periods = 0 def buyStats(self): \"\"\"Return the collected buy", "f\"{red}{self.buyStats()}%{end}\" else: prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\") if self.sellStats()", "testing observations on top of a color coded representation of learned recommendations. 
The", "+ 0.5 xx, yy = meshgrid( arange(x_min, x_max, stepsize), arange(y_min, y_max, stepsize) )", "backtesting results. The diagram overlays training and testing observations on top of a", "'\\033[91m', '\\033[0m' if len(self.dates) == 0: print(\"Error: Please run model before displaying stats\")", "super().buyLogic(*args, **kwargs) def sellLogic(self, *args, **kwargs): \"\"\"Increment the sell count.\"\"\" self.totalSells += 1", "penalty parameter for false positives. See scikit-learn documentation for more details. Default 1.", "]) XX = vstack(X) yy = hstack(y) self.XX = XX self.yy = yy", "trainEnd, testStart, testEnd, buyThreshold=0.65, sellThreshold=0.65, C=1, gamma=10, continuedTraining=False, tz=timezone('UTC') ): super().__init__( variables, trainStart,", "See scikit-learn documentation for more details. Default 1. :param gamma: The kernel coefficient", "{self.decreases}\") def displayStats(self): \"\"\"Print the collected backtesting statistics.\"\"\" bld, gre, red, end =", "f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self, width=5, height=5, stepsize=0.02):", "self.correctBuys = 0 self.totalSells = 0 self.correctSells = 0 self.increases = 0 self.decreases", "class Backtest(Clair): \"\"\"Backtest is a type of machine learning classifier. The purpose of", "data): \"\"\"Run backtesting. :param data: A ``History`` of stock data that includes observations", "incorrect buys and sells. :param prediction: Value of 1 or -1 representing an", "if prediction == -1: self.correctSells += 1 if self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs)", "return float(0) def sellStats(self): \"\"\"Return the collected sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2)", "top of a color coded representation of learned recommendations. 
The color intensity represents", "X = StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max = X[:, 0].min() - 0.5, X[:,", "\"\"\"Return the collected sell statistics.\"\"\" try: return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return float(0)", "prnt = f'{red}{self.sellStats()}%{end}' else: prnt = f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self, width=5,", "None or self.yy is None or self.model is None): print(\"Error: Please run model", "yy.ravel()]) Z = Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock) Axes.contourf(xx, yy, Z, cmap=cm, alpha=0.75)", "representation of learned recommendations. The color intensity represents the distribution of probability. \"\"\"", ":param gamma: The kernel coefficient for machine learning. See scikit-learn documentation for more", "= None self.model = None def runModel(self, data): \"\"\"Run backtesting. :param data: A", "for vizualization purposes self.dates.append([ self.trainStart.strftime('%m/%d/%Y'), self.trainEnd.strftime('%m/%d/%Y'), self.testStart.strftime('%m/%d/%Y'), self.testEnd.strftime('%m/%d/%Y') ]) XX = vstack(X) yy", "0 self.periods = 0 def buyStats(self): \"\"\"Return the collected buy statistics.\"\"\" try: return", "self.sellStats() > 50: prnt = f'{gre}{self.sellStats()}%{end}' elif self.sellStats() < 50: prnt = f'{red}{self.sellStats()}%{end}'", "0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:,", "= 0 self.debug = False # Visualize self.XX = None self.yy = None", "> 50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50: prnt = f\"{red}{self.buyStats()}%{end}\" else:", "in self.variables: print(f\"X{i}: {var}\") i += 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\")", ":ivar debug: A boolean value that determines if debug strings will be printed", "= model def buyLogic(self, *args, **kwargs): 
\"\"\"Increment the buy count.\"\"\" self.totalBuys += 1", "more details. Default 10. :param continuedTraining: Determine if data from the testing period", "performance, *args, **kwargs): \"\"\"Collect statistics on correct and incorrect buys and sells. :param", "testing period should be used to continue training the model during the testing", "+= 1 if self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self, *args, **kwargs): \"\"\"Increment the sell", "collected backtesting statistics.\"\"\" bld, gre, red, end = '\\033[1m', '\\033[92m', '\\033[91m', '\\033[0m' if", "on the performance of learned classifications while providing a quick and easy way", "is to collect statistics on the performance of learned classifications while providing a", "be consistent with the ``tz`` parameter. Defines the end date for model testing.", "the start date for model testing. :param testEnd: A datetime as a string", "parameter for false positives. See scikit-learn documentation for more details. Default 1. :param", "try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return float(0) def sellStats(self): \"\"\"Return the collected", "end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i = 1 for var in self.variables: print(f\"X{i}:", "end date for model training. :param testStart: A datetime as a string that", "exploring and testing various parameterizations. This module provides classes that allow clients to", "= 0 self.correctSells = 0 self.increases = 0 self.decreases = 0 self.periods =", "testing various parameterizations. This module provides classes that allow clients to experiment with", "= yy self.model = model def buyLogic(self, *args, **kwargs): \"\"\"Increment the buy count.\"\"\"", "performance. 
:param performance: A positive or negative value representing the actual observed performance.", "Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\") if self.sellStats() > 50: prnt = f'{gre}{self.sellStats()}%{end}' elif", "self.correctSells += 1 if self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs) def clearStats(self): \"\"\"Reset all", "Determine if data from the testing period should be used to continue training", "1 if prediction == -1: self.correctSells += 1 if self.debug: super().nextPeriodLogic(prediction, performance, *args,", "statistics on correct and incorrect buys and sells. :param prediction: Value of 1", "backtesting. :param data: A ``History`` of stock data that includes observations in both", "the training and test phases. \"\"\" # Learn and execute model, X, y", "buy statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return float(0) def sellStats(self): \"\"\"Return", "if self.debug: super().nextPeriodLogic(prediction, performance, *args, **kwargs) def clearStats(self): \"\"\"Reset all collected statistics.\"\"\" self.dates", "as backtesting is run. Warning: may result in a lot of output. \"\"\"", "some convenience functions for visualizing collected statistics. :param variables: A list of columns", "at which Clair will will recommend a buy. Default 0.65. :param sellThreshold: Defines", "or -1 representing an up or down performance. :param performance: A positive or", "self.debug: super().buyLogic(*args, **kwargs) def sellLogic(self, *args, **kwargs): \"\"\"Increment the sell count.\"\"\" self.totalSells +=", "the start date for model training. :param trainEnd: A datetime as a string", "of a color coded representation of learned recommendations. 
The color intensity represents the", "Plotting is restricted to 2 dimensions\") return if (self.XX is None or self.yy", "StandardScaler from numpy import vstack, hstack from pytz import timezone from clairvoyant import", "i += 1 print(f\"Buy Threshold: {self.buyThreshold*100}%\") print(f\"Sell Threshold: {self.sellThreshold*100}%\") print(f\"C: {self.C}\") print(f\"gamma: {self.gamma}\")", "for stock in self.stocks: print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i +=", "XX = vstack(X) yy = hstack(y) self.XX = XX self.yy = yy self.model", "in a lot of output. \"\"\" def __init__( self, variables, trainStart, trainEnd, testStart,", "A datetime as a string that should be consistent with the ``tz`` parameter.", "in self.stocks: print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1 print(f\"\\nTotal", "recommendations. The color intensity represents the distribution of probability. \"\"\" import matplotlib.pyplot as", "if self.debug: super().sellLogic(*args, **kwargs) def nextPeriodLogic(self, prediction, performance, *args, **kwargs): \"\"\"Collect statistics on", "kernel coefficient for machine learning. See scikit-learn documentation for more details. Default 10.", "recommend a sell. Default 0.65. :param C: A penalty parameter for false positives.", "Default 1. :param gamma: The kernel coefficient for machine learning. See scikit-learn documentation", "y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 xx, yy", "else: prnt = f\"{self.buyStats()}%\" print(f\"Buy Accuracy: {prnt}\") print(f\"Total Sells: {self.totalSells}\") if self.sellStats() >", "Defines the confidence level at which Clair will will recommend a buy. Default", "learning parameterizations and test those on historical stock data. \"\"\" from numpy import", "a color coded representation of learned recommendations. 
The color intensity represents the distribution", "1, 1) Z = self.model.decision_function(c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) stock = self.stocks[len(self.stocks)-1] Axes.set_title(stock)", "c_ from sklearn.preprocessing import StandardScaler from numpy import vstack, hstack from pytz import", "return round((float(self.correctSells)/self.totalSells)*100, 2) except ZeroDivisionError: return float(0) def displayConditions(self): \"\"\"Print the learning and", "testing parameters.\"\"\" bld, end = '\\033[1m', '\\033[0m' print(f'{bld}Conditions{end}') i = 1 for var", "None if self.buyStats() > 50: prnt = f\"{gre}{self.buyStats()}%{end}\" elif self.buyStats() < 50: prnt", "= f'{self.sellStats()}%' print(f\"Sell Accuracy: {prnt}\") def visualizeModel(self, width=5, height=5, stepsize=0.02): \"\"\"Output a visualization", "training and test phases. \"\"\" # Learn and execute model, X, y =", "= 0 self.periods = 0 self.debug = False # Visualize self.XX = None", "overlays training and testing observations on top of a color coded representation of", "is None or self.model is None): print(\"Error: Please run model before visualizing\") return", "training. :param trainEnd: A datetime as a string that should be consistent with", "documentation for more details. Default 1. :param gamma: The kernel coefficient for machine", "false positives. See scikit-learn documentation for more details. Default 1. :param gamma: The", "training. :param testStart: A datetime as a string that should be consistent with", "= False # Visualize self.XX = None self.yy = None self.model = None", "of ``Backtest`` is to collect statistics on the performance of learned classifications while", "results. 
The diagram overlays training and testing observations on top of a color", "run model before visualizing\") return X, y = self.XX, self.yy X = StandardScaler().fit_transform(X)", "vstack(X) yy = hstack(y) self.XX = XX self.yy = yy self.model = model", "stock in self.stocks: print(f'{stock} | ', f\"Training: {self.dates[i][0]}-{self.dates[i][1]}\", f\"Testing: {self.dates[i][2]}-{self.dates[i][3]}\") i += 1", "self.yy X = StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max = X[:, 0].min() - 0.5,", "that determines if debug strings will be printed as backtesting is run. Warning:", "2 dimensions\") return if (self.XX is None or self.yy is None or self.model", "statistics.\"\"\" try: return round((float(self.correctBuys)/self.totalBuys)*100, 2) except ZeroDivisionError: return float(0) def sellStats(self): \"\"\"Return the", "self.totalSells = 0 self.correctSells = 0 self.increases = 0 self.decreases = 0 self.periods", "of columns that represent learning features. :param trainStart: A datetime as a string", "ZeroDivisionError: return float(0) def displayConditions(self): \"\"\"Print the learning and testing parameters.\"\"\" bld, end", "= StandardScaler().fit_transform(X) self.model.fit(X, y) x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max()", "The diagram overlays training and testing observations on top of a color coded", "providing a quick and easy way to vary parameters for rapid experimentation. Backtest", "- 0.5, X[:, 1].max() + 0.5 xx, yy = meshgrid( arange(x_min, x_max, stepsize),", "None def runModel(self, data): \"\"\"Run backtesting. :param data: A ``History`` of stock data", "experiment with different machine learning parameterizations and test those on historical stock data.", ":param performance: A positive or negative value representing the actual observed performance. \"\"\"", "elif performance < 0: self.decreases += 1 if prediction == -1: self.correctSells +=", "``tz`` parameter. Defines the end date for model training. 
:param testStart: A datetime" ]
[ "src, caption_data): assign_injectables(self, locals()) def get_contents(self): return [] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src,", "template_arguments): for argument in template_arguments: assert (argument in self.required_values) class StubJpegPicture(object): def __init__(self,", "assign_injectables from ..utils.immutabledict import ImmutableDict from ..generator.exporter import Exporter directory_values = ['title', 'images']", "import Exporter directory_values = ['title', 'images'] picture_values = ['alt_text', 'src', 'caption_data'] class MockJinja2Template(object):", "locals()) def render(self, template_arguments): for argument in template_arguments: assert (argument in self.required_values) class", "import ImmutableDict from ..generator.exporter import Exporter directory_values = ['title', 'images'] picture_values = ['alt_text',", "= Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir = [ StubJpegPicture('first", "= MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.stub_directory) if __name__ == '__main__': unittest.main()", "MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self, locals()) def render(self, template_arguments): for argument in template_arguments:", "def get_contents(self): return self.images def as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values))", "picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template", "def setUp(self): self.mock_template = 
MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter =", "assign_injectables(self, locals()) def get_contents(self): return self.images def as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self):", "= StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase):", "get_contents(self): return self.images def as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def", "StubJpegPicture(object): def __init__(self, alt_text, src, caption_data): assign_injectables(self, locals()) def get_contents(self): return [] def", "picture', 'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self):", "def get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template = MockJinja2Template(picture_values) self.picture =", "return [] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def", "get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src def get_output_file_name(self): return self.src class StubJpegDirectory(object):", "directory_values = ['title', 'images'] picture_values = ['alt_text', 'src', 'caption_data'] class MockJinja2Template(object): def __init__(self,", "'caption_data'] class MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self, 
locals()) def render(self, template_arguments): for argument", "import assign_injectables from ..utils.immutabledict import ImmutableDict from ..generator.exporter import Exporter directory_values = ['title',", "unittest from ..utils.inject import assign_injectables from ..utils.immutabledict import ImmutableDict from ..generator.exporter import Exporter", "locals()) def get_contents(self): return self.images def as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return", "setUp(self): self.mock_template = MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template)", "class StubJpegDirectory(object): def __init__(self, title, images): assign_injectables(self, locals()) def get_contents(self): return self.images def", "Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir = [ StubJpegPicture('first picture',", "locals()) def get_contents(self): return [] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def get_exporter(self):", "def __init__(self, required_values): assign_injectables(self, locals()) def render(self, template_arguments): for argument in template_arguments: assert", "return Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src def get_output_file_name(self): return self.src class StubJpegDirectory(object): def", "ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title def get_output_file_name(self): return", "class DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir = [ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), 
StubJpegPicture('second picture',", "in self.required_values) class StubJpegPicture(object): def __init__(self, alt_text, src, caption_data): assign_injectables(self, locals()) def get_contents(self):", "StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.stub_directory) if", "from ..utils.immutabledict import ImmutableDict from ..generator.exporter import Exporter directory_values = ['title', 'images'] picture_values", "def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir = [ StubJpegPicture('first picture', 'picture1.jpg',", "ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src def get_output_file_name(self):", "def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self): return", "__init__(self, alt_text, src, caption_data): assign_injectables(self, locals()) def get_contents(self): return [] def as_view(self): return", "'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir", "return self.images def as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self):", "from ..utils.inject import assign_injectables from ..utils.immutabledict import ImmutableDict from 
..generator.exporter import Exporter directory_values", "self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir = [", "'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template =", "def __init__(self, title, images): assign_injectables(self, locals()) def get_contents(self): return self.images def as_view(self): return", "get_name(self): return self.src def get_output_file_name(self): return self.src class StubJpegDirectory(object): def __init__(self, title, images):", "StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values) self.exporter", "setUp(self): self.pictures_in_dir = [ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory", "render(self, template_arguments): for argument in template_arguments: assert (argument in self.required_values) class StubJpegPicture(object): def", "src=self.src, caption_data=self.caption_data) def get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src def get_output_file_name(self): return", "'images'] picture_values = ['alt_text', 'src', 'caption_data'] class MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self, locals())", "self.title class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template = MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture', 'picture1.jpg',", "self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self): 
self.pictures_in_dir = [ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second", "as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title def", "['title', 'images'] picture_values = ['alt_text', 'src', 'caption_data'] class MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self,", "return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src def", "= MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self):", "= ['title', 'images'] picture_values = ['alt_text', 'src', 'caption_data'] class MockJinja2Template(object): def __init__(self, required_values):", "picture', 'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values) self.exporter =", "template_arguments: assert (argument in self.required_values) class StubJpegPicture(object): def __init__(self, alt_text, src, caption_data): assign_injectables(self,", "test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir = [ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'),", "= [ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My", "self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template) def 
test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.stub_directory) if __name__ ==", "SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template = MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter", "StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir)", "argument in template_arguments: assert (argument in self.required_values) class StubJpegPicture(object): def __init__(self, alt_text, src,", "def get_name(self): return self.title def get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template", "picture_values = ['alt_text', 'src', 'caption_data'] class MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self, locals()) def", "def get_output_file_name(self): return self.src class StubJpegDirectory(object): def __init__(self, title, images): assign_injectables(self, locals()) def", "self.mock_template = MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template) def", "get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template = MockJinja2Template(picture_values) self.picture = StubJpegPicture('a", "Exporter directory_values = ['title', 'images'] picture_values = ['alt_text', 'src', 'caption_data'] class MockJinja2Template(object): def", "for argument in template_arguments: assert (argument in self.required_values) class StubJpegPicture(object): def __init__(self, alt_text,", "images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title def get_output_file_name(self): return 
self.title", "self.src class StubJpegDirectory(object): def __init__(self, title, images): assign_injectables(self, locals()) def get_contents(self): return self.images", "Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src def get_output_file_name(self): return self.src class StubJpegDirectory(object): def __init__(self,", "class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template = MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption')", "Pictures', self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.stub_directory) if __name__", "get_name(self): return self.title def get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template =", "get_contents(self): return [] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def get_exporter(self): return Exporter(MockJinja2Template(picture_values))", "'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template) def", "= ['alt_text', 'src', 'caption_data'] class MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self, locals()) def render(self,", "from ..generator.exporter import Exporter directory_values = ['title', 'images'] picture_values = ['alt_text', 'src', 'caption_data']", "return self.title def get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template = MockJinja2Template(picture_values)", "as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def 
get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src", "DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir = [ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg',", "..utils.immutabledict import ImmutableDict from ..generator.exporter import Exporter directory_values = ['title', 'images'] picture_values =", "return Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title def get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase): def", "MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture)", "__init__(self, required_values): assign_injectables(self, locals()) def render(self, template_arguments): for argument in template_arguments: assert (argument", "import unittest from ..utils.inject import assign_injectables from ..utils.immutabledict import ImmutableDict from ..generator.exporter import", "def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title def get_output_file_name(self): return self.title class", "'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values)", "def get_name(self): return self.src def get_output_file_name(self): return self.src class StubJpegDirectory(object): def __init__(self, title,", "..generator.exporter import Exporter directory_values = ['title', 'images'] picture_values = ['alt_text', 'src', 'caption_data'] class", "def render(self, template_arguments): for argument in template_arguments: assert (argument in self.required_values) class 
StubJpegPicture(object):", "(argument in self.required_values) class StubJpegPicture(object): def __init__(self, alt_text, src, caption_data): assign_injectables(self, locals()) def", "caption_data=self.caption_data) def get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src def get_output_file_name(self): return self.src", "[ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures',", "class MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self, locals()) def render(self, template_arguments): for argument in", "get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title def get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase):", "return self.title class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template = MockJinja2Template(picture_values) self.picture = StubJpegPicture('a picture',", "self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self):", "[] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self):", "def __init__(self, alt_text, src, caption_data): assign_injectables(self, locals()) def get_contents(self): return [] def as_view(self):", "assign_injectables(self, locals()) def get_contents(self): return [] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def", "title, images): assign_injectables(self, locals()) def get_contents(self): return self.images 
def as_view(self): return ImmutableDict.of(title=self.title, images=self.images)", "self.title def get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase): def setUp(self): self.mock_template = MockJinja2Template(picture_values) self.picture", "images): assign_injectables(self, locals()) def get_contents(self): return self.images def as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def", "self.pictures_in_dir = [ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')] self.stub_directory =", "self.picture = StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class", "assign_injectables(self, locals()) def render(self, template_arguments): for argument in template_arguments: assert (argument in self.required_values)", "required_values): assign_injectables(self, locals()) def render(self, template_arguments): for argument in template_arguments: assert (argument in", "return self.src class StubJpegDirectory(object): def __init__(self, title, images): assign_injectables(self, locals()) def get_contents(self): return", "return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title def get_output_file_name(self):", "Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title def get_output_file_name(self): return self.title class SimpleExporterTest(unittest.TestCase): def setUp(self):", "def setUp(self): self.pictures_in_dir = [ StubJpegPicture('first picture', 'picture1.jpg', 'Caption1'), StubJpegPicture('second picture', 'picture2.jpg', 'Caption2')]", "self.src def get_output_file_name(self): return self.src class StubJpegDirectory(object): def __init__(self, title, 
images): assign_injectables(self, locals())", "caption_data): assign_injectables(self, locals()) def get_contents(self): return [] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data)", "def get_contents(self): return [] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text, src=self.src, caption_data=self.caption_data) def get_exporter(self): return", "get_output_file_name(self): return self.src class StubJpegDirectory(object): def __init__(self, title, images): assign_injectables(self, locals()) def get_contents(self):", "['alt_text', 'src', 'caption_data'] class MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self, locals()) def render(self, template_arguments):", "self.images def as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self): return", "def get_exporter(self): return Exporter(MockJinja2Template(picture_values)) def get_name(self): return self.src def get_output_file_name(self): return self.src class", "'picture2.jpg', 'Caption2')] self.stub_directory = StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template = MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template)", "ImmutableDict from ..generator.exporter import Exporter directory_values = ['title', 'images'] picture_values = ['alt_text', 'src',", "alt_text, src, caption_data): assign_injectables(self, locals()) def get_contents(self): return [] def as_view(self): return ImmutableDict.of(alt_text=self.alt_text,", "self.mock_template = MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.stub_directory) if __name__ == '__main__':", "= StubJpegDirectory('My Pictures', self.pictures_in_dir) self.mock_template = 
MockJinja2Template(directory_values) self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.stub_directory)", "return self.src def get_output_file_name(self): return self.src class StubJpegDirectory(object): def __init__(self, title, images): assign_injectables(self,", "in template_arguments: assert (argument in self.required_values) class StubJpegPicture(object): def __init__(self, alt_text, src, caption_data):", "'Caption') self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def setUp(self): self.pictures_in_dir =", "'src', 'caption_data'] class MockJinja2Template(object): def __init__(self, required_values): assign_injectables(self, locals()) def render(self, template_arguments): for", "def as_view(self): return ImmutableDict.of(title=self.title, images=self.images) def get_exporter(self): return Exporter(MockJinja2Template(directory_values)) def get_name(self): return self.title", "self.required_values) class StubJpegPicture(object): def __init__(self, alt_text, src, caption_data): assign_injectables(self, locals()) def get_contents(self): return", "StubJpegDirectory(object): def __init__(self, title, images): assign_injectables(self, locals()) def get_contents(self): return self.images def as_view(self):", "__init__(self, title, images): assign_injectables(self, locals()) def get_contents(self): return self.images def as_view(self): return ImmutableDict.of(title=self.title,", "..utils.inject import assign_injectables from ..utils.immutabledict import ImmutableDict from ..generator.exporter import Exporter directory_values =", "class StubJpegPicture(object): def __init__(self, alt_text, src, caption_data): assign_injectables(self, locals()) def get_contents(self): return []", "assert (argument in self.required_values) class StubJpegPicture(object): def 
__init__(self, alt_text, src, caption_data): assign_injectables(self, locals())", "StubJpegPicture('a picture', 'picture1.jpg', 'Caption') self.exporter = Exporter(self.mock_template) def test_it_should_populate_the_jinja2_template(self): self.exporter.export(self.picture) class DirectoryExporterTest(unittest.TestCase): def" ]
[ "3], 5) }) # Tuple tests.append({ 'input': { 'seq1': (1, 3, 5, 6,", "complexity: O(2 ^ (len(seq1) + len(seq2))) if type(seq1) != type(seq2): raise TypeError(\"Both input", "seq1[0:i] and seq2[0:j] table = [[0] * (len2 + 1) for _ in", "'seq1': (1, 3, 5, 6, 7, 2, 5, 2, 3), 'seq2': (6, 2,", "too slow (impossible when len(seq) > 7) @memoize def lcs_recursive(seq1: Sequence, seq2: Sequence)", "{ 'seq1': [1, 3, 5, 6, 7, 2, 5, 2, 3], 'seq2': [6,", "empty # Note: The vital idea here is, now that we know the", "go in that direction (ie in search of lcs). # Note: Putting this", "of seq2, instead of the left of seq1. j -= 1 else: i", "swap this elif with # the next `else`, the resulting lcs will be", "Consider all subclasses of generic type `Sequence` if isinstance(seq1, list): empty = []", "O(len1 * len2). # Step 1: find the lcs's length if type(seq1) !=", "is the func to be updated.) So obj's attributes will be copied to", "to be updated.) So obj's attributes will be copied to # memoizer. memoizer()", "if isinstance(seq1, list): add_elem = [seq1[i-1]] elif isinstance(seq1, str): add_elem = seq1[i-1] elif", "5) }) # Multiple subseqs with same length # In this case, return", "attributes will be copied to # memoizer. memoizer() is returned as the replacement", "'seq1': 'dense', 'seq2': 'condensed' }, 'output': ('dense', 5) }) # Multiple subseqs with", "does not need to be continuous in the original sequence.\"\"\" from typing import", "subsequence (both itself and its length) of two sequences by dynamic programming. 
Note", "# BACKWARDS (ie going up and right `table`) to find the feasible lcs.", "as an empty dict cache = obj.cache = {} # The decorator 'wraps'", "isinstance(seq1, tuple): empty = () else: raise TypeError(\"This type of sequence is not", "obj.cache as an empty dict cache = obj.cache = {} # The decorator", "Sequence, seq2: Sequence) -> Tuple[Sequence, int]: \"\"\"Find the longest common subsequence (both itself", "'badcfe' }, 'output': ('ace', 3) }) # No common subseq tests.append({ 'input': {", "lcs w/any seq is 0 if seq1[i - 1] == seq2[j - 1]:", "travel from it # BACKWARDS (ie going up and right `table`) to find", "from the left of `seq1`. \"\"\" # Time complexity: O(2 ^ (len(seq1) +", "from tests import jovian import functools ########################################## ### Test cases tests = []", "with same length # In this case, return the first common subseq (the", "`seq1`. \"\"\" # Time complexity: O(2 ^ (len(seq1) + len(seq2))) if type(seq1) !=", "empty, so its lcs w/any seq is 0 if seq1[i - 1] ==", "right corner of `table`, we should travel from it # BACKWARDS (ie going", "tuple can only be shown as (3,) but not (3) add_elem = (seq1[i-1],)", "('ace', 3) }) # No common subseq tests.append({ 'input': { 'seq1': 'a', 'seq2':", "0) }) ########################################## ### Methods def memoize(obj): \"\"\"Cache a function's return value each", "'seq1': 'serendipitous', 'seq2': 'precipitation' }, 'output': ('reipito', 7) }) # One is a", "test_cases=tests) # From the next two tests, we can see that memoized recursion", "bigger len() return ( max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len), max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1])", "1 else: i -= 1 return lcs, lcs_length ########################################## ### Test client jovian.evaluate_test_cases(func=lcs_recursive,", "7, 2, 5, 2, 3], 'seq2': [6, 2, 4, 7, 1, 5, 6,", "tests = [] # List tests.append({ 'input': { 'seq1': [1, 3, 5, 6,", 
"'input': { 'seq1': (1, 3, 5, 6, 7, 2, 5, 2, 3), 'seq2':", "If any one of the seqs is empty, then return the empty seq-type", "means to get from s1, s2 the one with bigger len() return (", "original sequence.\"\"\" from typing import Sequence, Tuple from tests import jovian import functools", "import jovian import functools ########################################## ### Test cases tests = [] # List", "if key not in cache: # When args are not present in cache's", "for _ in range(len1 + 1)] for i in range(1, len1 + 1):", "}, 'output': ('', 0) }) ########################################## ### Methods def memoize(obj): \"\"\"Cache a function's", "obj's attributes will be copied to # memoizer. memoizer() is returned as the", "str): empty = '' elif isinstance(seq1, tuple): empty = () else: raise TypeError(\"This", "+ lcs_recursive(seq1[1:], seq2[1:])[0], 1 + lcs_recursive(seq1[1:], seq2[1:])[1] ) else: # max(s1, s2, key=len)", "seq2[1:])[1] ) else: # max(s1, s2, key=len) means to get from s1, s2", "subseqs with same length, return the first common subseq from the left of", "3), 5) }) # String tests.append({ 'input': { 'seq1': 'serendipitous', 'seq2': 'precipitation' },", "lines are equivalent; use either # lcs_length = table[len1][len2] lcs_length = table[-1][-1] #", "complexity: O(len1 * len2). Space complexity: O(len1 * len2). 
# Step 1: find", "int]: \"\"\"Find the longest common subsequence (both itself and its length) of two", "str(kwargs) if key not in cache: # When args are not present in", "else: table[i][j] = max(table[i - 1][j], table[i][j - 1]) # The next two", "# String tests.append({ 'input': { 'seq1': 'serendipitous', 'seq2': 'precipitation' }, 'output': ('reipito', 7)", "present in cache's keys, add them cache[key] = obj(*args, **kwargs) return cache[key] return", "-= 1 elif table[i-1][j] < table[i][j-1]: # If the current elem of seq1", "**kwargs): key = str(args) + str(kwargs) if key not in cache: # When", "called later with the same arguments, the cached value is directly returned rather", "go and execute function `memoize(lcs)`, return memoizer. # Without memoization, the orig func", "runs too slow (impossible when len(seq) > 7) @memoize def lcs_recursive(seq1: Sequence, seq2:", "[] elif isinstance(seq1, str): empty = '' elif isinstance(seq1, tuple): empty = ()", "should be of the same type.\") # Consider all subclasses of generic type", "the cached value is directly returned rather than reevaluated.\"\"\" # Initialize cache and", "'seq2': 'stone' }, 'output': ('', 0) }) ########################################## ### Methods def memoize(obj): \"\"\"Cache", "is empty, then return the empty seq-type return empty, 0 len1, len2 =", "are equivalent; use either # lcs_length = table[len1][len2] lcs_length = table[-1][-1] # Step", "j > 0: if seq1[i-1] == seq2[j-1]: if isinstance(seq1, list): add_elem = [seq1[i-1]]", "be copied to # memoizer. memoizer() is returned as the replacement for the", "execute function `memoize(lcs)`, return memoizer. 
# Without memoization, the orig func runs too", "not (3) add_elem = (seq1[i-1],) lcs = add_elem + lcs i -= 1", "lcs, lcs_length ########################################## ### Test client jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests) # From the next two", "the longest common subsequence (both itself and its length) of two sequences by", "'seq1': 'a', 'seq2': 'bb' }, 'output': ('', 0) }) # One is empty", "return ( max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len), max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1]) ) def", "# Step 2: find the lcs ITSELF lcs = empty # Note: The", "& seq2 are not the same, then find the larger # of the", "is important; if we swap this elif with # the next `else`, the", "from range(1,) since seq[0:0] is empty, so its lcs w/any seq is 0", "empty seq-type return empty, 0 len1, len2 = len(seq1), len(seq2) # Use nested", "to be index, # ie the elem at the lower right corner of", "seq1[i-1] == seq2[j-1]: if isinstance(seq1, list): add_elem = [seq1[i-1]] elif isinstance(seq1, str): add_elem", "will be copied to # memoizer. 
memoizer() is returned as the replacement for", "2, 5, 2, 3), 'seq2': (6, 2, 4, 7, 1, 5, 6, 2,", "max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1]) ) def lcs_dynamic(seq1: Sequence, seq2: Sequence) -> int: \"\"\"Find", "with # the next `else`, the resulting lcs will be the 1st common", "key = str(args) + str(kwargs) if key not in cache: # When args", "lcs's length if type(seq1) != type(seq2): raise TypeError(\"Both input sequences should be of", "1st common subseq from the # left of seq2, instead of the left", "= {} # The decorator 'wraps' will run `functools.partial(update_wrapper, wrapped=obj)`, # ie `update_wrapper(wrapper=memoizer,", "---- If there are multiple subseqs with same length, return the first common", "# ie the elem at the lower right corner of `table`, we should", "[seq1[i-1]] elif isinstance(seq1, str): add_elem = seq1[i-1] elif isinstance(seq1, tuple): # A one-elem", "the 1st common subseq from the # left of seq2, instead of the", "to make a (len1+1) * (len2+1) 2D array (ie a table). # table[i][j]", "6, 7, 2, 5, 2, 3], 'seq2': [6, 2, 4, 7, 1, 5,", "1): for j in range(1, len2 + 1): # We start from range(1,)", "= table[-1][-1] # Step 2: find the lcs ITSELF lcs = empty #", "subseq from the # left of seq2, instead of the left of seq1.", "'abcdef', 'seq2': 'badcfe' }, 'output': ('ace', 3) }) # No common subseq tests.append({", "empty, 0 if seq1[0] == seq2[0]: if isinstance(seq1, list): add_elem = [seq1[0]] elif", "([1, 5, 6, 2, 3], 5) }) # Tuple tests.append({ 'input': { 'seq1':", "first from the left of seq1). tests.append({ 'input': { 'seq1': 'abcdef', 'seq2': 'badcfe'", "dynamic programming. Note ---- If there are multiple subseqs with same length, return", "'seq2': 'badcfe' }, 'output': ('ace', 3) }) # No common subseq tests.append({ 'input':", "5, 2, 3], 'seq2': [6, 2, 4, 7, 1, 5, 6, 2, 3]", "if seq1[0] == seq2[0]: if isinstance(seq1, list): add_elem = [seq1[0]] elif isinstance(seq1, str):", "subseq from the left of `seq1`. 
\"\"\" # Time complexity: O(len1 * len2).", "in range(1, len2 + 1): # We start from range(1,) since seq[0:0] is", "then return the empty seq-type return empty, 0 len1, len2 = len(seq1), len(seq2)", "lcs = add_elem + lcs i -= 1 j -= 1 elif table[i-1][j]", "= [seq1[0]] elif isinstance(seq1, str): add_elem = seq1[0] elif isinstance(seq1, tuple): # A", "1)] for i in range(1, len1 + 1): for j in range(1, len2", "should travel from it # BACKWARDS (ie going up and right `table`) to", "_ in range(len1 + 1)] for i in range(1, len1 + 1): for", "subsequence. The subsequence does not need to be continuous in the original sequence.\"\"\"", "'precipitation' }, 'output': ('reipito', 7) }) # One is a subseq of the", "is called. If called later with the same arguments, the cached value is", "will run `functools.partial(update_wrapper, wrapped=obj)`, # ie `update_wrapper(wrapper=memoizer, wrapped=obj)`. (wrapped is the orig func,", "we can see that memoized recursion is faster than plain- # vanilla dynamic", "'seq2': 'bb' }, 'output': ('', 0) }) # One is empty tests.append({ 'input':", "seq2: Sequence) -> Tuple[Sequence, int]: \"\"\"Find the longest common subsequence (both itself and", "seqs is empty, then return the empty seq-type return empty, 0 if seq1[0]", "cache[key] = obj(*args, **kwargs) return cache[key] return memoizer # The decorator 'memoize' will", "not (3) add_elem = (seq1[0],) return ( add_elem + lcs_recursive(seq1[1:], seq2[1:])[0], 1 +", "i -= 1 return lcs, lcs_length ########################################## ### Test client jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests) #", "Tuple from tests import jovian import functools ########################################## ### Test cases tests =", "time it is called. 
If called later with the same arguments, the cached", "not need to be continuous in the original sequence.\"\"\" from typing import Sequence,", "if we swap this elif with # the next `else`, the resulting lcs", "So obj's attributes will be copied to # memoizer. memoizer() is returned as", "the left of seq1. j -= 1 else: i -= 1 return lcs,", "seq2: # If any one of the seqs is empty, then return the", "{ 'seq1': '', 'seq2': 'stone' }, 'output': ('', 0) }) ########################################## ### Methods", "'output': ('ace', 3) }) # No common subseq tests.append({ 'input': { 'seq1': 'a',", "value each time it is called. If called later with the same arguments,", "String tests.append({ 'input': { 'seq1': 'serendipitous', 'seq2': 'precipitation' }, 'output': ('reipito', 7) })", "this `elif <` first is important; if we swap this elif with #", "than reevaluated.\"\"\" # Initialize cache and obj.cache as an empty dict cache =", "+ lcs_recursive(seq1[1:], seq2[1:])[1] ) else: # max(s1, s2, key=len) means to get from", "'output': ('', 0) }) # One is empty tests.append({ 'input': { 'seq1': '',", "# of the two predecessors and go in that direction (ie in search", "Note: The vital idea here is, now that we know the length of", "can only be shown as (3,) but not (3) add_elem = (seq1[i-1],) lcs", "= (seq1[i-1],) lcs = add_elem + lcs i -= 1 j -= 1", "next `else`, the resulting lcs will be the 1st common subseq from the", "of the left of seq1. j -= 1 else: i -= 1 return", "two tests, we can see that memoized recursion is faster than plain- #", "lcs i -= 1 j -= 1 elif table[i-1][j] < table[i][j-1]: # If", "elif isinstance(seq1, tuple): empty = () else: raise TypeError(\"This type of sequence is", "subseqs with same length # In this case, return the first common subseq", "first common subseq (the first from the left of seq1). 
##########################################
### Test cases

# Each case maps the lcs functions' keyword arguments ('input') to the
# expected (subsequence, length) pair ('output').
tests = [
    # List
    {
        'input': {
            'seq1': [1, 3, 5, 6, 7, 2, 5, 2, 3],
            'seq2': [6, 2, 4, 7, 1, 5, 6, 2, 3],
        },
        'output': ([1, 5, 6, 2, 3], 5),
    },
    # Tuple
    {
        'input': {
            'seq1': (1, 3, 5, 6, 7, 2, 5, 2, 3),
            'seq2': (6, 2, 4, 7, 1, 5, 6, 2, 3),
        },
        'output': ((1, 5, 6, 2, 3), 5),
    },
    # String
    {
        'input': {'seq1': 'serendipitous', 'seq2': 'precipitation'},
        'output': ('reipito', 7),
    },
    # One is a subseq of the other
    {
        'input': {'seq1': 'dense', 'seq2': 'condensed'},
        'output': ('dense', 5),
    },
    # Multiple subseqs with same length
    # In this case, the expected answer is the first common subseq
    # from the left of seq1.
    {
        'input': {'seq1': 'abcdef', 'seq2': 'badcfe'},
        'output': ('ace', 3),
    },
    # No common subseq
    {
        'input': {'seq1': 'a', 'seq2': 'bb'},
        'output': ('', 0),
    },
    # One is empty
    {
        'input': {'seq1': '', 'seq2': 'stone'},
        'output': ('', 0),
    },
]
def memoize(obj):
    """Cache a function's return value each time it is called.

    If called later with the same arguments, the cached value is directly
    returned rather than reevaluated.
    """
    # The cache dict is also exposed on the function object as `obj.cache`.
    cache = obj.cache = {}

    # functools.wraps copies obj's metadata (name, docstring, __dict__)
    # onto the wrapper, so the decorated function still looks like obj.
    @functools.wraps(obj)
    def wrapper(*args, **kwargs):
        # Stringifying the arguments makes even unhashable args (e.g.
        # lists) usable as cache keys.
        key = str(args) + str(kwargs)
        try:
            # Fast path: this argument combination was computed before.
            return cache[key]
        except KeyError:
            # First call with these arguments: compute and remember.
            cache[key] = obj(*args, **kwargs)
            return cache[key]

    return wrapper
# The decorator 'memoize' will go and execute `memoize(lcs_recursive)` and
# bind the returned cached wrapper to the name. Without memoization, the
# plain recursion is exponential and impractical when len(seq) > 7.
@memoize
def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]:
    """Find the longest common subsequence (both itself and its length) of
    two sequences recursively.

    Note
    ----
    If there are multiple subseqs with same length, return the first common
    subseq from the left of `seq1`.

    Raises
    ------
    TypeError
        If the two sequences differ in type, or the type is not one of
        list, str, tuple.
    """
    # Time complexity: O(2 ^ (len(seq1) + len(seq2))) without the cache.
    if type(seq1) != type(seq2):
        raise TypeError("Both input sequences should be of the same type.")
    # Consider all subclasses of generic type `Sequence`
    if isinstance(seq1, list):
        empty = []
    elif isinstance(seq1, str):
        empty = ''
    elif isinstance(seq1, tuple):
        empty = ()
    else:
        raise TypeError("This type of sequence is not supported; try list, str, tuple.")
    if not seq1 or not seq2:
        # If any one of the seqs is empty, return the empty seq of that type
        return empty, 0
    if seq1[0] == seq2[0]:
        # Slicing yields a one-element sequence of the SAME type (list,
        # str, or tuple), so no per-type branching is needed here.
        add_elem = seq1[0:1]
        # Hoist the recursive call into a local: the original evaluated
        # lcs_recursive(seq1[1:], seq2[1:]) twice (two cache lookups and
        # two key stringifications even when memoized).
        sub_lcs, sub_len = lcs_recursive(seq1[1:], seq2[1:])
        return add_elem + sub_lcs, 1 + sub_len
    # The current heads differ: drop one elem from either side and keep the
    # better result. max(s1, s2, key=len) picks the longer sequence; on a
    # tie it keeps its FIRST argument, so argument order is significant.
    drop_seq2 = lcs_recursive(seq1, seq2[1:])
    drop_seq1 = lcs_recursive(seq1[1:], seq2)
    return (
        max(drop_seq2[0], drop_seq1[0], key=len),
        max(drop_seq2[1], drop_seq1[1]),
    )
\"\"\" # Time complexity:", "the orig `obj` @functools.wraps(obj) def memoizer(*args, **kwargs): key = str(args) + str(kwargs) if", "TypeError(\"This type of sequence is not supported; try list, str, tuple.\") if not", "seqs is empty, then return the empty seq-type return empty, 0 len1, len2", "+ 1)] for i in range(1, len1 + 1): for j in range(1,", "of the two predecessors and go in that direction (ie in search of", "is directly returned rather than reevaluated.\"\"\" # Initialize cache and obj.cache as an", "5, 6, 7, 2, 5, 2, 3], 'seq2': [6, 2, 4, 7, 1,", "and its length) of two sequences recursively. Note ---- If there are multiple", "length) of two sequences recursively. Note ---- If there are multiple subseqs with", "same length, return the first common subseq from the left of `seq1`. \"\"\"", "memoizer(*args, **kwargs): key = str(args) + str(kwargs) if key not in cache: #", "list): add_elem = [seq1[0]] elif isinstance(seq1, str): add_elem = seq1[0] elif isinstance(seq1, tuple):", "BACKWARDS (ie going up and right `table`) to find the feasible lcs. i,", "not in cache: # When args are not present in cache's keys, add", "idea here is, now that we know the length of lcs to be", "2: find the lcs ITSELF lcs = empty # Note: The vital idea", "license information. \"\"\"Longest common subsequence. The subsequence does not need to be continuous", "to # memoizer. memoizer() is returned as the replacement for the orig `obj`", "called. If called later with the same arguments, the cached value is directly", "seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1]) ) def lcs_dynamic(seq1: Sequence, seq2: Sequence) -> int: \"\"\"Find the", "lcs_length ########################################## ### Test client jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests) # From the next two tests,", "length, return the first common subseq from the left of `seq1`. 
\"\"\" #", "but not (3) add_elem = (seq1[i-1],) lcs = add_elem + lcs i -=", "while i > 0 and j > 0: if seq1[i-1] == seq2[j-1]: if", "-= 1 return lcs, lcs_length ########################################## ### Test client jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests) # From", "lcs_recursive(seq1[1:], seq2[1:])[1] ) else: # max(s1, s2, key=len) means to get from s1,", "'seq2': 'precipitation' }, 'output': ('reipito', 7) }) # One is a subseq of", "}, 'output': ([1, 5, 6, 2, 3], 5) }) # Tuple tests.append({ 'input':", "elem at the lower right corner of `table`, we should travel from it", "6, 2, 3] }, 'output': ([1, 5, 6, 2, 3], 5) }) #", "a subseq of the other tests.append({ 'input': { 'seq1': 'dense', 'seq2': 'condensed' },", "-> int: \"\"\"Find the longest common subsequence (both itself and its length) of", "to find the feasible lcs. i, j = len1, len2 while i >", "1][j - 1] + 1 else: table[i][j] = max(table[i - 1][j], table[i][j -", "1) for _ in range(len1 + 1)] for i in range(1, len1 +", "- 1] + 1 else: table[i][j] = max(table[i - 1][j], table[i][j - 1])", "table[i][j - 1]) # The next two lines are equivalent; use either #", "(seq1[i-1],) lcs = add_elem + lcs i -= 1 j -= 1 elif", "subseq of the other tests.append({ 'input': { 'seq1': 'dense', 'seq2': 'condensed' }, 'output':", "left of `seq1`. \"\"\" # Time complexity: O(2 ^ (len(seq1) + len(seq2))) if", "memoization, the orig func runs too slow (impossible when len(seq) > 7) @memoize", "# Note: The vital idea here is, now that we know the length", "is the orig func, # while wrapper is the func to be updated.)", "(len1+1) * (len2+1) 2D array (ie a table). 
# table[i][j] is the lcs", "if seq1[i-1] == seq2[j-1]: if isinstance(seq1, list): add_elem = [seq1[i-1]] elif isinstance(seq1, str):", "are not the same, then find the larger # of the two predecessors", "if isinstance(seq1, list): add_elem = [seq1[0]] elif isinstance(seq1, str): add_elem = seq1[0] elif", "isinstance(seq1, list): empty = [] elif isinstance(seq1, str): empty = '' elif isinstance(seq1,", "in that direction (ie in search of lcs). # Note: Putting this `elif", "subsequence (both itself and its length) of two sequences recursively. Note ---- If", "at the lower right corner of `table`, we should travel from it #", "of seq1. j -= 1 else: i -= 1 return lcs, lcs_length ##########################################", "}, 'output': ('reipito', 7) }) # One is a subseq of the other", "cache: # When args are not present in cache's keys, add them cache[key]", "by dynamic programming. Note ---- If there are multiple subseqs with same length,", "the lcs length of seq1[0:i] and seq2[0:j] table = [[0] * (len2 +", "is returned as the replacement for the orig `obj` @functools.wraps(obj) def memoizer(*args, **kwargs):", "\"\"\" # Time complexity: O(len1 * len2). Space complexity: O(len1 * len2). #", "copied to # memoizer. memoizer() is returned as the replacement for the orig", "only be shown as (3,) but not (3) add_elem = (seq1[0],) return (", "each time it is called. If called later with the same arguments, the", "get from s1, s2 the one with bigger len() return ( max(lcs_recursive(seq1, seq2[1:])[0],", "return memoizer # The decorator 'memoize' will go and execute function `memoize(lcs)`, return", "be index, # ie the elem at the lower right corner of `table`,", "while wrapper is the func to be updated.) 
So obj's attributes will be", "that memoized recursion is faster than plain- # vanilla dynamic programming jovian.evaluate_test_cases_justyre(func=lcs_recursive, tests=tests)", "in range(1, len1 + 1): for j in range(1, len2 + 1): #", "range(1, len2 + 1): # We start from range(1,) since seq[0:0] is empty,", "j = len1, len2 while i > 0 and j > 0: if", "and its length) of two sequences by dynamic programming. Note ---- If there", "0 if seq1[0] == seq2[0]: if isinstance(seq1, list): add_elem = [seq1[0]] elif isinstance(seq1,", "1 return lcs, lcs_length ########################################## ### Test client jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests) # From the", ") def lcs_dynamic(seq1: Sequence, seq2: Sequence) -> int: \"\"\"Find the longest common subsequence", "j -= 1 elif table[i-1][j] < table[i][j-1]: # If the current elem of", "(ie in search of lcs). # Note: Putting this `elif <` first is", "`table`, we should travel from it # BACKWARDS (ie going up and right", "Note: Putting this `elif <` first is important; if we swap this elif", "one-elem tuple can only be shown as (3,) but not (3) add_elem =", "a table). # table[i][j] is the lcs length of seq1[0:i] and seq2[0:j] table", "of sequence is not supported; try list, str, tuple.\") if not seq1 or", "we should travel from it # BACKWARDS (ie going up and right `table`)", "i in range(1, len1 + 1): for j in range(1, len2 + 1):", "}, 'output': ('', 0) }) # One is empty tests.append({ 'input': { 'seq1':", "key=len) means to get from s1, s2 the one with bigger len() return", "# Licensed under MIT License. 
# See LICENSE in the project root for", "{} # The decorator 'wraps' will run `functools.partial(update_wrapper, wrapped=obj)`, # ie `update_wrapper(wrapper=memoizer, wrapped=obj)`.", "corner of `table`, we should travel from it # BACKWARDS (ie going up", "1] == seq2[j - 1]: table[i][j] = table[i - 1][j - 1] +", "7, 2, 5, 2, 3), 'seq2': (6, 2, 4, 7, 1, 5, 6,", "( max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len), max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1]) ) def lcs_dynamic(seq1:", "not present in cache's keys, add them cache[key] = obj(*args, **kwargs) return cache[key]", "the longest common subsequence (both itself and its length) of two sequences recursively.", "3) }, 'output': ((1, 5, 6, 2, 3), 5) }) # String tests.append({", "table[-1][-1] # Step 2: find the lcs ITSELF lcs = empty # Note:", "equivalent; use either # lcs_length = table[len1][len2] lcs_length = table[-1][-1] # Step 2:", "The subsequence does not need to be continuous in the original sequence.\"\"\" from", "table[i][j] is the lcs length of seq1[0:i] and seq2[0:j] table = [[0] *", "cases tests = [] # List tests.append({ 'input': { 'seq1': [1, 3, 5,", "of the same type.\") # Consider all subclasses of generic type `Sequence` if", "- 1][j], table[i][j - 1]) # The next two lines are equivalent; use", "`Sequence` if isinstance(seq1, list): empty = [] elif isinstance(seq1, str): empty = ''", "seq2[j-1]: if isinstance(seq1, list): add_elem = [seq1[i-1]] elif isinstance(seq1, str): add_elem = seq1[i-1]", "lcs ITSELF lcs = empty # Note: The vital idea here is, now", "the project root for license information. \"\"\"Longest common subsequence. 
The subsequence does not", "cache's keys, add them cache[key] = obj(*args, **kwargs) return cache[key] return memoizer #", "nested lists to make a (len1+1) * (len2+1) 2D array (ie a table).", "5, 2, 3), 'seq2': (6, 2, 4, 7, 1, 5, 6, 2, 3)", "length if type(seq1) != type(seq2): raise TypeError(\"Both input sequences should be of the", "subsequence does not need to be continuous in the original sequence.\"\"\" from typing", "for i in range(1, len1 + 1): for j in range(1, len2 +", "right `table`) to find the feasible lcs. i, j = len1, len2 while", "for the orig `obj` @functools.wraps(obj) def memoizer(*args, **kwargs): key = str(args) + str(kwargs)", "shown as (3,) but not (3) add_elem = (seq1[i-1],) lcs = add_elem +", "'dense', 'seq2': 'condensed' }, 'output': ('dense', 5) }) # Multiple subseqs with same", "length # In this case, return the first common subseq (the first from", "2, 3], 5) }) # Tuple tests.append({ 'input': { 'seq1': (1, 3, 5,", "add_elem + lcs_recursive(seq1[1:], seq2[1:])[0], 1 + lcs_recursive(seq1[1:], seq2[1:])[1] ) else: # max(s1, s2,", "Tuple tests.append({ 'input': { 'seq1': (1, 3, 5, 6, 7, 2, 5, 2,", "len(seq1), len(seq2) # Use nested lists to make a (len1+1) * (len2+1) 2D", "the larger # of the two predecessors and go in that direction (ie", "return the first common subseq from the left of `seq1`. \"\"\" # Time", "isinstance(seq1, str): empty = '' elif isinstance(seq1, tuple): empty = () else: raise", "jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests) # From the next two tests, we can see that memoized", "there are multiple subseqs with same length, return the first common subseq from", "common subseq from the left of `seq1`. 
\"\"\" # Time complexity: O(len1 *", "If the current elem of seq1 & seq2 are not the same, then", "add_elem = [seq1[0]] elif isinstance(seq1, str): add_elem = seq1[0] elif isinstance(seq1, tuple): #", "# lcs_length = table[len1][len2] lcs_length = table[-1][-1] # Step 2: find the lcs", "the lower right corner of `table`, we should travel from it # BACKWARDS", "wrapped=obj)`. (wrapped is the orig func, # while wrapper is the func to", "going up and right `table`) to find the feasible lcs. i, j =", "= [seq1[i-1]] elif isinstance(seq1, str): add_elem = seq1[i-1] elif isinstance(seq1, tuple): # A", "Test cases tests = [] # List tests.append({ 'input': { 'seq1': [1, 3,", "two sequences recursively. Note ---- If there are multiple subseqs with same length,", "In this case, return the first common subseq (the first from the left", "(impossible when len(seq) > 7) @memoize def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence,", "return lcs, lcs_length ########################################## ### Test client jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests) # From the next", "+ 1): for j in range(1, len2 + 1): # We start from", "> 7) @memoize def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]: \"\"\"Find the", "+ str(kwargs) if key not in cache: # When args are not present", "decorator 'memoize' will go and execute function `memoize(lcs)`, return memoizer. # Without memoization,", "memoize(obj): \"\"\"Cache a function's return value each time it is called. If called", "'', 'seq2': 'stone' }, 'output': ('', 0) }) ########################################## ### Methods def memoize(obj):", "first common subseq from the left of `seq1`. 
\"\"\" # Time complexity: O(2", "the seqs is empty, then return the empty seq-type return empty, 0 if", "}) # Multiple subseqs with same length # In this case, return the", "# The decorator 'wraps' will run `functools.partial(update_wrapper, wrapped=obj)`, # ie `update_wrapper(wrapper=memoizer, wrapped=obj)`. (wrapped", "(ie going up and right `table`) to find the feasible lcs. i, j", "\"\"\"Cache a function's return value each time it is called. If called later", "is 0 if seq1[i - 1] == seq2[j - 1]: table[i][j] = table[i", "seq2[0]: if isinstance(seq1, list): add_elem = [seq1[0]] elif isinstance(seq1, str): add_elem = seq1[0]", "to be continuous in the original sequence.\"\"\" from typing import Sequence, Tuple from", "func, # while wrapper is the func to be updated.) So obj's attributes", "replacement for the orig `obj` @functools.wraps(obj) def memoizer(*args, **kwargs): key = str(args) +", "the same, then find the larger # of the two predecessors and go", "'output': ('reipito', 7) }) # One is a subseq of the other tests.append({", "w/any seq is 0 if seq1[i - 1] == seq2[j - 1]: table[i][j]", "}) # No common subseq tests.append({ 'input': { 'seq1': 'a', 'seq2': 'bb' },", "'input': { 'seq1': 'abcdef', 'seq2': 'badcfe' }, 'output': ('ace', 3) }) # No", "programming. Note ---- If there are multiple subseqs with same length, return the", "sequences recursively. 
Note ---- If there are multiple subseqs with same length, return", "Sequence) -> int: \"\"\"Find the longest common subsequence (both itself and its length)", "two lines are equivalent; use either # lcs_length = table[len1][len2] lcs_length = table[-1][-1]", "len(seq) > 7) @memoize def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]: \"\"\"Find", "= '' elif isinstance(seq1, tuple): empty = () else: raise TypeError(\"This type of", "- 1]) # The next two lines are equivalent; use either # lcs_length", "`functools.partial(update_wrapper, wrapped=obj)`, # ie `update_wrapper(wrapper=memoizer, wrapped=obj)`. (wrapped is the orig func, # while", "of seq1 & seq2 are not the same, then find the larger #", "is empty tests.append({ 'input': { 'seq1': '', 'seq2': 'stone' }, 'output': ('', 0)", "be of the same type.\") # Consider all subclasses of generic type `Sequence`", "2, 4, 7, 1, 5, 6, 2, 3) }, 'output': ((1, 5, 6,", "seq1[0] elif isinstance(seq1, tuple): # A one-elem tuple can only be shown as", "{ 'seq1': 'abcdef', 'seq2': 'badcfe' }, 'output': ('ace', 3) }) # No common", "'seq1': [1, 3, 5, 6, 7, 2, 5, 2, 3], 'seq2': [6, 2,", "1, 5, 6, 2, 3] }, 'output': ([1, 5, 6, 2, 3], 5)", "the lcs ITSELF lcs = empty # Note: The vital idea here is,", "then find the larger # of the two predecessors and go in that", "reevaluated.\"\"\" # Initialize cache and obj.cache as an empty dict cache = obj.cache", "in cache: # When args are not present in cache's keys, add them", "is, now that we know the length of lcs to be index, #", "common subsequence. 
The subsequence does not need to be continuous in the original", "seq2[1:])[0], 1 + lcs_recursive(seq1[1:], seq2[1:])[1] ) else: # max(s1, s2, key=len) means to", "{ 'seq1': 'a', 'seq2': 'bb' }, 'output': ('', 0) }) # One is", "len1, len2 = len(seq1), len(seq2) # Use nested lists to make a (len1+1)", "2, 3), 'seq2': (6, 2, 4, 7, 1, 5, 6, 2, 3) },", "seq1[0] == seq2[0]: if isinstance(seq1, list): add_elem = [seq1[0]] elif isinstance(seq1, str): add_elem", "project root for license information. \"\"\"Longest common subsequence. The subsequence does not need", "add_elem + lcs i -= 1 j -= 1 elif table[i-1][j] < table[i][j-1]:", "if not seq1 or not seq2: # If any one of the seqs", "No common subseq tests.append({ 'input': { 'seq1': 'a', 'seq2': 'bb' }, 'output': ('',", "cached value is directly returned rather than reevaluated.\"\"\" # Initialize cache and obj.cache", "memoizer. # Without memoization, the orig func runs too slow (impossible when len(seq)", "'input': { 'seq1': [1, 3, 5, 6, 7, 2, 5, 2, 3], 'seq2':", "of seq1[0:i] and seq2[0:j] table = [[0] * (len2 + 1) for _", "elif isinstance(seq1, str): add_elem = seq1[i-1] elif isinstance(seq1, tuple): # A one-elem tuple", "# left of seq2, instead of the left of seq1. j -= 1", "isinstance(seq1, tuple): # A one-elem tuple can only be shown as (3,) but", ") else: # max(s1, s2, key=len) means to get from s1, s2 the", "len2). Space complexity: O(len1 * len2). # Step 1: find the lcs's length", "1]) # The next two lines are equivalent; use either # lcs_length =", "add_elem = (seq1[0],) return ( add_elem + lcs_recursive(seq1[1:], seq2[1:])[0], 1 + lcs_recursive(seq1[1:], seq2[1:])[1]", "root for license information. \"\"\"Longest common subsequence. The subsequence does not need to", "empty, 0 len1, len2 = len(seq1), len(seq2) # Use nested lists to make", "The decorator 'memoize' will go and execute function `memoize(lcs)`, return memoizer. 
# Without", "supported; try list, str, tuple.\") if not seq1 or not seq2: # If", "1 elif table[i-1][j] < table[i][j-1]: # If the current elem of seq1 &", "orig `obj` @functools.wraps(obj) def memoizer(*args, **kwargs): key = str(args) + str(kwargs) if key", "will go and execute function `memoize(lcs)`, return memoizer. # Without memoization, the orig", "then return the empty seq-type return empty, 0 if seq1[0] == seq2[0]: if", "\"\"\"Longest common subsequence. The subsequence does not need to be continuous in the", "seq2: Sequence) -> int: \"\"\"Find the longest common subsequence (both itself and its", "empty = () else: raise TypeError(\"This type of sequence is not supported; try", "-= 1 j -= 1 elif table[i-1][j] < table[i][j-1]: # If the current", "be the 1st common subseq from the # left of seq2, instead of", "6, 2, 3), 5) }) # String tests.append({ 'input': { 'seq1': 'serendipitous', 'seq2':", "type(seq1) != type(seq2): raise TypeError(\"Both input sequences should be of the same type.\")", "itself and its length) of two sequences by dynamic programming. Note ---- If", "'output': ('', 0) }) ########################################## ### Methods def memoize(obj): \"\"\"Cache a function's return", "run `functools.partial(update_wrapper, wrapped=obj)`, # ie `update_wrapper(wrapper=memoizer, wrapped=obj)`. (wrapped is the orig func, #", "of generic type `Sequence` if isinstance(seq1, list): empty = [] elif isinstance(seq1, str):", "[[0] * (len2 + 1) for _ in range(len1 + 1)] for i", "same, then find the larger # of the two predecessors and go in", "from the # left of seq2, instead of the left of seq1. j", "+ 1) for _ in range(len1 + 1)] for i in range(1, len1", "# See LICENSE in the project root for license information. 
\"\"\"Longest common subsequence.", "key not in cache: # When args are not present in cache's keys,", "this elif with # the next `else`, the resulting lcs will be the", "empty dict cache = obj.cache = {} # The decorator 'wraps' will run", "not seq1 or not seq2: # If any one of the seqs is", "two sequences by dynamic programming. Note ---- If there are multiple subseqs with", "If called later with the same arguments, the cached value is directly returned", "Time complexity: O(len1 * len2). Space complexity: O(len1 * len2). # Step 1:", "if type(seq1) != type(seq2): raise TypeError(\"Both input sequences should be of the same", "for j in range(1, len2 + 1): # We start from range(1,) since", "`table`) to find the feasible lcs. i, j = len1, len2 while i", "2D array (ie a table). # table[i][j] is the lcs length of seq1[0:i]", "seq1 & seq2 are not the same, then find the larger # of", "as the replacement for the orig `obj` @functools.wraps(obj) def memoizer(*args, **kwargs): key =", "1][j], table[i][j - 1]) # The next two lines are equivalent; use either", "lower right corner of `table`, we should travel from it # BACKWARDS (ie", "next two tests, we can see that memoized recursion is faster than plain-", "an empty dict cache = obj.cache = {} # The decorator 'wraps' will", "sequence is not supported; try list, str, tuple.\") if not seq1 or not", "ie `update_wrapper(wrapper=memoizer, wrapped=obj)`. 
(wrapped is the orig func, # while wrapper is the", "lcs_recursive(seq1[1:], seq2)[0], key=len), max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1]) ) def lcs_dynamic(seq1: Sequence, seq2: Sequence)", "3, 5, 6, 7, 2, 5, 2, 3), 'seq2': (6, 2, 4, 7,", "'a', 'seq2': 'bb' }, 'output': ('', 0) }) # One is empty tests.append({", "with same length, return the first common subseq from the left of `seq1`.", "when len(seq) > 7) @memoize def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]:", "obj.cache = {} # The decorator 'wraps' will run `functools.partial(update_wrapper, wrapped=obj)`, # ie", "empty, then return the empty seq-type return empty, 0 if seq1[0] == seq2[0]:", "lcs_length = table[-1][-1] # Step 2: find the lcs ITSELF lcs = empty", "up and right `table`) to find the feasible lcs. i, j = len1,", "the other tests.append({ 'input': { 'seq1': 'dense', 'seq2': 'condensed' }, 'output': ('dense', 5)", "return ( add_elem + lcs_recursive(seq1[1:], seq2[1:])[0], 1 + lcs_recursive(seq1[1:], seq2[1:])[1] ) else: #", "0 if seq1[i - 1] == seq2[j - 1]: table[i][j] = table[i -", "LICENSE in the project root for license information. \"\"\"Longest common subsequence. 
The subsequence", "2, 3) }, 'output': ((1, 5, 6, 2, 3), 5) }) # String", "the original sequence.\"\"\" from typing import Sequence, Tuple from tests import jovian import", "7, 1, 5, 6, 2, 3] }, 'output': ([1, 5, 6, 2, 3],", "be shown as (3,) but not (3) add_elem = (seq1[i-1],) lcs = add_elem", "== seq2[j - 1]: table[i][j] = table[i - 1][j - 1] + 1", "empty = [] elif isinstance(seq1, str): empty = '' elif isinstance(seq1, tuple): empty", "7) @memoize def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]: \"\"\"Find the longest", "@memoize def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]: \"\"\"Find the longest common", "'output': ([1, 5, 6, 2, 3], 5) }) # Tuple tests.append({ 'input': {", "the empty seq-type return empty, 0 len1, len2 = len(seq1), len(seq2) # Use", "def lcs_dynamic(seq1: Sequence, seq2: Sequence) -> int: \"\"\"Find the longest common subsequence (both", "larger # of the two predecessors and go in that direction (ie in", "of the other tests.append({ 'input': { 'seq1': 'dense', 'seq2': 'condensed' }, 'output': ('dense',", "return value each time it is called. If called later with the same", "orig func, # while wrapper is the func to be updated.) So obj's", "either # lcs_length = table[len1][len2] lcs_length = table[-1][-1] # Step 2: find the", "the first common subseq (the first from the left of seq1). tests.append({ 'input':", "return memoizer. # Without memoization, the orig func runs too slow (impossible when", "i, j = len1, len2 while i > 0 and j > 0:", "Putting this `elif <` first is important; if we swap this elif with", "only be shown as (3,) but not (3) add_elem = (seq1[i-1],) lcs =", "seq1. 
j -= 1 else: i -= 1 return lcs, lcs_length ########################################## ###", "input sequences should be of the same type.\") # Consider all subclasses of", "# Initialize cache and obj.cache as an empty dict cache = obj.cache =", "empty = '' elif isinstance(seq1, tuple): empty = () else: raise TypeError(\"This type", "one of the seqs is empty, then return the empty seq-type return empty,", "left of seq1. j -= 1 else: i -= 1 return lcs, lcs_length", "so its lcs w/any seq is 0 if seq1[i - 1] == seq2[j", "# Note: Putting this `elif <` first is important; if we swap this", "of two sequences by dynamic programming. Note ---- If there are multiple subseqs", "start from range(1,) since seq[0:0] is empty, so its lcs w/any seq is", "table[i][j] = max(table[i - 1][j], table[i][j - 1]) # The next two lines", "from it # BACKWARDS (ie going up and right `table`) to find the", "else: raise TypeError(\"This type of sequence is not supported; try list, str, tuple.\")", "# In this case, return the first common subseq (the first from the", "Space complexity: O(len1 * len2). 
# Step 1: find the lcs's length if", "generic type `Sequence` if isinstance(seq1, list): empty = [] elif isinstance(seq1, str): empty", "its lcs w/any seq is 0 if seq1[i - 1] == seq2[j -", "(len(seq1) + len(seq2))) if type(seq1) != type(seq2): raise TypeError(\"Both input sequences should be", "int: \"\"\"Find the longest common subsequence (both itself and its length) of two", "seq2[0:j] table = [[0] * (len2 + 1) for _ in range(len1 +", "1 j -= 1 elif table[i-1][j] < table[i][j-1]: # If the current elem", "# List tests.append({ 'input': { 'seq1': [1, 3, 5, 6, 7, 2, 5,", "7, 1, 5, 6, 2, 3) }, 'output': ((1, 5, 6, 2, 3),", "}) ########################################## ### Methods def memoize(obj): \"\"\"Cache a function's return value each time", "Sequence, Tuple from tests import jovian import functools ########################################## ### Test cases tests", "ie the elem at the lower right corner of `table`, we should travel", "add them cache[key] = obj(*args, **kwargs) return cache[key] return memoizer # The decorator", "key=len), max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1]) ) def lcs_dynamic(seq1: Sequence, seq2: Sequence) -> int:", "same arguments, the cached value is directly returned rather than reevaluated.\"\"\" # Initialize", "try list, str, tuple.\") if not seq1 or not seq2: # If any", "!= type(seq2): raise TypeError(\"Both input sequences should be of the same type.\") #", "-> Tuple[Sequence, int]: \"\"\"Find the longest common subsequence (both itself and its length)", "a function's return value each time it is called. If called later with", "that direction (ie in search of lcs). 
# Note: Putting this `elif <`", "lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]: \"\"\"Find the longest common subsequence (both", "(seq1[0],) return ( add_elem + lcs_recursive(seq1[1:], seq2[1:])[0], 1 + lcs_recursive(seq1[1:], seq2[1:])[1] ) else:", "list, str, tuple.\") if not seq1 or not seq2: # If any one", "information. \"\"\"Longest common subsequence. The subsequence does not need to be continuous in", "direction (ie in search of lcs). # Note: Putting this `elif <` first", "the current elem of seq1 & seq2 are not the same, then find", "search of lcs). # Note: Putting this `elif <` first is important; if", "# When args are not present in cache's keys, add them cache[key] =", "'seq2': [6, 2, 4, 7, 1, 5, 6, 2, 3] }, 'output': ([1,", "Multiple subseqs with same length # In this case, return the first common", "= [] # List tests.append({ 'input': { 'seq1': [1, 3, 5, 6, 7,", "def lcs_recursive(seq1: Sequence, seq2: Sequence) -> Tuple[Sequence, int]: \"\"\"Find the longest common subsequence", "3] }, 'output': ([1, 5, 6, 2, 3], 5) }) # Tuple tests.append({", "later with the same arguments, the cached value is directly returned rather than", "table = [[0] * (len2 + 1) for _ in range(len1 + 1)]", "+ lcs i -= 1 j -= 1 elif table[i-1][j] < table[i][j-1]: #", "seq2 are not the same, then find the larger # of the two", "seq2)[1]) ) def lcs_dynamic(seq1: Sequence, seq2: Sequence) -> int: \"\"\"Find the longest common", "memoizer. memoizer() is returned as the replacement for the orig `obj` @functools.wraps(obj) def", "directly returned rather than reevaluated.\"\"\" # Initialize cache and obj.cache as an empty", "any one of the seqs is empty, then return the empty seq-type return", "is empty, so its lcs w/any seq is 0 if seq1[i - 1]", "(len2+1) 2D array (ie a table). 
# table[i][j] is the lcs length of", "5, 6, 2, 3) }, 'output': ((1, 5, 6, 2, 3), 5) })", "# max(s1, s2, key=len) means to get from s1, s2 the one with", "be continuous in the original sequence.\"\"\" from typing import Sequence, Tuple from tests", "* (len2 + 1) for _ in range(len1 + 1)] for i in", "as (3,) but not (3) add_elem = (seq1[i-1],) lcs = add_elem + lcs", "`obj` @functools.wraps(obj) def memoizer(*args, **kwargs): key = str(args) + str(kwargs) if key not", "If there are multiple subseqs with same length, return the first common subseq", "}) # Tuple tests.append({ 'input': { 'seq1': (1, 3, 5, 6, 7, 2,", "if isinstance(seq1, list): empty = [] elif isinstance(seq1, str): empty = '' elif", "len() return ( max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len), max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1]) )", "are multiple subseqs with same length, return the first common subseq from the", "raise TypeError(\"This type of sequence is not supported; try list, str, tuple.\") if", "empty tests.append({ 'input': { 'seq1': '', 'seq2': 'stone' }, 'output': ('', 0) })", "########################################## ### Test cases tests = [] # List tests.append({ 'input': { 'seq1':", "return the empty seq-type return empty, 0 if seq1[0] == seq2[0]: if isinstance(seq1,", "decorator 'wraps' will run `functools.partial(update_wrapper, wrapped=obj)`, # ie `update_wrapper(wrapper=memoizer, wrapped=obj)`. (wrapped is the", "func runs too slow (impossible when len(seq) > 7) @memoize def lcs_recursive(seq1: Sequence,", "# Time complexity: O(len1 * len2). Space complexity: O(len1 * len2). # Step", "and go in that direction (ie in search of lcs). # Note: Putting", "len2 = len(seq1), len(seq2) # Use nested lists to make a (len1+1) *", "the seqs is empty, then return the empty seq-type return empty, 0 len1,", "wrapped=obj)`, # ie `update_wrapper(wrapper=memoizer, wrapped=obj)`. 
(wrapped is the orig func, # while wrapper", "tests.append({ 'input': { 'seq1': 'dense', 'seq2': 'condensed' }, 'output': ('dense', 5) }) #", "the one with bigger len() return ( max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len), max(lcs_recursive(seq1,", "One is a subseq of the other tests.append({ 'input': { 'seq1': 'dense', 'seq2':", "lcs to be index, # ie the elem at the lower right corner", "^ (len(seq1) + len(seq2))) if type(seq1) != type(seq2): raise TypeError(\"Both input sequences should", "def memoize(obj): \"\"\"Cache a function's return value each time it is called. If", "s2, key=len) means to get from s1, s2 the one with bigger len()", "len1, len2 while i > 0 and j > 0: if seq1[i-1] ==", "Step 1: find the lcs's length if type(seq1) != type(seq2): raise TypeError(\"Both input", "1]: table[i][j] = table[i - 1][j - 1] + 1 else: table[i][j] =", "### Test cases tests = [] # List tests.append({ 'input': { 'seq1': [1,", "5, 6, 7, 2, 5, 2, 3), 'seq2': (6, 2, 4, 7, 1,", "('reipito', 7) }) # One is a subseq of the other tests.append({ 'input':", "is not supported; try list, str, tuple.\") if not seq1 or not seq2:", "common subseq from the # left of seq2, instead of the left of", "functools ########################################## ### Test cases tests = [] # List tests.append({ 'input': {", "max(table[i - 1][j], table[i][j - 1]) # The next two lines are equivalent;", "this case, return the first common subseq (the first from the left of", "same type.\") # Consider all subclasses of generic type `Sequence` if isinstance(seq1, list):", "in the original sequence.\"\"\" from typing import Sequence, Tuple from tests import jovian", "# One is empty tests.append({ 'input': { 'seq1': '', 'seq2': 'stone' }, 'output':", "len(seq2) # Use nested lists to make a (len1+1) * (len2+1) 2D array", "}, 'output': ((1, 5, 6, 2, 3), 5) }) # String tests.append({ 'input':", "= max(table[i - 1][j], table[i][j - 1]) # The next two lines are", 
"@functools.wraps(obj) def memoizer(*args, **kwargs): key = str(args) + str(kwargs) if key not in", "multiple subseqs with same length, return the first common subseq from the left", "}, 'output': ('ace', 3) }) # No common subseq tests.append({ 'input': { 'seq1':", "can only be shown as (3,) but not (3) add_elem = (seq1[0],) return", "tests.append({ 'input': { 'seq1': [1, 3, 5, 6, 7, 2, 5, 2, 3],", "find the larger # of the two predecessors and go in that direction", "5, 6, 2, 3], 5) }) # Tuple tests.append({ 'input': { 'seq1': (1,", "sequences by dynamic programming. Note ---- If there are multiple subseqs with same", "From the next two tests, we can see that memoized recursion is faster", "7) }) # One is a subseq of the other tests.append({ 'input': {", "MIT License. # See LICENSE in the project root for license information. \"\"\"Longest", "Licensed under MIT License. # See LICENSE in the project root for license", "'serendipitous', 'seq2': 'precipitation' }, 'output': ('reipito', 7) }) # One is a subseq", "returned rather than reevaluated.\"\"\" # Initialize cache and obj.cache as an empty dict", "str(args) + str(kwargs) if key not in cache: # When args are not", "elif isinstance(seq1, tuple): # A one-elem tuple can only be shown as (3,)", "list): add_elem = [seq1[i-1]] elif isinstance(seq1, str): add_elem = seq1[i-1] elif isinstance(seq1, tuple):", "tests.append({ 'input': { 'seq1': 'serendipitous', 'seq2': 'precipitation' }, 'output': ('reipito', 7) }) #", "== seq2[j-1]: if isinstance(seq1, list): add_elem = [seq1[i-1]] elif isinstance(seq1, str): add_elem =", "elif table[i-1][j] < table[i][j-1]: # If the current elem of seq1 & seq2", "same length # In this case, return the first common subseq (the first", "one with bigger len() return ( max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len), max(lcs_recursive(seq1, seq2[1:])[1],", "of lcs). # Note: Putting this `elif <` first is important; if we", "the left of seq1). 
tests.append({ 'input': { 'seq1': 'abcdef', 'seq2': 'badcfe' }, 'output':", "seq1 or not seq2: # If any one of the seqs is empty,", "isinstance(seq1, list): add_elem = [seq1[0]] elif isinstance(seq1, str): add_elem = seq1[0] elif isinstance(seq1,", "`else`, the resulting lcs will be the 1st common subseq from the #", "resulting lcs will be the 1st common subseq from the # left of", "of `seq1`. \"\"\" # Time complexity: O(2 ^ (len(seq1) + len(seq2))) if type(seq1)", "the same type.\") # Consider all subclasses of generic type `Sequence` if isinstance(seq1,", "seq1[i-1] elif isinstance(seq1, tuple): # A one-elem tuple can only be shown as", "will be the 1st common subseq from the # left of seq2, instead", "else: # max(s1, s2, key=len) means to get from s1, s2 the one", "continuous in the original sequence.\"\"\" from typing import Sequence, Tuple from tests import", "the func to be updated.) So obj's attributes will be copied to #", "TypeError(\"Both input sequences should be of the same type.\") # Consider all subclasses", "# Step 1: find the lcs's length if type(seq1) != type(seq2): raise TypeError(\"Both", "'input': { 'seq1': 'a', 'seq2': 'bb' }, 'output': ('', 0) }) # One", "max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len), max(lcs_recursive(seq1, seq2[1:])[1], lcs_recursive(seq1[1:], seq2)[1]) ) def lcs_dynamic(seq1: Sequence,", "0 len1, len2 = len(seq1), len(seq2) # Use nested lists to make a", "The vital idea here is, now that we know the length of lcs", "lcs_dynamic(seq1: Sequence, seq2: Sequence) -> int: \"\"\"Find the longest common subsequence (both itself", "'bb' }, 'output': ('', 0) }) # One is empty tests.append({ 'input': {", "next two lines are equivalent; use either # lcs_length = table[len1][len2] lcs_length =", "the same arguments, the cached value is directly returned rather than reevaluated.\"\"\" #", "2, 5, 2, 3], 'seq2': [6, 2, 4, 7, 1, 5, 6, 2,", "it # BACKWARDS (ie going up and right `table`) to find the 
feasible", "= table[i - 1][j - 1] + 1 else: table[i][j] = max(table[i -", "that we know the length of lcs to be index, # ie the", "as (3,) but not (3) add_elem = (seq1[0],) return ( add_elem + lcs_recursive(seq1[1:],", "the left of `seq1`. \"\"\" # Time complexity: O(2 ^ (len(seq1) + len(seq2)))", "'stone' }, 'output': ('', 0) }) ########################################## ### Methods def memoize(obj): \"\"\"Cache a", "### Test client jovian.evaluate_test_cases(func=lcs_recursive, test_cases=tests) # From the next two tests, we can", "to get from s1, s2 the one with bigger len() return ( max(lcs_recursive(seq1,", "# If the current elem of seq1 & seq2 are not the same,", "Methods def memoize(obj): \"\"\"Cache a function's return value each time it is called.", "orig func runs too slow (impossible when len(seq) > 7) @memoize def lcs_recursive(seq1:", "cache and obj.cache as an empty dict cache = obj.cache = {} #", "be shown as (3,) but not (3) add_elem = (seq1[0],) return ( add_elem", "6, 7, 2, 5, 2, 3), 'seq2': (6, 2, 4, 7, 1, 5,", "2, 3), 5) }) # String tests.append({ 'input': { 'seq1': 'serendipitous', 'seq2': 'precipitation'", "}) # String tests.append({ 'input': { 'seq1': 'serendipitous', 'seq2': 'precipitation' }, 'output': ('reipito',", "# One is a subseq of the other tests.append({ 'input': { 'seq1': 'dense',", "Sequence) -> Tuple[Sequence, int]: \"\"\"Find the longest common subsequence (both itself and its", "**kwargs) return cache[key] return memoizer # The decorator 'memoize' will go and execute", "# Tuple tests.append({ 'input': { 'seq1': (1, 3, 5, 6, 7, 2, 5,", "\"\"\" # Time complexity: O(2 ^ (len(seq1) + len(seq2))) if type(seq1) != type(seq2):", "# If any one of the seqs is empty, then return the empty", "subseq (the first from the left of seq1). 
tests.append({ 'input': { 'seq1': 'abcdef',", "########################################## ### Methods def memoize(obj): \"\"\"Cache a function's return value each time it", "value is directly returned rather than reevaluated.\"\"\" # Initialize cache and obj.cache as", "and j > 0: if seq1[i-1] == seq2[j-1]: if isinstance(seq1, list): add_elem =", "import functools ########################################## ### Test cases tests = [] # List tests.append({ 'input':", "return cache[key] return memoizer # The decorator 'memoize' will go and execute function", "Tuple[Sequence, int]: \"\"\"Find the longest common subsequence (both itself and its length) of", "# table[i][j] is the lcs length of seq1[0:i] and seq2[0:j] table = [[0]", "lcs will be the 1st common subseq from the # left of seq2,", "0 and j > 0: if seq1[i-1] == seq2[j-1]: if isinstance(seq1, list): add_elem", "the next `else`, the resulting lcs will be the 1st common subseq from", "License. # See LICENSE in the project root for license information. \"\"\"Longest common", "`elif <` first is important; if we swap this elif with # the", "wrapper is the func to be updated.) So obj's attributes will be copied", "Use nested lists to make a (len1+1) * (len2+1) 2D array (ie a", "typing import Sequence, Tuple from tests import jovian import functools ########################################## ### Test", "+ len(seq2))) if type(seq1) != type(seq2): raise TypeError(\"Both input sequences should be of", "we know the length of lcs to be index, # ie the elem", "j -= 1 else: i -= 1 return lcs, lcs_length ########################################## ### Test", "s2 the one with bigger len() return ( max(lcs_recursive(seq1, seq2[1:])[0], lcs_recursive(seq1[1:], seq2)[0], key=len),", "range(len1 + 1)] for i in range(1, len1 + 1): for j in", "lcs). 
# Note: Putting this `elif <` first is important; if we swap", "# Time complexity: O(2 ^ (len(seq1) + len(seq2))) if type(seq1) != type(seq2): raise", "= empty # Note: The vital idea here is, now that we know", "'output': ('dense', 5) }) # Multiple subseqs with same length # In this", "The next two lines are equivalent; use either # lcs_length = table[len1][len2] lcs_length", "5) }) # Tuple tests.append({ 'input': { 'seq1': (1, 3, 5, 6, 7," ]
[ "self.hud.drawHUD() # Determine if a bullet should be spawned, and then spawns a", "Entity import ParticleSpawner, ParticleFactory, Bullet from HUD import HUD from pyglet.window import key", "AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size > 1: # add two baby asteroids! self.entities.append( Asteroid(", "spawn DEBOUNCE = 1 class StateManager(object): def __init__(self): self.quit = False self._init_window() self._init_game()", "screen def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This function runs when", "math.pi, math.pi / 4, .01, ParticleFactory(speed=20, color=(255, 0, 0)), True) self.entities.append(self.exhaust) #Create a", "function determines if any objects are colliding in a meaningful way for the", "Asteroid Spawning asteroids = [e for e in self.entities if isinstance(e, Asteroid)] if", "y=self.window.height/2)) self.entities.append(self.player) # This function runs when the look is in game mode,", "the screen def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This function runs", "of the screen def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This function", "asteroid in asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size", "makes it easier to use keyboards, mice, and # other controllers the user", "else: self.mode = \"GAMEOVER\" # Process asteroid/bullet collisions for bullet in [e for", "5 # number of asteroids to spawn DEBOUNCE = 1 class StateManager(object): def", "print(e) batch = pyglet.graphics.Batch() for e in self.entities: # batch.add expects a series", "HEIGHT) # Keys holds a handler that keeps track of keyboard state, part", "a minimum asteroid population 
def spawn_asteroids(self): # Asteroid Spawning asteroids = [e for", "asteroids! self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy()))", "from pyglet import clock from Entity import Asteroid, AsteroidDebris, Player from Entity import", "elif self.keys[key.ESCAPE]: self.quit = True # Game over screen def game_over_loop(self, dt): self.window.clear()", "\"GAME\" elif self.keys[key.ESCAPE]: self.quit = True # Game over screen def game_over_loop(self, dt):", "import Asteroid, AsteroidDebris, Player from Entity import ParticleSpawner, ParticleFactory, Bullet from HUD import", "loop(self, dt): if self.debounce_timer > 0: self.debounce_timer -= dt if self.mode == \"GAME\":", "# Asteroid Spawning asteroids = [e for e in self.entities if isinstance(e, Asteroid)]", "True print(\"Error: Debug: state.mode == Invalid state!\") # Pause screen def pause_loop(self, dt):", "self.spawn_asteroids() self.detect_collisions() for e in self.entities: e.update(dt) #for e in self.entities: # print(e)", "e in self.entities: e.update(dt) #for e in self.entities: # print(e) batch = pyglet.graphics.Batch()", "and self.debounce_timer <= 0: self.mode = \"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller) #turn on", "be in invuln # period if (self.player.isAlive() != True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill()", "self.player.pos.getCopy(), self.player.angle ) ) # Maintain a minimum asteroid population def spawn_asteroids(self): #", "self.entities: e.update(dt) #for e in self.entities: # print(e) batch = pyglet.graphics.Batch() for e", "Inform the main function if the player requested to quit def is_quit(self): return", "asteroid population def spawn_asteroids(self): # Asteroid Spawning asteroids = [e for e in", "controller['acc'] self.exhaust.angle = (self.player.angle + math.pi) self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets() 
self.spawn_asteroids() self.detect_collisions() for", "e in self.entities if isinstance(e, Bullet)]: for asteroid in asteroids: if bullet.overlaps( asteroid.hit_radius,", "a bullet def spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle ) ) #", "[e for e in self.entities if isinstance(e, Asteroid)] for asteroid in asteroids: if", "Space: Press s to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y =", "(self.player.isAlive() != True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode = \"GAMEOVER\" # Process", "\"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller) #turn on thrust effect if ship is accelerating", "anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" elif self.keys[key.ESCAPE]: self.quit =", "the game def _init_window(self): # Window object represents the game's window self.window =", "'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" self._init_game() elif self.keys[key.ESCAPE]: self.quit = True", "game def detect_collisions(self): asteroids = [e for e in self.entities if isinstance(e, Asteroid)]", "# Log the points self.hud.hit() # Inform the main function if the player", "self.entities.append(self.exhaust) #Create a new instance of the Player class at the center of", "at the center of the screen def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player)", "# Keys holds a handler that keeps track of keyboard state, part of", "# Check if player is actually dead, it may be in invuln #", "quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.S]:", "keyboards, mice, and # other controllers the user may have controller = {", "4, .01, ParticleFactory(speed=20, color=(255, 0, 0)), True) self.entities.append(self.exhaust) #Create a new 
instance of", "from Vect2 import Vect2 import math # Target window size constant WIDTH =", "should be spawned, and then spawns a bullet def spawn_bullets(self): if self.player.isFiring(): self.entities.append(", "be its own class. # That level of abstraction makes it easier to", "color=(255, 0, 0)), True) self.entities.append(self.exhaust) #Create a new instance of the Player class", "collisions for bullet in [e for e in self.entities if isinstance(e, Bullet)]: for", "Paused: Press p to unpause, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x", "asteroid/bullet collisions for bullet in [e for e in self.entities if isinstance(e, Bullet)]:", "keyboard state, part of pyglet self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage the game", "self.debounce_timer <= 0: self.mode = \"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller) #turn on thrust", "/ 4, .01, ParticleFactory(speed=20, color=(255, 0, 0)), True) self.entities.append(self.exhaust) #Create a new instance", "# Stage the game or return it to its initial state def _init_game(self):", "asteroid.size > 1: # add two baby asteroids! 
self.entities.append( Asteroid( asteroid.size - 1,", "quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.P]", "= HUD() self.entities = [] self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi,", "Log the points self.hud.hit() # Inform the main function if the player requested", "the game's window self.window = pyglet.window.Window(WIDTH, HEIGHT) # Keys holds a handler that", "center of the screen def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This", "way for the game def detect_collisions(self): asteroids = [e for e in self.entities", "state.mode == Invalid state!\") # Pause screen def pause_loop(self, dt): self.window.clear() label =", "pyglet.graphics.Batch() for e in self.entities: # batch.add expects a series of arguments #", "Entity import Asteroid, AsteroidDebris, Player from Entity import ParticleSpawner, ParticleFactory, Bullet from HUD", "self.quit = True # Splash screen def splash_loop(self, dt): label = pyglet.text.Label(\"Rocks in", "# Dispatch loop to the right function def loop(self, dt): if self.debounce_timer >", "in self.entities: # print(e) batch = pyglet.graphics.Batch() for e in self.entities: # batch.add", "have controller = { 'acc': self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit':", "== \"SPLASH\": self.splash_loop(dt) elif self.mode == \"GAMEOVER\": self.game_over_loop(dt) else: self.quit == True print(\"Error:", "return self.quit # Dispatch loop to the right function def loop(self, dt): if", "self.keys[key.P] } self.quit = controller['quit'] if controller['pause'] and self.debounce_timer <= 0: self.mode =", "initial state def _init_game(self): self.hud = HUD() self.entities = [] self.spawn_player() self.exhaust =", "# batch.add expects a series of arguments # most easily 
delivered as a", "label.draw() if self.keys[key.P] and self.debounce_timer <= 0: self.mode = \"GAME\" self.debounce_timer = DEBOUNCE", "label = pyglet.text.Label(\"Game Paused: Press p to unpause, or ESC to quit\", font_size=24,", "False self._init_window() self._init_game() self.mode = \"SPLASH\" # Prevent bouncing on switching game modes", "# Window object represents the game's window self.window = pyglet.window.Window(WIDTH, HEIGHT) # Keys", "# Draw objects to the frame batch.draw() self.hud.drawHUD() # Determine if a bullet", "= 'center') label.draw() if self.keys[key.P] and self.debounce_timer <= 0: self.mode = \"GAME\" self.debounce_timer", "self.player.angle ) ) # Maintain a minimum asteroid population def spawn_asteroids(self): # Asteroid", "controller['pause'] and self.debounce_timer <= 0: self.mode = \"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller) #turn", "self.debounce_timer -= dt if self.mode == \"GAME\": self.game_loop(dt) elif self.mode == \"PAUSE\": self.pause_loop(dt)", "self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e in self.entities: e.update(dt) #for e in self.entities: #", "state def _init_game(self): self.hud = HUD() self.entities = [] self.spawn_player() self.exhaust = ParticleSpawner(", "asteroid.pos.getCopy())) # Remove bullet bullet.kill() # Log the points self.hud.hit() # Inform the", "self.entities[:] = [e for e in self.entities if e.isAlive()] # Draw objects to", "Prevent bouncing on switching game modes self.debounce_timer = DEBOUNCE # Create a window", "to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if", "def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This function runs when the", "if controller['pause'] and self.debounce_timer <= 0: self.mode = \"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller)", "self.debounce_timer = 
DEBOUNCE # Create a window for the game def _init_window(self): #", "self.exhaust.active = controller['acc'] self.exhaust.angle = (self.player.angle + math.pi) self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids()", "x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode =", "spawned, and then spawns a bullet def spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(),", "self.entities: # print(e) batch = pyglet.graphics.Batch() for e in self.entities: # batch.add expects", "level of abstraction makes it easier to use keyboards, mice, and # other", "of pyglet self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage the game or return it", "game modes self.debounce_timer = DEBOUNCE # Create a window for the game def", "= Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This function runs when the look is in", "pyglet import clock from Entity import Asteroid, AsteroidDebris, Player from Entity import ParticleSpawner,", "of asteroids to spawn DEBOUNCE = 1 class StateManager(object): def __init__(self): self.quit =", "frame batch.draw() self.hud.drawHUD() # Determine if a bullet should be spawned, and then", "in a meaningful way for the game def detect_collisions(self): asteroids = [e for", "= 'center', anchor_y = 'center') label.draw() if self.keys[key.P] and self.debounce_timer <= 0: self.mode", "the controller would probably be its own class. 
# That level of abstraction", "import HUD from pyglet.window import key from Vect2 import Vect2 import math #", "DEBOUNCE elif self.keys[key.ESCAPE]: self.quit = True # Splash screen def splash_loop(self, dt): label", "splash_loop(self, dt): label = pyglet.text.Label(\"Rocks in Space: Press s to start\", font_size=38, x=WIDTH//2,", "from HUD import HUD from pyglet.window import key from Vect2 import Vect2 import", "if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode = \"GAMEOVER\" # Process asteroid/bullet collisions for", "# This class manages the game's state import pyglet from pyglet import clock", "# number of asteroids to spawn DEBOUNCE = 1 class StateManager(object): def __init__(self):", "window self.window = pyglet.window.Window(WIDTH, HEIGHT) # Keys holds a handler that keeps track", "over screen def game_over_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game over! Press S to", "self.player.input(controller) #turn on thrust effect if ship is accelerating self.exhaust.active = controller['acc'] self.exhaust.angle", "def __init__(self): self.quit = False self._init_window() self._init_game() self.mode = \"SPLASH\" # Prevent bouncing", "two baby asteroids! 
self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size -", "any dead objects self.entities[:] = [e for e in self.entities if e.isAlive()] #", "s to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw()", "self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) # Remove bullet bullet.kill() # Log the", "#turn on thrust effect if ship is accelerating self.exhaust.active = controller['acc'] self.exhaust.angle =", "= [e for e in self.entities if isinstance(e, Asteroid)] for asteroid in asteroids:", "y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.P] and self.debounce_timer <=", "of the Player class at the center of the screen def spawn_player(self): self.player", "self.mode = \"GAME\" self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]: self.quit = True # Splash", "1: # add two baby asteroids! self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append(", "period if (self.player.isAlive() != True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode = \"GAMEOVER\"", "p to unpause, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center',", "abstraction makes it easier to use keyboards, mice, and # other controllers the", "to its initial state def _init_game(self): self.hud = HUD() self.entities = [] self.spawn_player()", "e.update(dt) #for e in self.entities: # print(e) batch = pyglet.graphics.Batch() for e in", "def spawn_asteroids(self): # Asteroid Spawning asteroids = [e for e in self.entities if", "# That level of abstraction makes it easier to use keyboards, mice, and", "# This function determines if any objects are colliding in a meaningful way", "handler that keeps track of keyboard state, part of pyglet self.keys = pyglet.window.key.KeyStateHandler()", "= (self.player.angle + math.pi) self.exhaust.pos = 
self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e in", "or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center')", "[e for e in self.entities if e.isAlive()] # Draw objects to the frame", "use keyboards, mice, and # other controllers the user may have controller =", "ship is accelerating self.exhaust.active = controller['acc'] self.exhaust.angle = (self.player.angle + math.pi) self.exhaust.pos =", "Create a window for the game def _init_window(self): # Window object represents the", "series of arguments # most easily delivered as a tuple. # * is", "Asteroid)] if len(asteroids) < targetNo: newAsteroid = Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) # This", "# Prevent bouncing on switching game modes self.debounce_timer = DEBOUNCE # Create a", "Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) # Remove", "True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode = \"GAMEOVER\" # Process asteroid/bullet collisions", "StateManager(object): def __init__(self): self.quit = False self._init_window() self._init_game() self.mode = \"SPLASH\" # Prevent", "dead objects self.entities[:] = [e for e in self.entities if e.isAlive()] # Draw", "Vect2(0, 0)) self.entities.append(newAsteroid) # This function determines if any objects are colliding in", "bullet should be spawned, and then spawns a bullet def spawn_bullets(self): if self.player.isFiring():", "controller = { 'acc': self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE],", "the points self.hud.hit() # Inform the main function if the player requested to", "of keyboard state, part of pyglet self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage the", "easily delivered as a tuple. 
# * is the untuple argument. batch.add(*e.draw()) #", "number of asteroids to spawn DEBOUNCE = 1 class StateManager(object): def __init__(self): self.quit", "spawns a bullet def spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle ) )", "\"GAME\": self.game_loop(dt) elif self.mode == \"PAUSE\": self.pause_loop(dt) elif self.mode == \"SPLASH\": self.splash_loop(dt) elif", "may have controller = { 'acc': self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D], 'fire': self.keys[key.SPACE],", "= pyglet.text.Label(\"Game Paused: Press p to unpause, or ESC to quit\", font_size=24, x=WIDTH//2,", "isinstance(e, Asteroid)] for asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check if", "1 class StateManager(object): def __init__(self): self.quit = False self._init_window() self._init_game() self.mode = \"SPLASH\"", "Filter out any dead objects self.entities[:] = [e for e in self.entities if", "1, asteroid.pos.getCopy())) # Remove bullet bullet.kill() # Log the points self.hud.hit() # Inform", "return it to its initial state def _init_game(self): self.hud = HUD() self.entities =", "# period if (self.player.isAlive() != True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode =", "# Game over screen def game_over_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game over! 
Press", "from Entity import Asteroid, AsteroidDebris, Player from Entity import ParticleSpawner, ParticleFactory, Bullet from", "to use keyboards, mice, and # other controllers the user may have controller", "= 'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" self._init_game() elif", "self.quit == True print(\"Error: Debug: state.mode == Invalid state!\") # Pause screen def", "self.mode = \"SPLASH\" # Prevent bouncing on switching game modes self.debounce_timer = DEBOUNCE", "unpause, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y =", "isinstance(e, Asteroid)] if len(asteroids) < targetNo: newAsteroid = Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) #", "start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.S]:", "state!\") # Pause screen def pause_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game Paused: Press", "= \"GAMEOVER\" # Process asteroid/bullet collisions for bullet in [e for e in", "# Target window size constant WIDTH = 800 HEIGHT = 400 targetNo =", "self.spawn_player() self.hud.kill() else: self.mode = \"GAMEOVER\" # Process asteroid/bullet collisions for bullet in", "Window object represents the game's window self.window = pyglet.window.Window(WIDTH, HEIGHT) # Keys holds", "self.entities if isinstance(e, Asteroid)] if len(asteroids) < targetNo: newAsteroid = Asteroid(3, Vect2(0, 0))", "anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" self._init_game()", "out any dead objects self.entities[:] = [e for e in self.entities if e.isAlive()]", "is accelerating self.exhaust.active = controller['acc'] self.exhaust.angle = (self.player.angle + math.pi) self.exhaust.pos = self.player.pos.getCopy()", "mice, and # other controllers the user may have controller = { 'acc':", "x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') 
label.draw() if self.keys[key.P] and self.debounce_timer", "DEBOUNCE = 1 class StateManager(object): def __init__(self): self.quit = False self._init_window() self._init_game() self.mode", "self.game_over_loop(dt) else: self.quit == True print(\"Error: Debug: state.mode == Invalid state!\") # Pause", "\"GAME\" self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]: self.quit = True # Splash screen def", "self.entities.append(self.player) # This function runs when the look is in game mode, and", "the frame batch.draw() self.hud.drawHUD() # Determine if a bullet should be spawned, and", "self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size > 1: # add two baby asteroids! self.entities.append(", "self.keys[key.P] and self.debounce_timer <= 0: self.mode = \"GAME\" self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]:", "# other controllers the user may have controller = { 'acc': self.keys[key.W], 'left':", "'pause': self.keys[key.P] } self.quit = controller['quit'] if controller['pause'] and self.debounce_timer <= 0: self.mode", "ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw()", "class StateManager(object): def __init__(self): self.quit = False self._init_window() self._init_game() self.mode = \"SPLASH\" #", "instance of the Player class at the center of the screen def spawn_player(self):", "= DEBOUNCE elif self.keys[key.ESCAPE]: self.quit = True # Splash screen def splash_loop(self, dt):", "state import pyglet from pyglet import clock from Entity import Asteroid, AsteroidDebris, Player", "S to restart, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center',", "asteroids = [e for e in self.entities if isinstance(e, Asteroid)] for asteroid in", "import ParticleSpawner, ParticleFactory, Bullet from HUD import HUD from pyglet.window import key from", "objects to the frame batch.draw() self.hud.drawHUD() # Determine if a bullet should be", "if isinstance(e, 
Bullet)]: for asteroid in asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append(", "it to its initial state def _init_game(self): self.hud = HUD() self.entities = []", "effect if ship is accelerating self.exhaust.active = controller['acc'] self.exhaust.angle = (self.player.angle + math.pi)", "self.player.angle + math.pi, math.pi / 4, .01, ParticleFactory(speed=20, color=(255, 0, 0)), True) self.entities.append(self.exhaust)", "# This function runs when the look is in game mode, and has", "self.quit = False self._init_window() self._init_game() self.mode = \"SPLASH\" # Prevent bouncing on switching", "of arguments # most easily delivered as a tuple. # * is the", "def loop(self, dt): if self.debounce_timer > 0: self.debounce_timer -= dt if self.mode ==", "mode, and has all the updating/drawing logic def game_loop(self, dt): #Clear frame before", "game or return it to its initial state def _init_game(self): self.hud = HUD()", "the player requested to quit def is_quit(self): return self.quit # Dispatch loop to", "self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) #", "self.quit = True # Game over screen def game_over_loop(self, dt): self.window.clear() label =", "= pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage the game or return it to its initial", "self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi, math.pi / 4, .01, ParticleFactory(speed=20,", "0: self.debounce_timer -= dt if self.mode == \"GAME\": self.game_loop(dt) elif self.mode == \"PAUSE\":", "[e for e in self.entities if isinstance(e, Bullet)]: for asteroid in asteroids: if", "\"GAMEOVER\" # Process asteroid/bullet collisions for bullet in [e for e in self.entities", "game def _init_window(self): # Window object represents the game's window self.window = 
pyglet.window.Window(WIDTH,", "elif self.keys[key.ESCAPE]: self.quit = True # Splash screen def splash_loop(self, dt): label =", "# On a proper engine the controller would probably be its own class.", "the Player class at the center of the screen def spawn_player(self): self.player =", "self.window.clear() label = pyglet.text.Label(\"Game over! Press S to restart, or ESC to quit\",", "math.pi) self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e in self.entities: e.update(dt) #for", "asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size > 1: # add two baby", "self.entities: # batch.add expects a series of arguments # most easily delivered as", "asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check if player is actually dead, it", "self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit = controller['quit'] if controller['pause'] and self.debounce_timer <= 0:", "Draw objects to the frame batch.draw() self.hud.drawHUD() # Determine if a bullet should", "self.quit # Dispatch loop to the right function def loop(self, dt): if self.debounce_timer", "\"SPLASH\" # Prevent bouncing on switching game modes self.debounce_timer = DEBOUNCE # Create", "self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit = controller['quit'] if controller['pause']", "isinstance(e, Bullet)]: for asteroid in asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris(", "elif self.mode == \"GAMEOVER\": self.game_over_loop(dt) else: self.quit == True print(\"Error: Debug: state.mode ==", "self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage the game or return it to its", "elif self.mode == \"PAUSE\": self.pause_loop(dt) elif self.mode == 
\"SPLASH\": self.splash_loop(dt) elif self.mode ==", "a bullet should be spawned, and then spawns a bullet def spawn_bullets(self): if", "spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle ) ) # Maintain a minimum", "# Splash screen def splash_loop(self, dt): label = pyglet.text.Label(\"Rocks in Space: Press s", "engine the controller would probably be its own class. # That level of", "{ 'acc': self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P]", "ParticleSpawner, ParticleFactory, Bullet from HUD import HUD from pyglet.window import key from Vect2", "newAsteroid = Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) # This function determines if any objects", "class at the center of the screen def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2))", "# * is the untuple argument. batch.add(*e.draw()) # Filter out any dead objects", "e in self.entities if isinstance(e, Asteroid)] for asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()):", "for asteroid in asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if", "baby asteroids! self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size - 1,", "anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.P] and self.debounce_timer <= 0:", "Game over screen def game_over_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game over! 
Press S", "if len(asteroids) < targetNo: newAsteroid = Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) # This function", "Determine if a bullet should be spawned, and then spawns a bullet def", "'center', anchor_y = 'center') label.draw() if self.keys[key.P] and self.debounce_timer <= 0: self.mode =", "# Filter out any dead objects self.entities[:] = [e for e in self.entities", "asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size > 1:", "screen def splash_loop(self, dt): label = pyglet.text.Label(\"Rocks in Space: Press s to start\",", "key from Vect2 import Vect2 import math # Target window size constant WIDTH", "bouncing on switching game modes self.debounce_timer = DEBOUNCE # Create a window for", "self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check if player is actually dead, it may be", "if (self.player.isAlive() != True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode = \"GAMEOVER\" #", "controller would probably be its own class. 
# That level of abstraction makes", "font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode", "in invuln # period if (self.player.isAlive() != True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else:", "== Invalid state!\") # Pause screen def pause_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game", "import clock from Entity import Asteroid, AsteroidDebris, Player from Entity import ParticleSpawner, ParticleFactory,", "self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi, math.pi / 4, .01, ParticleFactory(speed=20, color=(255,", "state, part of pyglet self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage the game or", "0)), True) self.entities.append(self.exhaust) #Create a new instance of the Player class at the", "= DEBOUNCE self.player.input(controller) #turn on thrust effect if ship is accelerating self.exhaust.active =", "= pyglet.window.Window(WIDTH, HEIGHT) # Keys holds a handler that keeps track of keyboard", "if the player requested to quit def is_quit(self): return self.quit # Dispatch loop", "= pyglet.text.Label(\"Rocks in Space: Press s to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x =", "= controller['quit'] if controller['pause'] and self.debounce_timer <= 0: self.mode = \"PAUSE\" self.debounce_timer =", "the look is in game mode, and has all the updating/drawing logic def", "logic def game_loop(self, dt): #Clear frame before looping self.window.clear() #print(pyglet.gl.get_current_context()) # On a", "This function determines if any objects are colliding in a meaningful way for", "own class. 
# That level of abstraction makes it easier to use keyboards,", "controller['quit'] if controller['pause'] and self.debounce_timer <= 0: self.mode = \"PAUSE\" self.debounce_timer = DEBOUNCE", "# Remove bullet bullet.kill() # Log the points self.hud.hit() # Inform the main", "len(asteroids) < targetNo: newAsteroid = Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) # This function determines", "- 1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) # Remove bullet bullet.kill()", "self.keys[key.ESCAPE]: self.quit = True # Splash screen def splash_loop(self, dt): label = pyglet.text.Label(\"Rocks", "in self.entities if e.isAlive()] # Draw objects to the frame batch.draw() self.hud.drawHUD() #", "e in self.entities if e.isAlive()] # Draw objects to the frame batch.draw() self.hud.drawHUD()", "for e in self.entities: e.update(dt) #for e in self.entities: # print(e) batch =", "800 HEIGHT = 400 targetNo = 5 # number of asteroids to spawn", "if a bullet should be spawned, and then spawns a bullet def spawn_bullets(self):", "invuln # period if (self.player.isAlive() != True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode", "dt): if self.debounce_timer > 0: self.debounce_timer -= dt if self.mode == \"GAME\": self.game_loop(dt)", "to the right function def loop(self, dt): if self.debounce_timer > 0: self.debounce_timer -=", "pyglet.window import key from Vect2 import Vect2 import math # Target window size", "targetNo: newAsteroid = Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) # This function determines if any", "targetNo = 5 # number of asteroids to spawn DEBOUNCE = 1 class", "def spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle ) ) # Maintain a", "True # Game over screen def game_over_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game over!", "'fire': self.keys[key.SPACE], 
'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit = controller['quit'] if controller['pause'] and", "for e in self.entities if isinstance(e, Asteroid)] if len(asteroids) < targetNo: newAsteroid =", "points self.hud.hit() # Inform the main function if the player requested to quit", "def pause_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game Paused: Press p to unpause, or", "a proper engine the controller would probably be its own class. # That", "meaningful way for the game def detect_collisions(self): asteroids = [e for e in", "object represents the game's window self.window = pyglet.window.Window(WIDTH, HEIGHT) # Keys holds a", "a new instance of the Player class at the center of the screen", "asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) # Remove bullet", "bullet def spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle ) ) # Maintain", "track of keyboard state, part of pyglet self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage", "# Create a window for the game def _init_window(self): # Window object represents", "all the updating/drawing logic def game_loop(self, dt): #Clear frame before looping self.window.clear() #print(pyglet.gl.get_current_context())", "a meaningful way for the game def detect_collisions(self): asteroids = [e for e", "Check if player is actually dead, it may be in invuln # period", "> 0: self.debounce_timer -= dt if self.mode == \"GAME\": self.game_loop(dt) elif self.mode ==", "self.detect_collisions() for e in self.entities: e.update(dt) #for e in self.entities: # print(e) batch", "proper engine the controller would probably be its own class. 
# That level", "Bullet from HUD import HUD from pyglet.window import key from Vect2 import Vect2", "self.entities.append(newAsteroid) # This function determines if any objects are colliding in a meaningful", "it may be in invuln # period if (self.player.isAlive() != True): if (self.hud.has_lives()):", "self.window.clear() label = pyglet.text.Label(\"Game Paused: Press p to unpause, or ESC to quit\",", "bullet in [e for e in self.entities if isinstance(e, Bullet)]: for asteroid in", "if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check if player is actually dead, it may", "font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.P] and", "self.hud.kill() else: self.mode = \"GAMEOVER\" # Process asteroid/bullet collisions for bullet in [e", "anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" elif", "= pyglet.graphics.Batch() for e in self.entities: # batch.add expects a series of arguments", "bullet.kill() # Log the points self.hud.hit() # Inform the main function if the", "= 400 targetNo = 5 # number of asteroids to spawn DEBOUNCE =", "self.mode == \"GAME\": self.game_loop(dt) elif self.mode == \"PAUSE\": self.pause_loop(dt) elif self.mode == \"SPLASH\":", "self.pause_loop(dt) elif self.mode == \"SPLASH\": self.splash_loop(dt) elif self.mode == \"GAMEOVER\": self.game_over_loop(dt) else: self.quit", "self.entities if isinstance(e, Bullet)]: for asteroid in asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill()", "= DEBOUNCE # Create a window for the game def _init_window(self): # Window", "probably be its own class. 
# That level of abstraction makes it easier", "# print(e) batch = pyglet.graphics.Batch() for e in self.entities: # batch.add expects a", "is actually dead, it may be in invuln # period if (self.player.isAlive() !=", "math # Target window size constant WIDTH = 800 HEIGHT = 400 targetNo", "= 800 HEIGHT = 400 targetNo = 5 # number of asteroids to", "HEIGHT = 400 targetNo = 5 # number of asteroids to spawn DEBOUNCE", "may be in invuln # period if (self.player.isAlive() != True): if (self.hud.has_lives()): self.spawn_player()", "Spawning asteroids = [e for e in self.entities if isinstance(e, Asteroid)] if len(asteroids)", "else: self.quit == True print(\"Error: Debug: state.mode == Invalid state!\") # Pause screen", "Press S to restart, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x =", "self._init_window() self._init_game() self.mode = \"SPLASH\" # Prevent bouncing on switching game modes self.debounce_timer", "'center') label.draw() if self.keys[key.P] and self.debounce_timer <= 0: self.mode = \"GAME\" self.debounce_timer =", "its own class. 
# That level of abstraction makes it easier to use", "to quit def is_quit(self): return self.quit # Dispatch loop to the right function", "Bullet( self.player.pos.getCopy(), self.player.angle ) ) # Maintain a minimum asteroid population def spawn_asteroids(self):", "batch.add(*e.draw()) # Filter out any dead objects self.entities[:] = [e for e in", "# Pause screen def pause_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game Paused: Press p", "and self.debounce_timer <= 0: self.mode = \"GAME\" self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]: self.quit", "= \"SPLASH\" # Prevent bouncing on switching game modes self.debounce_timer = DEBOUNCE #", "Press p to unpause, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x =", "manages the game's state import pyglet from pyglet import clock from Entity import", "to restart, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y", "pyglet.text.Label(\"Game Paused: Press p to unpause, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2,", "switching game modes self.debounce_timer = DEBOUNCE # Create a window for the game", "self.hud = HUD() self.entities = [] self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle +", "new instance of the Player class at the center of the screen def", "and then spawns a bullet def spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle", "holds a handler that keeps track of keyboard state, part of pyglet self.keys", "tuple. # * is the untuple argument. 
batch.add(*e.draw()) # Filter out any dead", "= 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" self._init_game() elif self.keys[key.ESCAPE]: self.quit =", "ParticleFactory(speed=20, color=(255, 0, 0)), True) self.entities.append(self.exhaust) #Create a new instance of the Player", "batch = pyglet.graphics.Batch() for e in self.entities: # batch.add expects a series of", "#for e in self.entities: # print(e) batch = pyglet.graphics.Batch() for e in self.entities:", "self.window.push_handlers(self.keys) # Stage the game or return it to its initial state def", "= Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) # This function determines if any objects are", "'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit = controller['quit'] if controller['pause'] and self.debounce_timer <=", "the game def detect_collisions(self): asteroids = [e for e in self.entities if isinstance(e,", "for bullet in [e for e in self.entities if isinstance(e, Bullet)]: for asteroid", "if self.keys[key.S]: self.mode = \"GAME\" elif self.keys[key.ESCAPE]: self.quit = True # Game over", "screen def game_over_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game over! Press S to restart,", "Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) # Remove bullet bullet.kill() # Log the points", "a series of arguments # most easily delivered as a tuple. 
# *", "== \"GAMEOVER\": self.game_over_loop(dt) else: self.quit == True print(\"Error: Debug: state.mode == Invalid state!\")", "+ math.pi, math.pi / 4, .01, ParticleFactory(speed=20, color=(255, 0, 0)), True) self.entities.append(self.exhaust) #Create", "game's state import pyglet from pyglet import clock from Entity import Asteroid, AsteroidDebris,", "for e in self.entities if e.isAlive()] # Draw objects to the frame batch.draw()", "to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if", "dt): self.window.clear() label = pyglet.text.Label(\"Game over! Press S to restart, or ESC to", "# Inform the main function if the player requested to quit def is_quit(self):", "= True # Splash screen def splash_loop(self, dt): label = pyglet.text.Label(\"Rocks in Space:", "[e for e in self.entities if isinstance(e, Asteroid)] if len(asteroids) < targetNo: newAsteroid", "objects are colliding in a meaningful way for the game def detect_collisions(self): asteroids", "right function def loop(self, dt): if self.debounce_timer > 0: self.debounce_timer -= dt if", "'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" self._init_game() elif self.keys[key.ESCAPE]:", "of abstraction makes it easier to use keyboards, mice, and # other controllers", "asteroid.size - 1, asteroid.pos.getCopy())) # Remove bullet bullet.kill() # Log the points self.hud.hit()", "if self.keys[key.P] and self.debounce_timer <= 0: self.mode = \"GAME\" self.debounce_timer = DEBOUNCE elif", "add two baby asteroids! 
self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size", "self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle ) ) # Maintain a minimum asteroid population def", "before looping self.window.clear() #print(pyglet.gl.get_current_context()) # On a proper engine the controller would probably", "asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check if player is actually", "Remove bullet bullet.kill() # Log the points self.hud.hit() # Inform the main function", "if any objects are colliding in a meaningful way for the game def", "= \"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller) #turn on thrust effect if ship is", "= \"GAME\" elif self.keys[key.ESCAPE]: self.quit = True # Game over screen def game_over_loop(self,", "Debug: state.mode == Invalid state!\") # Pause screen def pause_loop(self, dt): self.window.clear() label", "argument. batch.add(*e.draw()) # Filter out any dead objects self.entities[:] = [e for e", "asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) # Remove bullet bullet.kill() # Log", "asteroids to spawn DEBOUNCE = 1 class StateManager(object): def __init__(self): self.quit = False", "DEBOUNCE # Create a window for the game def _init_window(self): # Window object", "for e in self.entities: # batch.add expects a series of arguments # most", "This class manages the game's state import pyglet from pyglet import clock from", "its initial state def _init_game(self): self.hud = HUD() self.entities = [] self.spawn_player() self.exhaust", "in self.entities: e.update(dt) #for e in self.entities: # print(e) batch = pyglet.graphics.Batch() for", "is_quit(self): return self.quit # Dispatch loop to the right function def loop(self, dt):", "asteroid.pos.getCopy())) if asteroid.size > 1: # add two baby asteroids! 
self.entities.append( Asteroid( asteroid.size", "Vect2 import Vect2 import math # Target window size constant WIDTH = 800", "_init_window(self): # Window object represents the game's window self.window = pyglet.window.Window(WIDTH, HEIGHT) #", "self.exhaust.angle = (self.player.angle + math.pi) self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e", "DEBOUNCE self.player.input(controller) #turn on thrust effect if ship is accelerating self.exhaust.active = controller['acc']", "e in self.entities: # batch.add expects a series of arguments # most easily", "if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size > 1: #", "self.debounce_timer > 0: self.debounce_timer -= dt if self.mode == \"GAME\": self.game_loop(dt) elif self.mode", "e in self.entities: # print(e) batch = pyglet.graphics.Batch() for e in self.entities: #", "self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle ) ) # Maintain a minimum asteroid population", "clock from Entity import Asteroid, AsteroidDebris, Player from Entity import ParticleSpawner, ParticleFactory, Bullet", "} self.quit = controller['quit'] if controller['pause'] and self.debounce_timer <= 0: self.mode = \"PAUSE\"", "game_over_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game over! Press S to restart, or ESC", "self.mode = \"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller) #turn on thrust effect if ship", "True # Splash screen def splash_loop(self, dt): label = pyglet.text.Label(\"Rocks in Space: Press", "on switching game modes self.debounce_timer = DEBOUNCE # Create a window for the", "pyglet.text.Label(\"Game over! 
Press S to restart, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2,", "frame before looping self.window.clear() #print(pyglet.gl.get_current_context()) # On a proper engine the controller would", "elif self.mode == \"SPLASH\": self.splash_loop(dt) elif self.mode == \"GAMEOVER\": self.game_over_loop(dt) else: self.quit ==", "pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage the game or return it to its initial state", "detect_collisions(self): asteroids = [e for e in self.entities if isinstance(e, Asteroid)] for asteroid", "Invalid state!\") # Pause screen def pause_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game Paused:", "asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size > 1: # add two baby asteroids!", "WIDTH = 800 HEIGHT = 400 targetNo = 5 # number of asteroids", "in self.entities if isinstance(e, Bullet)]: for asteroid in asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()):", "controllers the user may have controller = { 'acc': self.keys[key.W], 'left': self.keys[key.A], 'right':", "or return it to its initial state def _init_game(self): self.hud = HUD() self.entities", "spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This function runs when the look", "#print(pyglet.gl.get_current_context()) # On a proper engine the controller would probably be its own", "Process asteroid/bullet collisions for bullet in [e for e in self.entities if isinstance(e,", "< targetNo: newAsteroid = Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) # This function determines if", "+ math.pi) self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e in self.entities: e.update(dt)", "to unpause, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y", "function runs when the 
look is in game mode, and has all the", "other controllers the user may have controller = { 'acc': self.keys[key.W], 'left': self.keys[key.A],", "import key from Vect2 import Vect2 import math # Target window size constant", "asteroids = [e for e in self.entities if isinstance(e, Asteroid)] if len(asteroids) <", "population def spawn_asteroids(self): # Asteroid Spawning asteroids = [e for e in self.entities", "self.entities if e.isAlive()] # Draw objects to the frame batch.draw() self.hud.drawHUD() # Determine", "objects self.entities[:] = [e for e in self.entities if e.isAlive()] # Draw objects", "window for the game def _init_window(self): # Window object represents the game's window", "quit def is_quit(self): return self.quit # Dispatch loop to the right function def", "function if the player requested to quit def is_quit(self): return self.quit # Dispatch", "dt): label = pyglet.text.Label(\"Rocks in Space: Press s to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2,", "most easily delivered as a tuple. # * is the untuple argument. 
batch.add(*e.draw())", "any objects are colliding in a meaningful way for the game def detect_collisions(self):", "#Create a new instance of the Player class at the center of the", "if isinstance(e, Asteroid)] if len(asteroids) < targetNo: newAsteroid = Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid)", "Bullet)]: for asteroid in asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy()))", "represents the game's window self.window = pyglet.window.Window(WIDTH, HEIGHT) # Keys holds a handler", "bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size > 1: # add", "Maintain a minimum asteroid population def spawn_asteroids(self): # Asteroid Spawning asteroids = [e", "def game_over_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game over! Press S to restart, or", "AsteroidDebris, Player from Entity import ParticleSpawner, ParticleFactory, Bullet from HUD import HUD from", "= controller['acc'] self.exhaust.angle = (self.player.angle + math.pi) self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions()", "for the game def detect_collisions(self): asteroids = [e for e in self.entities if", "and has all the updating/drawing logic def game_loop(self, dt): #Clear frame before looping", "y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\"", "it easier to use keyboards, mice, and # other controllers the user may", "if isinstance(e, Asteroid)] for asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check", "from pyglet.window import key from Vect2 import Vect2 import math # Target window", "self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D], 'fire': 
self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit", "'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" elif self.keys[key.ESCAPE]: self.quit", "asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size > 1: # add two", "self._init_game() self.mode = \"SPLASH\" # Prevent bouncing on switching game modes self.debounce_timer =", "keeps track of keyboard state, part of pyglet self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) #", "in Space: Press s to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y", "actually dead, it may be in invuln # period if (self.player.isAlive() != True):", "untuple argument. batch.add(*e.draw()) # Filter out any dead objects self.entities[:] = [e for", "= 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" elif self.keys[key.ESCAPE]: self.quit = True", "pyglet from pyglet import clock from Entity import Asteroid, AsteroidDebris, Player from Entity", "Asteroid)] for asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check if player", "== True print(\"Error: Debug: state.mode == Invalid state!\") # Pause screen def pause_loop(self,", "Splash screen def splash_loop(self, dt): label = pyglet.text.Label(\"Rocks in Space: Press s to", "'acc': self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] }", "label = pyglet.text.Label(\"Game over! 
Press S to restart, or ESC to quit\", font_size=24,", "if ship is accelerating self.exhaust.active = controller['acc'] self.exhaust.angle = (self.player.angle + math.pi) self.exhaust.pos", "the updating/drawing logic def game_loop(self, dt): #Clear frame before looping self.window.clear() #print(pyglet.gl.get_current_context()) #", "self.quit = controller['quit'] if controller['pause'] and self.debounce_timer <= 0: self.mode = \"PAUSE\" self.debounce_timer", "_init_game(self): self.hud = HUD() self.entities = [] self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle", "e in self.entities if isinstance(e, Asteroid)] if len(asteroids) < targetNo: newAsteroid = Asteroid(3,", "ParticleFactory, Bullet from HUD import HUD from pyglet.window import key from Vect2 import", "looping self.window.clear() #print(pyglet.gl.get_current_context()) # On a proper engine the controller would probably be", "def detect_collisions(self): asteroids = [e for e in self.entities if isinstance(e, Asteroid)] for", "def _init_window(self): # Window object represents the game's window self.window = pyglet.window.Window(WIDTH, HEIGHT)", "batch.draw() self.hud.drawHUD() # Determine if a bullet should be spawned, and then spawns", ") ) # Maintain a minimum asteroid population def spawn_asteroids(self): # Asteroid Spawning", "would probably be its own class. 
# That level of abstraction makes it", "= 1 class StateManager(object): def __init__(self): self.quit = False self._init_window() self._init_game() self.mode =", "That level of abstraction makes it easier to use keyboards, mice, and #", "pause_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game Paused: Press p to unpause, or ESC", "self.mode == \"GAMEOVER\": self.game_over_loop(dt) else: self.quit == True print(\"Error: Debug: state.mode == Invalid", "that keeps track of keyboard state, part of pyglet self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys)", "batch.add expects a series of arguments # most easily delivered as a tuple.", "0: self.mode = \"GAME\" self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]: self.quit = True #", "= pyglet.text.Label(\"Game over! Press S to restart, or ESC to quit\", font_size=24, x=WIDTH//2,", "a handler that keeps track of keyboard state, part of pyglet self.keys =", "from Entity import ParticleSpawner, ParticleFactory, Bullet from HUD import HUD from pyglet.window import", "import pyglet from pyglet import clock from Entity import Asteroid, AsteroidDebris, Player from", "self.player.pos.getCopy(), self.player.angle + math.pi, math.pi / 4, .01, ParticleFactory(speed=20, color=(255, 0, 0)), True)", "1, asteroid.pos.getCopy())) self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) # Remove bullet bullet.kill() #", "game's window self.window = pyglet.window.Window(WIDTH, HEIGHT) # Keys holds a handler that keeps", "self.keys[key.ESCAPE]: self.quit = True # Game over screen def game_over_loop(self, dt): self.window.clear() label", "the untuple argument. 
batch.add(*e.draw()) # Filter out any dead objects self.entities[:] = [e", "self.splash_loop(dt) elif self.mode == \"GAMEOVER\": self.game_over_loop(dt) else: self.quit == True print(\"Error: Debug: state.mode", "function def loop(self, dt): if self.debounce_timer > 0: self.debounce_timer -= dt if self.mode", "print(\"Error: Debug: state.mode == Invalid state!\") # Pause screen def pause_loop(self, dt): self.window.clear()", "self.keys[key.S]: self.mode = \"GAME\" elif self.keys[key.ESCAPE]: self.quit = True # Game over screen", "screen def pause_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game Paused: Press p to unpause,", "requested to quit def is_quit(self): return self.quit # Dispatch loop to the right", "font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode", "self.entities if isinstance(e, Asteroid)] for asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() #", "self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e in self.entities: e.update(dt) #for e", "dt): self.window.clear() label = pyglet.text.Label(\"Game Paused: Press p to unpause, or ESC to", "import math # Target window size constant WIDTH = 800 HEIGHT = 400", "HUD from pyglet.window import key from Vect2 import Vect2 import math # Target", "= self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e in self.entities: e.update(dt) #for e in", "= [e for e in self.entities if e.isAlive()] # Draw objects to the", "# Process asteroid/bullet collisions for bullet in [e for e in self.entities if", "self.keys[key.A], 'right': self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit = controller['quit']", "spawn_asteroids(self): # Asteroid Spawning asteroids = [e for e in self.entities if 
isinstance(e,", "Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This function runs when the look is in game", "e.isAlive()] # Draw objects to the frame batch.draw() self.hud.drawHUD() # Determine if a", "-= dt if self.mode == \"GAME\": self.game_loop(dt) elif self.mode == \"PAUSE\": self.pause_loop(dt) elif", "0, 0)), True) self.entities.append(self.exhaust) #Create a new instance of the Player class at", "> 1: # add two baby asteroids! self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy()))", "class manages the game's state import pyglet from pyglet import clock from Entity", "size constant WIDTH = 800 HEIGHT = 400 targetNo = 5 # number", "Target window size constant WIDTH = 800 HEIGHT = 400 targetNo = 5", "the game or return it to its initial state def _init_game(self): self.hud =", "accelerating self.exhaust.active = controller['acc'] self.exhaust.angle = (self.player.angle + math.pi) self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets()", "if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle ) ) # Maintain a minimum asteroid", "player requested to quit def is_quit(self): return self.quit # Dispatch loop to the", "game_loop(self, dt): #Clear frame before looping self.window.clear() #print(pyglet.gl.get_current_context()) # On a proper engine", "then spawns a bullet def spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet( self.player.pos.getCopy(), self.player.angle )", "runs when the look is in game mode, and has all the updating/drawing", "import Vect2 import math # Target window size constant WIDTH = 800 HEIGHT", "= True # Game over screen def game_over_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game", "self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit = controller['quit'] if controller['pause'] and self.debounce_timer", "def splash_loop(self, 
dt): label = pyglet.text.Label(\"Rocks in Space: Press s to start\", font_size=38,", "self.mode = \"GAME\" elif self.keys[key.ESCAPE]: self.quit = True # Game over screen def", "in asteroids: if bullet.overlaps( asteroid.hit_radius, asteroid.pos.getCopy()): asteroid.kill() self.entities.append( AsteroidDebris( asteroid.pos.getCopy())) if asteroid.size >", "the game's state import pyglet from pyglet import clock from Entity import Asteroid,", "ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi, math.pi / 4, .01, ParticleFactory(speed=20, color=(255, 0, 0)),", "\"PAUSE\": self.pause_loop(dt) elif self.mode == \"SPLASH\": self.splash_loop(dt) elif self.mode == \"GAMEOVER\": self.game_over_loop(dt) else:", "<= 0: self.mode = \"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller) #turn on thrust effect", "= \"GAME\" self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]: self.quit = True # Splash screen", "Keys holds a handler that keeps track of keyboard state, part of pyglet", "Stage the game or return it to its initial state def _init_game(self): self.hud", "Pause screen def pause_loop(self, dt): self.window.clear() label = pyglet.text.Label(\"Game Paused: Press p to", "Asteroid, AsteroidDebris, Player from Entity import ParticleSpawner, ParticleFactory, Bullet from HUD import HUD", "self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]: self.quit = True # Splash screen def splash_loop(self,", "be spawned, and then spawns a bullet def spawn_bullets(self): if self.player.isFiring(): self.entities.append( Bullet(", "if player is actually dead, it may be in invuln # period if", "0: self.mode = \"PAUSE\" self.debounce_timer = DEBOUNCE self.player.input(controller) #turn on thrust effect if", "main function if the player requested to quit def is_quit(self): return self.quit #", "pyglet.window.Window(WIDTH, HEIGHT) # Keys holds a handler that keeps track of keyboard state,", "pyglet self.keys = pyglet.window.key.KeyStateHandler() 
self.window.push_handlers(self.keys) # Stage the game or return it to", "if self.debounce_timer > 0: self.debounce_timer -= dt if self.mode == \"GAME\": self.game_loop(dt) elif", "updating/drawing logic def game_loop(self, dt): #Clear frame before looping self.window.clear() #print(pyglet.gl.get_current_context()) # On", "This function runs when the look is in game mode, and has all", "<= 0: self.mode = \"GAME\" self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]: self.quit = True", "for the game def _init_window(self): # Window object represents the game's window self.window", "self.mode == \"PAUSE\": self.pause_loop(dt) elif self.mode == \"SPLASH\": self.splash_loop(dt) elif self.mode == \"GAMEOVER\":", "thrust effect if ship is accelerating self.exhaust.active = controller['acc'] self.exhaust.angle = (self.player.angle +", "self.mode == \"SPLASH\": self.splash_loop(dt) elif self.mode == \"GAMEOVER\": self.game_over_loop(dt) else: self.quit == True", "class. # That level of abstraction makes it easier to use keyboards, mice,", "Player from Entity import ParticleSpawner, ParticleFactory, Bullet from HUD import HUD from pyglet.window", "# Maintain a minimum asteroid population def spawn_asteroids(self): # Asteroid Spawning asteroids =", ") # Maintain a minimum asteroid population def spawn_asteroids(self): # Asteroid Spawning asteroids", "= { 'acc': self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause':", "# most easily delivered as a tuple. # * is the untuple argument.", "# add two baby asteroids! self.entities.append( Asteroid( asteroid.size - 1, asteroid.pos.getCopy())) self.entities.append( Asteroid(", "is the untuple argument. 
batch.add(*e.draw()) # Filter out any dead objects self.entities[:] =", "label.draw() if self.keys[key.S]: self.mode = \"GAME\" elif self.keys[key.ESCAPE]: self.quit = True # Game", "part of pyglet self.keys = pyglet.window.key.KeyStateHandler() self.window.push_handlers(self.keys) # Stage the game or return", "look is in game mode, and has all the updating/drawing logic def game_loop(self,", "'right': self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit = controller['quit'] if", "self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) # This function runs when the look is", "in [e for e in self.entities if isinstance(e, Bullet)]: for asteroid in asteroids:", "'left': self.keys[key.A], 'right': self.keys[key.D], 'fire': self.keys[key.SPACE], 'quit': self.keys[key.ESCAPE], 'pause': self.keys[key.P] } self.quit =", "\"SPLASH\": self.splash_loop(dt) elif self.mode == \"GAMEOVER\": self.game_over_loop(dt) else: self.quit == True print(\"Error: Debug:", "HUD import HUD from pyglet.window import key from Vect2 import Vect2 import math", "the right function def loop(self, dt): if self.debounce_timer > 0: self.debounce_timer -= dt", "to spawn DEBOUNCE = 1 class StateManager(object): def __init__(self): self.quit = False self._init_window()", "True) self.entities.append(self.exhaust) #Create a new instance of the Player class at the center", "dt if self.mode == \"GAME\": self.game_loop(dt) elif self.mode == \"PAUSE\": self.pause_loop(dt) elif self.mode", "self.window.clear() #print(pyglet.gl.get_current_context()) # On a proper engine the controller would probably be its", "# Determine if a bullet should be spawned, and then spawns a bullet", "HUD() self.entities = [] self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi, math.pi", "self.mode = \"GAMEOVER\" # Process asteroid/bullet collisions for bullet in 
[e for e", "the user may have controller = { 'acc': self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D],", "arguments # most easily delivered as a tuple. # * is the untuple", "= [e for e in self.entities if isinstance(e, Asteroid)] if len(asteroids) < targetNo:", "if self.mode == \"GAME\": self.game_loop(dt) elif self.mode == \"PAUSE\": self.pause_loop(dt) elif self.mode ==", "\"GAMEOVER\": self.game_over_loop(dt) else: self.quit == True print(\"Error: Debug: state.mode == Invalid state!\") #", "Dispatch loop to the right function def loop(self, dt): if self.debounce_timer > 0:", "are colliding in a meaningful way for the game def detect_collisions(self): asteroids =", "def is_quit(self): return self.quit # Dispatch loop to the right function def loop(self,", "for asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check if player is", "= [] self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi, math.pi / 4,", "restart, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y =", "asteroid.pos.getCopy()): self.player.kill() # Check if player is actually dead, it may be in", "if e.isAlive()] # Draw objects to the frame batch.draw() self.hud.drawHUD() # Determine if", "def _init_game(self): self.hud = HUD() self.entities = [] self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(),", "(self.player.angle + math.pi) self.exhaust.pos = self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e in self.entities:", "400 targetNo = 5 # number of asteroids to spawn DEBOUNCE = 1", "== \"PAUSE\": self.pause_loop(dt) elif self.mode == \"SPLASH\": self.splash_loop(dt) elif self.mode == \"GAMEOVER\": self.game_over_loop(dt)", "in self.entities: # batch.add expects a series of arguments # most easily delivered", "a window for the game def 
_init_window(self): # Window object represents the game's", "anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" self._init_game() elif self.keys[key.ESCAPE]: self.quit", "the center of the screen def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2, y=self.window.height/2)) self.entities.append(self.player) #", "self.game_loop(dt) elif self.mode == \"PAUSE\": self.pause_loop(dt) elif self.mode == \"SPLASH\": self.splash_loop(dt) elif self.mode", "the main function if the player requested to quit def is_quit(self): return self.quit", "__init__(self): self.quit = False self._init_window() self._init_game() self.mode = \"SPLASH\" # Prevent bouncing on", "self.player.kill() # Check if player is actually dead, it may be in invuln", "in game mode, and has all the updating/drawing logic def game_loop(self, dt): #Clear", "colliding in a meaningful way for the game def detect_collisions(self): asteroids = [e", "game mode, and has all the updating/drawing logic def game_loop(self, dt): #Clear frame", "pyglet.text.Label(\"Rocks in Space: Press s to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center',", "= 5 # number of asteroids to spawn DEBOUNCE = 1 class StateManager(object):", "delivered as a tuple. # * is the untuple argument. 
batch.add(*e.draw()) # Filter", "in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill() # Check if player is actually dead,", "on thrust effect if ship is accelerating self.exhaust.active = controller['acc'] self.exhaust.angle = (self.player.angle", "in self.entities if isinstance(e, Asteroid)] for asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius, asteroid.pos.getCopy()): self.player.kill()", "#Clear frame before looping self.window.clear() #print(pyglet.gl.get_current_context()) # On a proper engine the controller", "self.debounce_timer = DEBOUNCE self.player.input(controller) #turn on thrust effect if ship is accelerating self.exhaust.active", "modes self.debounce_timer = DEBOUNCE # Create a window for the game def _init_window(self):", "window size constant WIDTH = 800 HEIGHT = 400 targetNo = 5 #", "self.window = pyglet.window.Window(WIDTH, HEIGHT) # Keys holds a handler that keeps track of", "self.player.pos.getCopy() self.spawn_bullets() self.spawn_asteroids() self.detect_collisions() for e in self.entities: e.update(dt) #for e in self.entities:", "= ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi, math.pi / 4, .01, ParticleFactory(speed=20, color=(255, 0,", "self.hud.hit() # Inform the main function if the player requested to quit def", "* is the untuple argument. 
batch.add(*e.draw()) # Filter out any dead objects self.entities[:]", "in self.entities if isinstance(e, Asteroid)] if len(asteroids) < targetNo: newAsteroid = Asteroid(3, Vect2(0,", "user may have controller = { 'acc': self.keys[key.W], 'left': self.keys[key.A], 'right': self.keys[key.D], 'fire':", "self.entities = [] self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi, math.pi /", "and # other controllers the user may have controller = { 'acc': self.keys[key.W],", "anchor_y = 'center') label.draw() if self.keys[key.P] and self.debounce_timer <= 0: self.mode = \"GAME\"", "math.pi / 4, .01, ParticleFactory(speed=20, color=(255, 0, 0)), True) self.entities.append(self.exhaust) #Create a new", "to the frame batch.draw() self.hud.drawHUD() # Determine if a bullet should be spawned,", "as a tuple. # * is the untuple argument. batch.add(*e.draw()) # Filter out", "determines if any objects are colliding in a meaningful way for the game", "Asteroid(3, Vect2(0, 0)) self.entities.append(newAsteroid) # This function determines if any objects are colliding", "On a proper engine the controller would probably be its own class. 
#", "== \"GAME\": self.game_loop(dt) elif self.mode == \"PAUSE\": self.pause_loop(dt) elif self.mode == \"SPLASH\": self.splash_loop(dt)", "constant WIDTH = 800 HEIGHT = 400 targetNo = 5 # number of", "easier to use keyboards, mice, and # other controllers the user may have", "'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" elif self.keys[key.ESCAPE]: self.quit = True #", "Player class at the center of the screen def spawn_player(self): self.player = Player(Vect2(x=self.window.width/2,", "is in game mode, and has all the updating/drawing logic def game_loop(self, dt):", "player is actually dead, it may be in invuln # period if (self.player.isAlive()", "when the look is in game mode, and has all the updating/drawing logic", "for e in self.entities if isinstance(e, Bullet)]: for asteroid in asteroids: if bullet.overlaps(", "has all the updating/drawing logic def game_loop(self, dt): #Clear frame before looping self.window.clear()", "for e in self.entities if isinstance(e, Asteroid)] for asteroid in asteroids: if self.player.overlaps(asteroid.hit_radius,", "self.debounce_timer <= 0: self.mode = \"GAME\" self.debounce_timer = DEBOUNCE elif self.keys[key.ESCAPE]: self.quit =", "0)) self.entities.append(newAsteroid) # This function determines if any objects are colliding in a", "a tuple. # * is the untuple argument. batch.add(*e.draw()) # Filter out any", "= 'center', anchor_y = 'center') label.draw() if self.keys[key.S]: self.mode = \"GAME\" elif self.keys[key.ESCAPE]:", "over! Press S to restart, or ESC to quit\", font_size=24, x=WIDTH//2, y=HEIGHT//2, anchor_x", "if asteroid.size > 1: # add two baby asteroids! 
self.entities.append( Asteroid( asteroid.size -", "- 1, asteroid.pos.getCopy())) # Remove bullet bullet.kill() # Log the points self.hud.hit() #", "!= True): if (self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode = \"GAMEOVER\" # Process asteroid/bullet", "def game_loop(self, dt): #Clear frame before looping self.window.clear() #print(pyglet.gl.get_current_context()) # On a proper", "= False self._init_window() self._init_game() self.mode = \"SPLASH\" # Prevent bouncing on switching game", "Vect2 import math # Target window size constant WIDTH = 800 HEIGHT =", "[] self.spawn_player() self.exhaust = ParticleSpawner( self.player.pos.getCopy(), self.player.angle + math.pi, math.pi / 4, .01,", "label = pyglet.text.Label(\"Rocks in Space: Press s to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x", "bullet bullet.kill() # Log the points self.hud.hit() # Inform the main function if", "dt): #Clear frame before looping self.window.clear() #print(pyglet.gl.get_current_context()) # On a proper engine the", "loop to the right function def loop(self, dt): if self.debounce_timer > 0: self.debounce_timer", "expects a series of arguments # most easily delivered as a tuple. #", "Press s to start\", font_size=38, x=WIDTH//2, y=HEIGHT//2, anchor_x = 'center', anchor_y = 'center')", ".01, ParticleFactory(speed=20, color=(255, 0, 0)), True) self.entities.append(self.exhaust) #Create a new instance of the", "dead, it may be in invuln # period if (self.player.isAlive() != True): if", "minimum asteroid population def spawn_asteroids(self): # Asteroid Spawning asteroids = [e for e", "(self.hud.has_lives()): self.spawn_player() self.hud.kill() else: self.mode = \"GAMEOVER\" # Process asteroid/bullet collisions for bullet" ]
[ "-*- coding: utf-8 -*- def intsin(x): eps = 0.000000000000001 term = x sum", "utf-8 -*- def intsin(x): eps = 0.000000000000001 term = x sum = x", "coding: utf-8 -*- def intsin(x): eps = 0.000000000000001 term = x sum =", "k * k * n) sum += term n += 1 return sum", "n + 1 term *= -x * x * (k - 2) /", "sum = x n = 1 while (term * term) > (eps *", "1 while (term * term) > (eps * eps): k = 2 *", "sum += term n += 1 return sum if __name__ == '__main__': print(intsin(float(input(\"Введите", "= 2 * n + 1 term *= -x * x * (k", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- def intsin(x): eps = 0.000000000000001 term", "x sum = x n = 1 while (term * term) > (eps", "term n += 1 return sum if __name__ == '__main__': print(intsin(float(input(\"Введите х: \"))))", "(eps * eps): k = 2 * n + 1 term *= -x", "k * n) sum += term n += 1 return sum if __name__", "> (eps * eps): k = 2 * n + 1 term *=", "1 term *= -x * x * (k - 2) / (2 *", "term *= -x * x * (k - 2) / (2 * k", "= x sum = x n = 1 while (term * term) >", "x n = 1 while (term * term) > (eps * eps): k", "n = 1 while (term * term) > (eps * eps): k =", "+ 1 term *= -x * x * (k - 2) / (2", "* eps): k = 2 * n + 1 term *= -x *", "def intsin(x): eps = 0.000000000000001 term = x sum = x n =", "0.000000000000001 term = x sum = x n = 1 while (term *", "* k * n) sum += term n += 1 return sum if", "*= -x * x * (k - 2) / (2 * k *", "python3 # -*- coding: utf-8 -*- def intsin(x): eps = 0.000000000000001 term =", "intsin(x): eps = 0.000000000000001 term = x sum = x n = 1", "(term * term) > (eps * eps): k = 2 * n +", "= 0.000000000000001 term = x sum = x n = 1 while (term", "term = x sum = x n = 1 while (term * term)", "* n + 1 term *= -x * x * (k - 2)", "x * (k - 2) / (2 * k * k * n)", "(k - 2) / (2 * k * k * n) sum +=", "eps = 0.000000000000001 term = x sum = x n = 1 while", "* k * k * n) sum += term n += 1 return", "n) sum += term n += 1 return sum if __name__ == '__main__':", "eps): k = 2 * n + 1 term *= -x * x", "* (k - 2) / (2 * k 
* k * n) sum", "* n) sum += term n += 1 return sum if __name__ ==", "+= term n += 1 return sum if __name__ == '__main__': print(intsin(float(input(\"Введите х:", "* term) > (eps * eps): k = 2 * n + 1", "# -*- coding: utf-8 -*- def intsin(x): eps = 0.000000000000001 term = x", "(2 * k * k * n) sum += term n += 1", "while (term * term) > (eps * eps): k = 2 * n", "/ (2 * k * k * n) sum += term n +=", "= 1 while (term * term) > (eps * eps): k = 2", "term) > (eps * eps): k = 2 * n + 1 term", "-*- def intsin(x): eps = 0.000000000000001 term = x sum = x n", "2 * n + 1 term *= -x * x * (k -", "-x * x * (k - 2) / (2 * k * k", "2) / (2 * k * k * n) sum += term n", "- 2) / (2 * k * k * n) sum += term", "* x * (k - 2) / (2 * k * k *", "= x n = 1 while (term * term) > (eps * eps):", "k = 2 * n + 1 term *= -x * x *" ]
[ "need a clock. New in 2.0: s/m keys set seconds/minutes timer for pop-up", "x, y = self.point(i, 12, radius-6, originX, originY) self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg)", "originX, originY) self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) # mins for i in range(12):", "############################################################################### appname = 'PyClock 2.1' # use new custom Tk, Toplevel for icons,", "y+3, fill=cfg.fg) # hours self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg) def point(self, tick,", "= cfg.size * .01 self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0,", "time.time() timeTuple = time.localtime(secsSinceEpoch) hour, min, sec = timeTuple[3:6] if sec != self.lastSec:", "= secs def onCountdownMin(self, event): secs = askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds =", "PopupWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def __init__(self,", "config=ClockConfig, name=''): MainWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) # b/w", "round( radius * math.sin(angle * radiansPerDegree) )) pointY = int( round( radius *", "a pop-up date label, clock face images, general resizing, etc. May be run", "+ (mins / 60.0) hx, hy = self.point(hour, 12, (radius * .80), originX,", "else: self.dateLabel.pack_forget() self.update() def onResize(self, event): if event.widget == self.display: self.display.onResize(event.width, event.height, self.cfg)", "if len(sys.argv) >= 2: getOptions(config, sys.argv) # clock.py -size n -bg 'blue'... 
#myclock", "= Label(self) for label in self.hour, self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg)", "cfg): # on timer callback if self.cog: # redraw hands, cog self.delete(self.cog) self.delete(self.hourHand)", "argv[ix+1]) #config = PhotoClockConfig() config = ClockConfig() if len(sys.argv) >= 2: getOptions(config, sys.argv)", "sys.argv) # clock.py -size n -bg 'blue'... #myclock = ClockWindow(config, Tk()) # parent", "radius-6, originX, originY) self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) # hours self.ampm = self.create_text(3,", "appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def __init__(self, config=ClockConfig,", "y-1, x+1, y+1, fill=cfg.fg) # mins for i in range(12): x, y =", "sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size * .01 self.cog = self.create_oval(originX-cogsz,", "hy = self.point(hour, 12, (radius * .80), originX, originY) mx, my = self.point(mins,", "= 0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent): self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay =", "timeTuple[3:6] if sec != self.lastSec: self.lastSec = sec ampm = ((hour >= 12)", "onResize', cfg.size+4, newSize) if newSize != cfg.size+4: cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg) #", "= min(newWidth, newHeight) #print('analog onResize', cfg.size+4, newSize) if newSize != cfg.size+4: cfg.size =", "or subclass size = 200 # width=height bg, fg = 'beige', 'brown' #", "self.lastSec: self.lastSec = sec ampm = ((hour >= 12) and 'PM') or 'AM'", "self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth, newHeight, cfg): pass # nothing to redraw here", "(cfg.size - self.image.width()) // 2 # center it imgy = (cfg.size - self.image.height())", "bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) 
parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def", "= cfg.size // 2 # 3.x // div for i in range(60): x,", "if self.countdownSeconds == 0: self.onCountdownExpire() # countdown timer self.after(1000 // ChecksPerSec, self.onTimer) #", ")) pointY = int( round( radius * math.cos(angle * radiansPerDegree) )) return (pointX", "= hour + (mins / 60.0) hx, hy = self.point(hour, 12, (radius *", "msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() # raise above siblings if sys.platform[:3] == 'win':", "self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX = originY = radius = cfg.size // 2", "self.cog: # redraw hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX = originY =", "x+3, y+3, fill=cfg.fg) # hours self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg) def point(self,", "siblings if sys.platform[:3] == 'win': # full screen on Windows win.state('zoomed') ############################################################################### #", "onUpdate(self, hour, mins, secs, ampm, cfg): mins = str(mins).zfill(2) # or '%02d' %", "cfg.picture: # draw ovals, picture try: self.image = PhotoImage(file=cfg.picture) # bkground except: self.image", "fill=BOTH) win.lift() # raise above siblings if sys.platform[:3] == 'win': # full screen", "originY) self.hourHand = self.create_line(originX, originY, hx, hy, width=(cfg.size * .04), arrow='last', arrowshape=(25,25,15), fill=cfg.hh)", "clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self,", "my, width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = self.create_line(originX, originY, sx, sy,", "'white', 'blue', 'orange' 
############################################################################### # Digital display object ############################################################################### class DigitalDisplay(Frame): def __init__(self,", "expand=YES, fill=BOTH) def onToggleLabel(self, event): self.labelOn += 1 if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM,", "to redraw here ############################################################################### # Analog display object ############################################################################### class AnalogDisplay(Canvas): def __init__(self,", "# center it imgy = (cfg.size - self.image.height()) // 2 # 3.x //", "arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size * .01 self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz,", "Python 3.X (2.X no longer supported) ############################################################################### \"\"\" from tkinter import * from", "############################################################################### ChecksPerSec = 10 # second change timer class Clock(Frame): def __init__(self, config=ClockConfig,", "ovals, picture try: self.image = PhotoImage(file=cfg.picture) # bkground except: self.image = BitmapImage(file=cfg.picture) #", "bkground except: self.image = BitmapImage(file=cfg.picture) # save ref imgx = (cfg.size - self.image.width())", "sx, sy = self.point(secs, 60, (radius * .95), originX, originY) self.hourHand = self.create_line(originX,", "secs * 60 def onCountdownExpire(self): # caveat: only one active, no progress indicator", "int(argv[ix+1])) else: setattr(config, attr, argv[ix+1]) #config = PhotoClockConfig() config = ClockConfig() if len(sys.argv)", "sec = timeTuple[3:6] if sec != self.lastSec: self.lastSec = sec ampm = ((hour", "timeTuple = time.localtime(secsSinceEpoch) hour, min, sec = timeTuple[3:6] if sec != self.lastSec: self.lastSec", "if ix in range(1, len(argv)-1): if type(getattr(ClockConfig, 
attr)) == int: setattr(config, attr, int(argv[ix+1]))", "tick, units, radius, originX, originY): angle = tick * (360.0 / units) radiansPerDegree", "self.point(mins, 60, (radius * .90), originX, originY) sx, sy = self.point(secs, 60, (radius", "time.localtime(secsSinceEpoch) hour, min, sec = timeTuple[3:6] if sec != self.lastSec: self.lastSec = sec", "class DigitalDisplay(Frame): def __init__(self, parent, cfg): Frame.__init__(self, parent) self.hour = Label(self) self.mins =", "(radius * .90), originX, originY) sx, sy = self.point(secs, 60, (radius * .95),", "self.lastSec = sec ampm = ((hour >= 12) and 'PM') or 'AM' #", "= Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec)", "# width=height bg, fg = 'beige', 'brown' # face, tick colors hh, mh,", "3, anchor=NW, fill=cfg.fg) def point(self, tick, units, radius, originX, originY): angle = tick", "!= cfg.size+4: cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate called next ############################################################################### #", "on Windows win.state('zoomed') ############################################################################### # Standalone clocks ############################################################################### appname = 'PyClock 2.1' #", "in dir(ClockConfig): # fill default config obj, try: # from \"-attr val\" cmd", "# clock.py -size n -bg 'blue'... #myclock = ClockWindow(config, Tk()) # parent is", "2: getOptions(config, sys.argv) # clock.py -size n -bg 'blue'... #myclock = ClockWindow(config, Tk())", "// ChecksPerSec, self.onTimer) # run N times per second # 3.x // trunc", "n -bg 'blue'... 
#myclock = ClockWindow(config, Tk()) # parent is Tk root if", "subclass size = 200 # width=height bg, fg = 'beige', 'brown' # face,", "Analog display object ############################################################################### class AnalogDisplay(Canvas): def __init__(self, parent, cfg): Canvas.__init__(self, parent, width=cfg.size,", "= argv.index('-' + attr) # will skip __x__ internals except: continue else: if", "'navy', 'blue', 'red' # clock hands, center picture = None # face photo", "self.analogDisplay: self.display = self.digitalDisplay else: self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self,", "expand, and scale font on resize def onUpdate(self, hour, mins, secs, ampm, cfg):", "both standalone, or embedded (attached) in other GUIs that need a clock. New", "= 320 picture = '../gifs/ora-pp.gif' bg, hh, mh = 'white', 'blue', 'orange' ###############################################################################", "if secs: self.countdownSeconds = secs def onCountdownMin(self, event): secs = askinteger('Countdown', 'Minutes') if", "picture = '../gifs/ora-pp.gif' bg, hh, mh = 'white', 'blue', 'orange' ############################################################################### # Digital", "askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds = secs def onCountdownMin(self, event): secs = askinteger('Countdown',", "Standalone clocks ############################################################################### appname = 'PyClock 2.1' # use new custom Tk, Toplevel", "((hour >= 12) and 'PM') or 'AM' # 0...23 hour = (hour %", "= (cfg.size - self.image.height()) // 2 # 3.x // div self.create_image(imgx+1, imgy+1, anchor=NW,", "self.quit) ############################################################################### # Program run ############################################################################### if __name__ == '__main__': def getOptions(config, argv):", "* 
math.cos(angle * radiansPerDegree) )) return (pointX + originX+1), (originY+1 - pointY) def", "self.display.onResize(event.width, event.height, self.cfg) def onTimer(self): secsSinceEpoch = time.time() timeTuple = time.localtime(secsSinceEpoch) hour, min,", "fg = 'beige', 'brown' # face, tick colors hh, mh, sh, cog =", "############################################################################### # Analog display object ############################################################################### class AnalogDisplay(Canvas): def __init__(self, parent, cfg): Canvas.__init__(self,", "width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size * .01 self.cog = self.create_oval(originX-cogsz, originY+cogsz,", "/ units) radiansPerDegree = math.pi / 180 pointX = int( round( radius *", "bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin)", "borders, passed-in parent class ClockWindow(Clock): def __init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self, config, parent)", "radius = cfg.size // 2 # 3.x div hour = hour + (mins", "argv): for attr in dir(ClockConfig): # fill default config obj, try: # from", "__x__ internals except: continue else: if ix in range(1, len(argv)-1): if type(getattr(ClockConfig, attr))", "div def onCountdownSec(self, event): secs = askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds = secs", "PhotoClockConfig() config = ClockConfig() if len(sys.argv) >= 2: getOptions(config, sys.argv) # clock.py -size", "timer callback if self.cog: # redraw hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX", "= ClockWindow(config, Tk()) # parent is Tk root if standalone #myclock = ClockPopup(ClockConfig(),", "ClockWindow(config, 
Tk()) # parent is Tk root if standalone #myclock = ClockPopup(ClockConfig(), 'popup')", "save ref imgx = (cfg.size - self.image.width()) // 2 # center it imgy", "continue else: if ix in range(1, len(argv)-1): if type(getattr(ClockConfig, attr)) == int: setattr(config,", "0...23 hour = (hour % 12) or 12 # 12..11 self.display.onUpdate(hour, min, sec,", "sys ############################################################################### # Option configuration classes ############################################################################### class ClockConfig: # defaults--override in instance", "self.point(i, 12, radius-6, originX, originY) self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) # hours self.ampm", "attr) # will skip __x__ internals except: continue else: if ix in range(1,", "newWidth, newHeight, cfg): newSize = min(newWidth, newHeight) #print('analog onResize', cfg.size+4, newSize) if newSize", "clock.pack(expand=YES, fill=BOTH) # b/w compat: manual window borders, passed-in parent class ClockWindow(Clock): def", "= '../gifs/ora-pp.gif' bg, hh, mh = 'white', 'blue', 'orange' ############################################################################### # Digital display", "60, radius-6, originX, originY) self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) # mins for i", "parent class ClockWindow(Clock): def __init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH)", "// 2 # 3.x // div for i in range(60): x, y =", "self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update() def onResize(self, event): if event.widget", "anchor=NW, fill=cfg.fg) def point(self, tick, units, radius, originX, originY): angle = tick *", "if self.cog: # redraw hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX = originY", "hour + (mins / 60.0) hx, hy = self.point(hour, 12, (radius * 
.80),", "self.display.onUpdate(hour, min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if self.countdownSeconds == 0:", "face, tick colors hh, mh, sh, cog = 'black', 'navy', 'blue', 'red' #", "= originY = radius = cfg.size // 2 # 3.x // div for", "- pointY) def onUpdate(self, hour, mins, secs, ampm, cfg): # on timer callback", "= askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds = secs * 60 def onCountdownExpire(self): #", "arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX, originY, mx, my, width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10),", "PP4E.Gui.Tools.windows import PopupWindow, MainWindow class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name)", "ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if self.countdownSeconds == 0: self.onCountdownExpire() # countdown", "3.x // div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX = originY = radius =", "imgx = (cfg.size - self.image.width()) // 2 # center it imgy = (cfg.size", "clocks ############################################################################### appname = 'PyClock 2.1' # use new custom Tk, Toplevel for", "colors hh, mh, sh, cog = 'black', 'navy', 'blue', 'red' # clock hands,", "originX, originY) self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) # hours self.ampm = self.create_text(3, 3,", "bg, fg = 'beige', 'brown' # face, tick colors hh, mh, sh, cog", "self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX = originY = radius = cfg.size // 2 #", "cfg): newSize = min(newWidth, newHeight) #print('analog onResize', cfg.size+4, newSize) if newSize != cfg.size+4:", "title = appname if name: title = appname + ' - ' +", "len(argv)-1): if type(getattr(ClockConfig, attr)) == int: setattr(config, attr, int(argv[ix+1])) else: 
setattr(config, attr, argv[ix+1])", ">= 2: getOptions(config, sys.argv) # clock.py -size n -bg 'blue'... #myclock = ClockWindow(config,", "self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget() if self.display == self.analogDisplay: self.display = self.digitalDisplay else:", "Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() #", "math.pi / 180 pointX = int( round( radius * math.sin(angle * radiansPerDegree) ))", "center it imgy = (cfg.size - self.image.height()) // 2 # 3.x // div", "parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget() if self.display == self.analogDisplay: self.display", "self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update() def onResize(self, event): if event.widget == self.display: self.display.onResize(event.width,", "second # 3.x // trunc int div def onCountdownSec(self, event): secs = askinteger('Countdown',", "self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname, name) clock", "self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self, event): self.labelOn += 1 if self.labelOn % 2:", "+= 1 if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update() def onResize(self,", "y = self.point(i, 12, radius-6, originX, originY) self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) #", "mh, sh, cog = 'black', 'navy', 'blue', 'red' # clock hands, center picture", "parent, cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand = self.secsHand", "= self.create_line(originX, originY, hx, hy, width=(cfg.size * .04), arrow='last', 
arrowshape=(25,25,15), fill=cfg.hh) self.minsHand =", "radius-6, originX, originY) self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) # mins for i in", "= self.lastMin = -1 self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent): self.digitalDisplay", "attr)) == int: setattr(config, attr, int(argv[ix+1])) else: setattr(config, attr, argv[ix+1]) #config = PhotoClockConfig()", "updated to run under Python 3.X (2.X no longer supported) ############################################################################### \"\"\" from", "// div for i in range(60): x, y = self.point(i, 60, radius-6, originX,", "per second # 3.x // trunc int div def onCountdownSec(self, event): secs =", "self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel = Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode)", "def onCountdownExpire(self): # caveat: only one active, no progress indicator win = Toplevel()", "secs = askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds = secs def onCountdownMin(self, event): secs", "above siblings if sys.platform[:3] == 'win': # full screen on Windows win.state('zoomed') ###############################################################################", "children are packed but self.labelOn = 0 # clients pack or grid me", "= secs * 60 def onCountdownExpire(self): # caveat: only one active, no progress", "self.display == self.analogDisplay: self.display = self.digitalDisplay else: self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH)", "drawClockface(self, cfg): # on start and resize if cfg.picture: # draw ovals, picture", "self.countdownSeconds = secs def onCountdownMin(self, event): secs = askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds", "for attr in dir(ClockConfig): # fill default config obj, try: # from \"-attr", "* radiansPerDegree) )) pointY = int( round( radius * math.cos(angle * 
radiansPerDegree) ))", "self.create_text(3, 3, anchor=NW, fill=cfg.fg) def point(self, tick, units, radius, originX, originY): angle =", "self.create_line(originX, originY, hx, hy, width=(cfg.size * .04), arrow='last', arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX,", "secsSinceEpoch = time.time() timeTuple = time.localtime(secsSinceEpoch) hour, min, sec = timeTuple[3:6] if sec", "60.0) hx, hy = self.point(hour, 12, (radius * .80), originX, originY) mx, my", "appname if name: title = appname + ' - ' + name self.master.title(title)", "############################################################################### class ClockConfig: # defaults--override in instance or subclass size = 200 #", "self.onTimer() def makeWidgets(self, parent): self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel", "imgy = (cfg.size - self.image.height()) // 2 # 3.x // div self.create_image(imgx+1, imgy+1,", "__init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) #", "times per second # 3.x // trunc int div def onCountdownSec(self, event): secs", "DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel = Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>',", "############################################################################### class DigitalDisplay(Frame): def __init__(self, parent, cfg): Frame.__init__(self, parent) self.hour = Label(self) self.mins", ".80), originX, originY) mx, my = self.point(mins, 60, (radius * .90), originX, originY)", "'red' # clock hands, center picture = None # face photo file class", "run ############################################################################### if __name__ == '__main__': def getOptions(config, argv): for attr in dir(ClockConfig):", "only one active, no progress indicator win = Toplevel() 
msg = Button(win, text='Timer", "point(self, tick, units, radius, originX, originY): angle = tick * (360.0 / units)", "cogsz = cfg.size * .01 self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm,", "2.1' # use new custom Tk, Toplevel for icons, etc. from PP4E.Gui.Tools.windows import", "are packed but self.labelOn = 0 # clients pack or grid me self.display", "ClockConfig: # defaults--override in instance or subclass size = 200 # width=height bg,", "newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate called next ############################################################################### # Clock composite object ###############################################################################", "############################################################################### class AnalogDisplay(Canvas): def __init__(self, parent, cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg)", "is Tk root if standalone #myclock = ClockPopup(ClockConfig(), 'popup') myclock = ClockMain(config) myclock.mainloop()", "pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() # raise above siblings if sys.platform[:3] == 'win': #", "import askinteger import math, time, sys ############################################################################### # Option configuration classes ############################################################################### class", "fill default config obj, try: # from \"-attr val\" cmd args ix =", "onCountdownSec(self, event): secs = askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds = secs def onCountdownMin(self,", "BitmapImage(file=cfg.picture) # save ref imgx = (cfg.size - self.image.width()) // 2 # center", "self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm, END, ampm) def onResize(self,", "__init__(self, 
parent, cfg): Frame.__init__(self, parent) self.hour = Label(self) self.mins = Label(self) self.secs =", "Toplevel() msg = Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy') msg.config(padx=10,", "# 3.x // div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX = originY = radius", "* .95), originX, originY) self.hourHand = self.create_line(originX, originY, hx, hy, width=(cfg.size * .04),", "cfg.size * .01 self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END)", "# Standalone clocks ############################################################################### appname = 'PyClock 2.1' # use new custom Tk,", "= originY = radius = cfg.size // 2 # 3.x div hour =", "def onCountdownMin(self, event): secs = askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds = secs *", "ampm = ((hour >= 12) and 'PM') or 'AM' # 0...23 hour =", "= Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES,", "setattr(config, attr, int(argv[ix+1])) else: setattr(config, attr, argv[ix+1]) #config = PhotoClockConfig() config = ClockConfig()", "import math, time, sys ############################################################################### # Option configuration classes ############################################################################### class ClockConfig: #", "onResize(self, newWidth, newHeight, cfg): newSize = min(newWidth, newHeight) #print('analog onResize', cfg.size+4, newSize) if", "* from tkinter.simpledialog import askinteger import math, time, sys ############################################################################### # Option configuration", "'blue', 'red' # clock hands, center picture = None # face photo file", "angle = tick * (360.0 / units) radiansPerDegree = math.pi 
/ 180 pointX", "fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self,", "self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg) def point(self, tick, units, radius, originX, originY):", "originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm, END, ampm) def onResize(self, newWidth, newHeight, cfg):", "cmd args ix = argv.index('-' + attr) # will skip __x__ internals except:", "sx, sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size * .01 self.cog =", "use new custom Tk, Toplevel for icons, etc. from PP4E.Gui.Tools.windows import PopupWindow, MainWindow", "12) or 12 # 12..11 self.display.onUpdate(hour, min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -=", "# hours self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg) def point(self, tick, units, radius,", "hour = hour + (mins / 60.0) hx, hy = self.point(hour, 12, (radius", "and digital display modes, a pop-up date label, clock face images, general resizing,", "active, no progress indicator win = Toplevel() msg = Button(win, text='Timer Expired!', command=win.destroy)", "change timer class Clock(Frame): def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg = config", "originY, hx, hy, width=(cfg.size * .04), arrow='last', arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX, originY,", "originX, originY) mx, my = self.point(mins, 60, (radius * .90), originX, originY) sx,", "embedded (attached) in other GUIs that need a clock. 
New in 2.0: s/m", "# or '%02d' % x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4)", "= Clock(config, self) clock.pack(expand=YES, fill=BOTH) # b/w compat: manual window borders, passed-in parent", "self.point(secs, 60, (radius * .95), originX, originY) self.hourHand = self.create_line(originX, originY, hx, hy,", ".04), arrow='last', arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX, originY, mx, my, width=(cfg.size * .03),", "= cfg.size // 2 # 3.x div hour = hour + (mins /", "except: continue else: if ix in range(1, len(argv)-1): if type(getattr(ClockConfig, attr)) == int:", "config, parent) self.pack(expand=YES, fill=BOTH) title = appname if name: title = appname +", "# fill default config obj, try: # from \"-attr val\" cmd args ix", "Label(self) self.mins = Label(self) self.secs = Label(self) self.ampm = Label(self) for label in", "originX, originY): angle = tick * (360.0 / units) radiansPerDegree = math.pi /", "name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) # b/w compat: manual window borders,", "# Clock composite object ############################################################################### ChecksPerSec = 10 # second change timer class", "parent=None, name=''): Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH) title = appname if name: title", "ClockConfig() if len(sys.argv) >= 2: getOptions(config, sys.argv) # clock.py -size n -bg 'blue'...", "sec ampm = ((hour >= 12) and 'PM') or 'AM' # 0...23 hour", "font on resize def onUpdate(self, hour, mins, secs, ampm, cfg): mins = str(mins).zfill(2)", "0 # clients pack or grid me self.display = self.digitalDisplay self.lastSec = self.lastMin", "None # face photo file class PhotoClockConfig(ClockConfig): # sample configuration size = 320", "0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent): 
self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self,", "that need a clock. New in 2.0: s/m keys set seconds/minutes timer for", "i in range(60): x, y = self.point(i, 60, radius-6, originX, originY) self.create_rectangle(x-1, y-1,", "class ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname, name) clock = Clock(config, self)", "parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget() if self.display == self.analogDisplay: self.display = self.digitalDisplay", "self.minsHand = self.secsHand = self.cog = None def drawClockface(self, cfg): # on start", "sample configuration size = 320 picture = '../gifs/ora-pp.gif' bg, hh, mh = 'white',", "With both analog and digital display modes, a pop-up date label, clock face", "originY = radius = cfg.size // 2 # 3.x // div for i", "parent): self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel = Label(self, bd=3,", "#config = PhotoClockConfig() config = ClockConfig() if len(sys.argv) >= 2: getOptions(config, sys.argv) #", "self.secs = Label(self) self.ampm = Label(self) for label in self.hour, self.mins, self.secs, self.ampm:", "= PhotoImage(file=cfg.picture) # bkground except: self.image = BitmapImage(file=cfg.picture) # save ref imgx =", "fill=BOTH) def onToggleLabel(self, event): self.labelOn += 1 if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X)", "no progress indicator win = Toplevel() msg = Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier',", "= self.cog = None def drawClockface(self, cfg): # on start and resize if", "# TBD: could expand, and scale font on resize def onUpdate(self, hour, mins,", "def __init__(self, parent, cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand", "(360.0 / units) 
radiansPerDegree = math.pi / 180 pointX = int( round( radius", "resizing, etc. May be run both standalone, or embedded (attached) in other GUIs", "self.delete('all') self.drawClockface(cfg) # onUpdate called next ############################################################################### # Clock composite object ############################################################################### ChecksPerSec", "GUIs that need a clock. New in 2.0: s/m keys set seconds/minutes timer", "clients pack or grid me self.display = self.digitalDisplay self.lastSec = self.lastMin = -1", "# clock hands, center picture = None # face photo file class PhotoClockConfig(ClockConfig):", "clock.py -size n -bg 'blue'... #myclock = ClockWindow(config, Tk()) # parent is Tk", "self.display = self.digitalDisplay else: self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self, event):", "x, y = self.point(i, 60, radius-6, originX, originY) self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg)", "== self.display: self.display.onResize(event.width, event.height, self.cfg) def onTimer(self): secsSinceEpoch = time.time() timeTuple = time.localtime(secsSinceEpoch)", "name=''): PopupWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def", "= (cfg.size - self.image.width()) // 2 # center it imgy = (cfg.size -", "if __name__ == '__main__': def getOptions(config, argv): for attr in dir(ClockConfig): # fill", "newHeight, cfg): newSize = min(newWidth, newHeight) #print('analog onResize', cfg.size+4, newSize) if newSize !=", "obj, try: # from \"-attr val\" cmd args ix = argv.index('-' + attr)", "* .80), originX, originY) mx, my = self.point(mins, 60, (radius * .90), originX,", "= tick * (360.0 / units) radiansPerDegree = math.pi / 180 pointX =", "3.x // div for i in range(60): x, y = self.point(i, 60, radius-6,", "# on start and resize if cfg.picture: # draw ovals, picture 
try: self.image", "hour, mins, secs, ampm, cfg): # on timer callback if self.cog: # redraw", "set seconds/minutes timer for pop-up msg; window icon. New in 2.1: updated to", "self.after(1000 // ChecksPerSec, self.onTimer) # run N times per second # 3.x //", "'Minutes') if secs: self.countdownSeconds = secs * 60 def onCountdownExpire(self): # caveat: only", "END, ampm) def onResize(self, newWidth, newHeight, cfg): newSize = min(newWidth, newHeight) #print('analog onResize',", "+ attr) # will skip __x__ internals except: continue else: if ix in", "pop-up msg; window icon. New in 2.1: updated to run under Python 3.X", "hours self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg) def point(self, tick, units, radius, originX,", "radius * math.sin(angle * radiansPerDegree) )) pointY = int( round( radius * math.cos(angle", "config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow):", "self.point(i, 60, radius-6, originX, originY) self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) # mins for", "secs: self.countdownSeconds = secs def onCountdownMin(self, event): secs = askinteger('Countdown', 'Minutes') if secs:", "standalone, or embedded (attached) in other GUIs that need a clock. 
New in", "self.digitalDisplay else: self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self, event): self.labelOn +=", "parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand = self.secsHand = self.cog =", "New in 2.1: updated to run under Python 3.X (2.X no longer supported)", "END) self.insert(self.ampm, END, ampm) def onResize(self, newWidth, newHeight, cfg): newSize = min(newWidth, newHeight)", "12) and 'PM') or 'AM' # 0...23 hour = (hour % 12) or", "run both standalone, or embedded (attached) in other GUIs that need a clock.", "self.digitalDisplay self.lastSec = self.lastMin = -1 self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self,", "= ClockConfig() if len(sys.argv) >= 2: getOptions(config, sys.argv) # clock.py -size n -bg", "configuration size = 320 picture = '../gifs/ora-pp.gif' bg, hh, mh = 'white', 'blue',", "'__main__': def getOptions(config, argv): for attr in dir(ClockConfig): # fill default config obj,", "Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand = self.secsHand = self.cog", "'Seconds?') if secs: self.countdownSeconds = secs def onCountdownMin(self, event): secs = askinteger('Countdown', 'Minutes')", "full screen on Windows win.state('zoomed') ############################################################################### # Standalone clocks ############################################################################### appname = 'PyClock", "anchor=NW, image=self.image) originX = originY = radius = cfg.size // 2 # 3.x", "width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = self.create_line(originX, originY, sx, sy, width=1,", "if event.widget == self.display: self.display.onResize(event.width, event.height, self.cfg) def onTimer(self): secsSinceEpoch = time.time() timeTuple", "label, clock face 
images, general resizing, etc. May be run both standalone, or", "name self.master.title(title) # master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program run ###############################################################################", "or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program run ############################################################################### if __name__ == '__main__':", "self.labelOn = 0 # clients pack or grid me self.display = self.digitalDisplay self.lastSec", "args ix = argv.index('-' + attr) # will skip __x__ internals except: continue", "class ClockConfig: # defaults--override in instance or subclass size = 200 # width=height", "############################################################################### # Standalone clocks ############################################################################### appname = 'PyClock 2.1' # use new custom", "3.x // trunc int div def onCountdownSec(self, event): secs = askinteger('Countdown', 'Seconds?') if", "s/m keys set seconds/minutes timer for pop-up msg; window icon. 
New in 2.1:", "# children are packed but self.labelOn = 0 # clients pack or grid", "mx, my, width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = self.create_line(originX, originY, sx,", "div hour = hour + (mins / 60.0) hx, hy = self.point(hour, 12,", "Option configuration classes ############################################################################### class ClockConfig: # defaults--override in instance or subclass size", "// 2 # center it imgy = (cfg.size - self.image.height()) // 2 #", "= Clock(config, self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname,", "PopupWindow, MainWindow class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name) clock =", "instance or subclass size = 200 # width=height bg, fg = 'beige', 'brown'", "############################################################################### # Option configuration classes ############################################################################### class ClockConfig: # defaults--override in instance or", "originX, originY) sx, sy = self.point(secs, 60, (radius * .95), originX, originY) self.hourHand", "= DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel = Label(self, bd=3, bg='red', fg='blue')", "self.countdownSeconds -= 1 if self.countdownSeconds == 0: self.onCountdownExpire() # countdown timer self.after(1000 //", "self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if self.countdownSeconds == 0: self.onCountdownExpire() # countdown timer self.after(1000", "self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget() if self.display", "y = self.point(i, 60, radius-6, 
originX, originY) self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) #", "2 # center it imgy = (cfg.size - self.image.height()) // 2 # 3.x", "display modes, a pop-up date label, clock face images, general resizing, etc. May", "range(1, len(argv)-1): if type(getattr(ClockConfig, attr)) == int: setattr(config, attr, int(argv[ix+1])) else: setattr(config, attr,", "hx, hy = self.point(hour, 12, (radius * .80), originX, originY) mx, my =", "sh, cog = 'black', 'navy', 'blue', 'red' # clock hands, center picture =", "newHeight, cfg): pass # nothing to redraw here ############################################################################### # Analog display object", "class AnalogDisplay(Canvas): def __init__(self, parent, cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand", "__init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg = config self.makeWidgets(parent) # children are packed", "Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH)", "// div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX = originY = radius = cfg.size", "= 'PyClock 2.1' # use new custom Tk, Toplevel for icons, etc. 
from", "__init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH) title = appname if", "width=4) self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth, newHeight, cfg): pass # nothing to redraw", "= ((hour >= 12) and 'PM') or 'AM' # 0...23 hour = (hour", "self.pack(expand=YES, fill=BOTH) title = appname if name: title = appname + ' -", "timer class Clock(Frame): def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg = config self.makeWidgets(parent)", "self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD: could expand, and scale font", "mh = 'white', 'blue', 'orange' ############################################################################### # Digital display object ############################################################################### class DigitalDisplay(Frame):", "run under Python 3.X (2.X no longer supported) ############################################################################### \"\"\" from tkinter import", "200 # width=height bg, fg = 'beige', 'brown' # face, tick colors hh,", "Toplevel for icons, etc. 
from PP4E.Gui.Tools.windows import PopupWindow, MainWindow class ClockPopup(PopupWindow): def __init__(self,", "pass # nothing to redraw here ############################################################################### # Analog display object ############################################################################### class", "Label(self) for label in self.hour, self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT)", "self) clock.pack(expand=YES, fill=BOTH) # b/w compat: manual window borders, passed-in parent class ClockWindow(Clock):", "width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand = self.secsHand = self.cog = None", "== int: setattr(config, attr, int(argv[ix+1])) else: setattr(config, attr, argv[ix+1]) #config = PhotoClockConfig() config", "= self.create_line(originX, originY, mx, my, width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand =", "return (pointX + originX+1), (originY+1 - pointY) def onUpdate(self, hour, mins, secs, ampm,", "hour = (hour % 12) or 12 # 12..11 self.display.onUpdate(hour, min, sec, ampm,", "keys set seconds/minutes timer for pop-up msg; window icon. 
New in 2.1: updated", "i in range(12): x, y = self.point(i, 12, radius-6, originX, originY) self.create_rectangle(x-3, y-3,", "\"\"\" from tkinter import * from tkinter.simpledialog import askinteger import math, time, sys", "parent) self.cfg = config self.makeWidgets(parent) # children are packed but self.labelOn = 0", "parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget() if self.display ==", "or 12 # 12..11 self.display.onUpdate(hour, min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1", "int div def onCountdownSec(self, event): secs = askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds =", "clock GUI in Python/tkinter. With both analog and digital display modes, a pop-up", "internals except: continue else: if ix in range(1, len(argv)-1): if type(getattr(ClockConfig, attr)) ==", "ChecksPerSec = 10 # second change timer class Clock(Frame): def __init__(self, config=ClockConfig, parent=None):", "me self.display = self.digitalDisplay self.lastSec = self.lastMin = -1 self.countdownSeconds = 0 self.onSwitchMode(None)", "nothing to redraw here ############################################################################### # Analog display object ############################################################################### class AnalogDisplay(Canvas): def", "2.0: s/m keys set seconds/minutes timer for pop-up msg; window icon. New in", "= self.point(secs, 60, (radius * .95), originX, originY) self.hourHand = self.create_line(originX, originY, hx,", "# Analog display object ############################################################################### class AnalogDisplay(Canvas): def __init__(self, parent, cfg): Canvas.__init__(self, parent,", "icons, etc. 
from PP4E.Gui.Tools.windows import PopupWindow, MainWindow class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''):", "= appname + ' - ' + name self.master.title(title) # master=parent or default", "self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel = Label(self, bd=3, bg='red',", "min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if self.countdownSeconds == 0: self.onCountdownExpire()", "onUpdate(self, hour, mins, secs, ampm, cfg): # on timer callback if self.cog: #", "getOptions(config, argv): for attr in dir(ClockConfig): # fill default config obj, try: #", "# b/w compat: manual window borders, passed-in parent class ClockWindow(Clock): def __init__(self, config=ClockConfig,", "label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD: could expand, and scale font on", "onSwitchMode(self, event): self.display.pack_forget() if self.display == self.analogDisplay: self.display = self.digitalDisplay else: self.display =", "for pop-up msg; window icon. 
New in 2.1: updated to run under Python", "onResize(self, newWidth, newHeight, cfg): pass # nothing to redraw here ############################################################################### # Analog", "my = self.point(mins, 60, (radius * .90), originX, originY) sx, sy = self.point(secs,", "fill=cfg.hh) self.minsHand = self.create_line(originX, originY, mx, my, width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh)", "= int( round( radius * math.cos(angle * radiansPerDegree) )) return (pointX + originX+1),", "width=(cfg.size * .04), arrow='last', arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX, originY, mx, my, width=(cfg.size", "secs = askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds = secs * 60 def onCountdownExpire(self):", "no longer supported) ############################################################################### \"\"\" from tkinter import * from tkinter.simpledialog import askinteger", "<reponame>AngelLiang/PP4E \"\"\" ############################################################################### PyClock 2.1: a clock GUI in Python/tkinter. 
With both analog", "appname + ' - ' + name self.master.title(title) # master=parent or default self.master.protocol('WM_DELETE_WINDOW',", "* .90), originX, originY) sx, sy = self.point(secs, 60, (radius * .95), originX,", "= self.digitalDisplay else: self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self, event): self.labelOn", "* .01 self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm,", "self.onCountdownExpire() # countdown timer self.after(1000 // ChecksPerSec, self.onTimer) # run N times per", "arrow='last', arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX, originY, mx, my, width=(cfg.size * .03), arrow='last',", "2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update() def onResize(self, event): if event.widget == self.display:", "pop-up date label, clock face images, general resizing, etc. May be run both", "bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD: could expand, and scale font on resize def", "# 3.x // div for i in range(60): x, y = self.point(i, 60,", "MainWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) # b/w compat: manual", "getOptions(config, sys.argv) # clock.py -size n -bg 'blue'... 
#myclock = ClockWindow(config, Tk()) #", "= self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm, END, ampm) def", "def onCountdownSec(self, event): secs = askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds = secs def", "+ ' - ' + name self.master.title(title) # master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit)", "== '__main__': def getOptions(config, argv): for attr in dir(ClockConfig): # fill default config", "called next ############################################################################### # Clock composite object ############################################################################### ChecksPerSec = 10 # second", "2.1: updated to run under Python 3.X (2.X no longer supported) ############################################################################### \"\"\"", "Clock composite object ############################################################################### ChecksPerSec = 10 # second change timer class Clock(Frame):", "self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent): self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg)", "# Program run ############################################################################### if __name__ == '__main__': def getOptions(config, argv): for attr", "self.master.title(title) # master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program run ############################################################################### if", "digital display modes, a pop-up date label, clock face images, general resizing, etc.", "# bkground except: self.image = BitmapImage(file=cfg.picture) # save ref imgx = (cfg.size -", "cfg): # on start and resize if cfg.picture: # draw ovals, picture try:", "* .04), arrow='last', 
arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX, originY, mx, my, width=(cfg.size *", "12 # 12..11 self.display.onUpdate(hour, min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if", "imgy+1, anchor=NW, image=self.image) originX = originY = radius = cfg.size // 2 #", "win.lift() # raise above siblings if sys.platform[:3] == 'win': # full screen on", "= 0 # clients pack or grid me self.display = self.digitalDisplay self.lastSec =", "self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel = Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>',", "originY = radius = cfg.size // 2 # 3.x div hour = hour", "to run under Python 3.X (2.X no longer supported) ############################################################################### \"\"\" from tkinter", "Windows win.state('zoomed') ############################################################################### # Standalone clocks ############################################################################### appname = 'PyClock 2.1' # use", "display object ############################################################################### class DigitalDisplay(Frame): def __init__(self, parent, cfg): Frame.__init__(self, parent) self.hour =", "= int( round( radius * math.sin(angle * radiansPerDegree) )) pointY = int( round(", "self.create_line(originX, originY, mx, my, width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = self.create_line(originX,", "event): self.display.pack_forget() if self.display == self.analogDisplay: self.display = self.digitalDisplay else: self.display = self.analogDisplay", "countdown timer self.after(1000 // ChecksPerSec, self.onTimer) # run N times per second #", "def onResize(self, newWidth, newHeight, cfg): pass # nothing to redraw here 
############################################################################### #", "ampm, cfg): mins = str(mins).zfill(2) # or '%02d' % x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins),", "Tk()) # parent is Tk root if standalone #myclock = ClockPopup(ClockConfig(), 'popup') myclock", "try: self.image = PhotoImage(file=cfg.picture) # bkground except: self.image = BitmapImage(file=cfg.picture) # save ref", "text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift()", "passed-in parent class ClockWindow(Clock): def __init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self, config, parent) self.pack(expand=YES,", "(pointX + originX+1), (originY+1 - pointY) def onUpdate(self, hour, mins, secs, ampm, cfg):", "/ 180 pointX = int( round( radius * math.sin(angle * radiansPerDegree) )) pointY", "# from \"-attr val\" cmd args ix = argv.index('-' + attr) # will", "from PP4E.Gui.Tools.windows import PopupWindow, MainWindow class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname,", "fill=BOTH) # b/w compat: manual window borders, passed-in parent class ClockWindow(Clock): def __init__(self,", "fill=cfg.fg) # hours self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg) def point(self, tick, units,", "self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program run ############################################################################### if __name__ == '__main__': def getOptions(config,", "cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand = self.secsHand =", "Clock(config, self) clock.pack(expand=YES, fill=BOTH) # b/w compat: manual window borders, passed-in parent class", 
"cfg.size+4, newSize) if newSize != cfg.size+4: cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate", "in 2.0: s/m keys set seconds/minutes timer for pop-up msg; window icon. New", "second change timer class Clock(Frame): def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg =", "'PyClock 2.1' # use new custom Tk, Toplevel for icons, etc. from PP4E.Gui.Tools.windows", "from tkinter.simpledialog import askinteger import math, time, sys ############################################################################### # Option configuration classes", "resize if cfg.picture: # draw ovals, picture try: self.image = PhotoImage(file=cfg.picture) # bkground", "Python/tkinter. With both analog and digital display modes, a pop-up date label, clock", "bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand = self.secsHand = self.cog = None def drawClockface(self,", "radiansPerDegree) )) pointY = int( round( radius * math.cos(angle * radiansPerDegree) )) return", ".95), originX, originY) self.hourHand = self.create_line(originX, originY, hx, hy, width=(cfg.size * .04), arrow='last',", "def onResize(self, newWidth, newHeight, cfg): newSize = min(newWidth, newHeight) #print('analog onResize', cfg.size+4, newSize)", "def onUpdate(self, hour, mins, secs, ampm, cfg): mins = str(mins).zfill(2) # or '%02d'", "self.image.height()) // 2 # 3.x // div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX =", "= 10 # second change timer class Clock(Frame): def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self,", "if self.display == self.analogDisplay: self.display = self.digitalDisplay else: self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES,", "Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH) title = appname if name: title = appname", "def drawClockface(self, cfg): # on start and resize if cfg.picture: # draw ovals,", "Tk, Toplevel for icons, 
etc. from PP4E.Gui.Tools.windows import PopupWindow, MainWindow class ClockPopup(PopupWindow): def", "from \"-attr val\" cmd args ix = argv.index('-' + attr) # will skip", "next ############################################################################### # Clock composite object ############################################################################### ChecksPerSec = 10 # second change", "'win': # full screen on Windows win.state('zoomed') ############################################################################### # Standalone clocks ############################################################################### appname", "in range(1, len(argv)-1): if type(getattr(ClockConfig, attr)) == int: setattr(config, attr, int(argv[ix+1])) else: setattr(config,", "(attached) in other GUIs that need a clock. New in 2.0: s/m keys", "self.dateLabel = Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>',", "cog = 'black', 'navy', 'blue', 'red' # clock hands, center picture = None", "self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX = originY = radius = cfg.size // 2", "newHeight) #print('analog onResize', cfg.size+4, newSize) if newSize != cfg.size+4: cfg.size = newSize-4 self.delete('all')", "arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = self.create_line(originX, originY, sx, sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh)", "= self.secsHand = self.cog = None def drawClockface(self, cfg): # on start and", "* (360.0 / units) radiansPerDegree = math.pi / 180 pointX = int( round(", "else: self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self, event): self.labelOn += 1", "classes ############################################################################### class 
ClockConfig: # defaults--override in instance or subclass size = 200", "relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD: could expand, and scale font on resize", "self.minsHand = self.create_line(originX, originY, mx, my, width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand", "fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm, END, ampm) def onResize(self, newWidth, newHeight, cfg): newSize", "self.dateLabel.pack_forget() self.update() def onResize(self, event): if event.widget == self.display: self.display.onResize(event.width, event.height, self.cfg) def", "PhotoClockConfig(ClockConfig): # sample configuration size = 320 picture = '../gifs/ora-pp.gif' bg, hh, mh", "AnalogDisplay(Canvas): def __init__(self, parent, cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand =", "180 pointX = int( round( radius * math.sin(angle * radiansPerDegree) )) pointY =", "cfg.size // 2 # 3.x div hour = hour + (mins / 60.0)", "clock hands, center picture = None # face photo file class PhotoClockConfig(ClockConfig): #", "defaults--override in instance or subclass size = 200 # width=height bg, fg =", "== 0: self.onCountdownExpire() # countdown timer self.after(1000 // ChecksPerSec, self.onTimer) # run N", "secs: self.countdownSeconds = secs * 60 def onCountdownExpire(self): # caveat: only one active,", "N times per second # 3.x // trunc int div def onCountdownSec(self, event):", "# sample configuration size = 320 picture = '../gifs/ora-pp.gif' bg, hh, mh =", "timer for pop-up msg; window icon. 
New in 2.1: updated to run under", "cfg.size // 2 # 3.x // div for i in range(60): x, y", "- ' + name self.master.title(title) # master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### #", "10 # second change timer class Clock(Frame): def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent)", "'blue', 'orange' ############################################################################### # Digital display object ############################################################################### class DigitalDisplay(Frame): def __init__(self, parent,", "picture try: self.image = PhotoImage(file=cfg.picture) # bkground except: self.image = BitmapImage(file=cfg.picture) # save", "self.point(hour, 12, (radius * .80), originX, originY) mx, my = self.point(mins, 60, (radius", "it imgy = (cfg.size - self.image.height()) // 2 # 3.x // div self.create_image(imgx+1,", "onCountdownExpire(self): # caveat: only one active, no progress indicator win = Toplevel() msg", "fill=BOTH) class ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname, name) clock = Clock(config,", "Frame.__init__(self, parent) self.hour = Label(self) self.mins = Label(self) self.secs = Label(self) self.ampm =", "in range(12): x, y = self.point(i, 12, radius-6, originX, originY) self.create_rectangle(x-3, y-3, x+3,", "Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>',", "object ############################################################################### class AnalogDisplay(Canvas): def __init__(self, parent, cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg)", "= BitmapImage(file=cfg.picture) # save 
ref imgx = (cfg.size - self.image.width()) // 2 #", "AnalogDisplay(self, self.cfg) self.dateLabel = Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>',", "self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) # mins for i in range(12): x, y", "'%02d' % x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4) def onResize(self,", "clock. New in 2.0: s/m keys set seconds/minutes timer for pop-up msg; window", "scale font on resize def onUpdate(self, hour, mins, secs, ampm, cfg): mins =", "min, sec = timeTuple[3:6] if sec != self.lastSec: self.lastSec = sec ampm =", "manual window borders, passed-in parent class ClockWindow(Clock): def __init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self,", "-= 1 if self.countdownSeconds == 0: self.onCountdownExpire() # countdown timer self.after(1000 // ChecksPerSec,", "on resize def onUpdate(self, hour, mins, secs, ampm, cfg): mins = str(mins).zfill(2) #", "attr in dir(ClockConfig): # fill default config obj, try: # from \"-attr val\"", "if type(getattr(ClockConfig, attr)) == int: setattr(config, attr, int(argv[ix+1])) else: setattr(config, attr, argv[ix+1]) #config", "############################################################################### PyClock 2.1: a clock GUI in Python/tkinter. 
With both analog and digital", "sy = self.point(secs, 60, (radius * .95), originX, originY) self.hourHand = self.create_line(originX, originY,", "raise above siblings if sys.platform[:3] == 'win': # full screen on Windows win.state('zoomed')", "= self.point(i, 60, radius-6, originX, originY) self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) # mins", "(originY+1 - pointY) def onUpdate(self, hour, mins, secs, ampm, cfg): # on timer", "0: self.onCountdownExpire() # countdown timer self.after(1000 // ChecksPerSec, self.onTimer) # run N times", "self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent): self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay", "event.widget == self.display: self.display.onResize(event.width, event.height, self.cfg) def onTimer(self): secsSinceEpoch = time.time() timeTuple =", "PhotoImage(file=cfg.picture) # bkground except: self.image = BitmapImage(file=cfg.picture) # save ref imgx = (cfg.size", "Program run ############################################################################### if __name__ == '__main__': def getOptions(config, argv): for attr in", "mx, my = self.point(mins, 60, (radius * .90), originX, originY) sx, sy =", "on start and resize if cfg.picture: # draw ovals, picture try: self.image =", "progress indicator win = Toplevel() msg = Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80,", "% 12) or 12 # 12..11 self.display.onUpdate(hour, min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds", "x+1, y+1, fill=cfg.fg) # mins for i in range(12): x, y = self.point(i,", "self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth, newHeight, cfg): pass # nothing to", "image=self.image) originX = originY = radius = cfg.size // 2 # 3.x //", "# face, tick colors hh, mh, sh, cog = 'black', 'navy', 'blue', 'red'", "dir(ClockConfig): # fill 
default config obj, try: # from \"-attr val\" cmd args", "and 'PM') or 'AM' # 0...23 hour = (hour % 12) or 12", "self.secsHand = self.cog = None def drawClockface(self, cfg): # on start and resize", ".01 self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm, END,", "!= self.lastSec: self.lastSec = sec ampm = ((hour >= 12) and 'PM') or", "# will skip __x__ internals except: continue else: if ix in range(1, len(argv)-1):", "if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update() def onResize(self, event): if", "tick * (360.0 / units) radiansPerDegree = math.pi / 180 pointX = int(", "self.display.pack_forget() if self.display == self.analogDisplay: self.display = self.digitalDisplay else: self.display = self.analogDisplay self.display.pack(side=TOP,", "if sys.platform[:3] == 'win': # full screen on Windows win.state('zoomed') ############################################################################### # Standalone", "self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD: could expand, and", "ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES,", "in self.hour, self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD: could", "draw ovals, picture try: self.image = PhotoImage(file=cfg.picture) # bkground except: self.image = BitmapImage(file=cfg.picture)", "############################################################################### # Program run ############################################################################### if __name__ == '__main__': def getOptions(config, argv): for", "#myclock = ClockWindow(config, Tk()) # parent is Tk root if standalone #myclock =", 
"'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() # raise above siblings if", "self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD: could expand, and scale", "* math.sin(angle * radiansPerDegree) )) pointY = int( round( radius * math.cos(angle *", "new custom Tk, Toplevel for icons, etc. from PP4E.Gui.Tools.windows import PopupWindow, MainWindow class", "askinteger import math, time, sys ############################################################################### # Option configuration classes ############################################################################### class ClockConfig:", "self.create_line(originX, originY, sx, sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size * .01", "hands, center picture = None # face photo file class PhotoClockConfig(ClockConfig): # sample", "= 'beige', 'brown' # face, tick colors hh, mh, sh, cog = 'black',", "a clock GUI in Python/tkinter. With both analog and digital display modes, a", "TBD: could expand, and scale font on resize def onUpdate(self, hour, mins, secs,", "= radius = cfg.size // 2 # 3.x div hour = hour +", "0, END) self.insert(self.ampm, END, ampm) def onResize(self, newWidth, newHeight, cfg): newSize = min(newWidth,", "msg; window icon. 
New in 2.1: updated to run under Python 3.X (2.X", "def onTimer(self): secsSinceEpoch = time.time() timeTuple = time.localtime(secsSinceEpoch) hour, min, sec = timeTuple[3:6]", "# Digital display object ############################################################################### class DigitalDisplay(Frame): def __init__(self, parent, cfg): Frame.__init__(self, parent)", "radius, originX, originY): angle = tick * (360.0 / units) radiansPerDegree = math.pi", ".90), originX, originY) sx, sy = self.point(secs, 60, (radius * .95), originX, originY)", "self.mins = Label(self) self.secs = Label(self) self.ampm = Label(self) for label in self.hour,", "self.cfg) self.dateLabel = Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize)", "# face photo file class PhotoClockConfig(ClockConfig): # sample configuration size = 320 picture", "fg=cfg.fg) label.pack(side=LEFT) # TBD: could expand, and scale font on resize def onUpdate(self,", "12, radius-6, originX, originY) self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) # hours self.ampm =", "ix in range(1, len(argv)-1): if type(getattr(ClockConfig, attr)) == int: setattr(config, attr, int(argv[ix+1])) else:", "if sec != self.lastSec: self.lastSec = sec ampm = ((hour >= 12) and", "= time.localtime(secsSinceEpoch) hour, min, sec = timeTuple[3:6] if sec != self.lastSec: self.lastSec =", "/ 60.0) hx, hy = self.point(hour, 12, (radius * .80), originX, originY) mx,", "askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds = secs * 60 def onCountdownExpire(self): # caveat:", "math.cos(angle * radiansPerDegree) )) return (pointX + originX+1), (originY+1 - pointY) def onUpdate(self,", "composite object ############################################################################### ChecksPerSec = 10 # second change timer class Clock(Frame): def", "# draw ovals, picture try: self.image = 
PhotoImage(file=cfg.picture) # bkground except: self.image =", "'beige', 'brown' # face, tick colors hh, mh, sh, cog = 'black', 'navy',", "hy, width=(cfg.size * .04), arrow='last', arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX, originY, mx, my,", ">= 12) and 'PM') or 'AM' # 0...23 hour = (hour % 12)", "'AM' # 0...23 hour = (hour % 12) or 12 # 12..11 self.display.onUpdate(hour,", "size = 320 picture = '../gifs/ora-pp.gif' bg, hh, mh = 'white', 'blue', 'orange'", "hh, mh, sh, cog = 'black', 'navy', 'blue', 'red' # clock hands, center", "range(60): x, y = self.point(i, 60, radius-6, originX, originY) self.create_rectangle(x-1, y-1, x+1, y+1,", "(mins / 60.0) hx, hy = self.point(hour, 12, (radius * .80), originX, originY)", "parent) self.pack(expand=YES, fill=BOTH) title = appname if name: title = appname + '", "2.1: a clock GUI in Python/tkinter. With both analog and digital display modes,", "else: setattr(config, attr, argv[ix+1]) #config = PhotoClockConfig() config = ClockConfig() if len(sys.argv) >=", "y+1, fill=cfg.fg) # mins for i in range(12): x, y = self.point(i, 12,", "and scale font on resize def onUpdate(self, hour, mins, secs, ampm, cfg): mins", "= self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self, event): self.labelOn += 1 if self.labelOn", "def onToggleLabel(self, event): self.labelOn += 1 if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X) else:", "round( radius * math.cos(angle * radiansPerDegree) )) return (pointX + originX+1), (originY+1 -", "def __init__(self, parent, cfg): Frame.__init__(self, parent) self.hour = Label(self) self.mins = Label(self) self.secs", "# redraw hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX = originY = radius", "% x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) 
self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth,", "self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth, newHeight, cfg): pass #", "# mins for i in range(12): x, y = self.point(i, 12, radius-6, originX,", "self.lastSec = self.lastMin = -1 self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent):", "(hour % 12) or 12 # 12..11 self.display.onUpdate(hour, min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch))", "mins, secs, ampm, cfg): mins = str(mins).zfill(2) # or '%02d' % x self.hour.config(text=str(hour),", "newSize = min(newWidth, newHeight) #print('analog onResize', cfg.size+4, newSize) if newSize != cfg.size+4: cfg.size", "config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg = config self.makeWidgets(parent) # children are packed but", "bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() # raise above siblings if sys.platform[:3] ==", "= self.point(hour, 12, (radius * .80), originX, originY) mx, my = self.point(mins, 60,", "min(newWidth, newHeight) #print('analog onResize', cfg.size+4, newSize) if newSize != cfg.size+4: cfg.size = newSize-4", "secs, ampm, cfg): # on timer callback if self.cog: # redraw hands, cog", "// 2 # 3.x // div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX = originY", "= None # face photo file class PhotoClockConfig(ClockConfig): # sample configuration size =", "indicator win = Toplevel() msg = Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'),", "len(sys.argv) >= 2: getOptions(config, sys.argv) # clock.py -size n -bg 'blue'... 
#myclock =", "self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget()", "for icons, etc. from PP4E.Gui.Tools.windows import PopupWindow, MainWindow class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig,", "self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm, END, ampm)", "originX = originY = radius = cfg.size // 2 # 3.x // div", "Clock(Frame): def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg = config self.makeWidgets(parent) # children", "time, sys ############################################################################### # Option configuration classes ############################################################################### class ClockConfig: # defaults--override in", "originY) self.create_rectangle(x-1, y-1, x+1, y+1, fill=cfg.fg) # mins for i in range(12): x,", "ix = argv.index('-' + attr) # will skip __x__ internals except: continue else:", "Clock(config, self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname, name)", "math, time, sys ############################################################################### # Option configuration classes ############################################################################### class ClockConfig: # defaults--override", "grid me self.display = self.digitalDisplay self.lastSec = self.lastMin = -1 self.countdownSeconds = 0", "= Label(self) self.mins = Label(self) self.secs = Label(self) self.ampm = Label(self) for label", "# 3.x // trunc int div def onCountdownSec(self, event): secs = askinteger('Countdown', 'Seconds?')", "start and resize if 
cfg.picture: # draw ovals, picture try: self.image = PhotoImage(file=cfg.picture)", "self.cfg) def onTimer(self): secsSinceEpoch = time.time() timeTuple = time.localtime(secsSinceEpoch) hour, min, sec =", "picture = None # face photo file class PhotoClockConfig(ClockConfig): # sample configuration size", "ClockWindow(Clock): def __init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH) title =", "def __init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH) title = appname", "12, (radius * .80), originX, originY) mx, my = self.point(mins, 60, (radius *", "def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg = config self.makeWidgets(parent) # children are", "2 # 3.x div hour = hour + (mins / 60.0) hx, hy", "onCountdownMin(self, event): secs = askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds = secs * 60", "def point(self, tick, units, radius, originX, originY): angle = tick * (360.0 /", "originY, sx, sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size * .01 self.cog", "= config self.makeWidgets(parent) # children are packed but self.labelOn = 0 # clients", "win = Toplevel() msg = Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white',", "GUI in Python/tkinter. 
With both analog and digital display modes, a pop-up date", "class PhotoClockConfig(ClockConfig): # sample configuration size = 320 picture = '../gifs/ora-pp.gif' bg, hh,", "3.X (2.X no longer supported) ############################################################################### \"\"\" from tkinter import * from tkinter.simpledialog", "for i in range(60): x, y = self.point(i, 60, radius-6, originX, originY) self.create_rectangle(x-1,", "= self.point(mins, 60, (radius * .90), originX, originY) sx, sy = self.point(secs, 60,", "== self.analogDisplay: self.display = self.digitalDisplay else: self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def", "hour, mins, secs, ampm, cfg): mins = str(mins).zfill(2) # or '%02d' % x", "redraw here ############################################################################### # Analog display object ############################################################################### class AnalogDisplay(Canvas): def __init__(self, parent,", "self.hour = Label(self) self.mins = Label(self) self.secs = Label(self) self.ampm = Label(self) for", "fill=cfg.sh) cogsz = cfg.size * .01 self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog)", "custom Tk, Toplevel for icons, etc. 
from PP4E.Gui.Tools.windows import PopupWindow, MainWindow class ClockPopup(PopupWindow):", "screen on Windows win.state('zoomed') ############################################################################### # Standalone clocks ############################################################################### appname = 'PyClock 2.1'", "but self.labelOn = 0 # clients pack or grid me self.display = self.digitalDisplay", "both analog and digital display modes, a pop-up date label, clock face images,", "cfg): mins = str(mins).zfill(2) # or '%02d' % x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4)", "analog and digital display modes, a pop-up date label, clock face images, general", "icon. New in 2.1: updated to run under Python 3.X (2.X no longer", "etc. May be run both standalone, or embedded (attached) in other GUIs that", "width=height bg, fg = 'beige', 'brown' # face, tick colors hh, mh, sh,", "80, 'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() # raise above siblings", "self.countdownSeconds = secs * 60 def onCountdownExpire(self): # caveat: only one active, no", "' - ' + name self.master.title(title) # master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ###############################################################################", "window icon. 
New in 2.1: updated to run under Python 3.X (2.X no", "self.secsHand = self.create_line(originX, originY, sx, sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size", "self.image.width()) // 2 # center it imgy = (cfg.size - self.image.height()) // 2", "default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program run ############################################################################### if __name__ == '__main__': def", "ChecksPerSec, self.onTimer) # run N times per second # 3.x // trunc int", "* 60 def onCountdownExpire(self): # caveat: only one active, no progress indicator win", "= PhotoClockConfig() config = ClockConfig() if len(sys.argv) >= 2: getOptions(config, sys.argv) # clock.py", "* radiansPerDegree) )) return (pointX + originX+1), (originY+1 - pointY) def onUpdate(self, hour,", "mins = str(mins).zfill(2) # or '%02d' % x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs),", "self.dchars(self.ampm, 0, END) self.insert(self.ampm, END, ampm) def onResize(self, newWidth, newHeight, cfg): newSize =", "#print('analog onResize', cfg.size+4, newSize) if newSize != cfg.size+4: cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg)", "// 2 # 3.x div hour = hour + (mins / 60.0) hx,", "############################################################################### # Digital display object ############################################################################### class DigitalDisplay(Frame): def __init__(self, parent, cfg): Frame.__init__(self,", "width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth, newHeight, cfg): pass", "originX = originY = radius = cfg.size // 2 # 3.x div hour", "############################################################################### # 
Clock composite object ############################################################################### ChecksPerSec = 10 # second change timer", "self.cog = None def drawClockface(self, cfg): # on start and resize if cfg.picture:", "= self.point(i, 12, radius-6, originX, originY) self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) # hours", "radius * math.cos(angle * radiansPerDegree) )) return (pointX + originX+1), (originY+1 - pointY)", "int: setattr(config, attr, int(argv[ix+1])) else: setattr(config, attr, argv[ix+1]) #config = PhotoClockConfig() config =", "# full screen on Windows win.state('zoomed') ############################################################################### # Standalone clocks ############################################################################### appname =", "str(mins).zfill(2) # or '%02d' % x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm),", "self.insert(self.ampm, END, ampm) def onResize(self, newWidth, newHeight, cfg): newSize = min(newWidth, newHeight) #print('analog", ")) return (pointX + originX+1), (originY+1 - pointY) def onUpdate(self, hour, mins, secs,", "self.image = PhotoImage(file=cfg.picture) # bkground except: self.image = BitmapImage(file=cfg.picture) # save ref imgx", "div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX = originY = radius = cfg.size //", "# Option configuration classes ############################################################################### class ClockConfig: # defaults--override in instance or subclass", "fill=X) else: self.dateLabel.pack_forget() self.update() def onResize(self, event): if event.widget == self.display: self.display.onResize(event.width, event.height,", "y-3, x+3, y+3, fill=cfg.fg) # hours self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg) def", "// trunc int div def onCountdownSec(self, event): secs = askinteger('Countdown', 
'Seconds?') if secs:", "% 2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update() def onResize(self, event): if event.widget ==", "self.hour, self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD: could expand,", "= self.create_line(originX, originY, sx, sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size *", "name: title = appname + ' - ' + name self.master.title(title) # master=parent", "import PopupWindow, MainWindow class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name) clock", "cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX = originY = radius = cfg.size //", "config obj, try: # from \"-attr val\" cmd args ix = argv.index('-' +", "appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) # b/w compat: manual window", "default config obj, try: # from \"-attr val\" cmd args ix = argv.index('-'", "class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name) clock = Clock(config, self)", "tkinter import * from tkinter.simpledialog import askinteger import math, time, sys ############################################################################### #", "width=4) def onResize(self, newWidth, newHeight, cfg): pass # nothing to redraw here ###############################################################################", "self.countdownSeconds == 0: self.onCountdownExpire() # countdown timer self.after(1000 // ChecksPerSec, self.onTimer) # run", "def onSwitchMode(self, event): self.display.pack_forget() if self.display == self.analogDisplay: self.display = self.digitalDisplay else: self.display", "# 0...23 hour = (hour % 12) or 12 # 12..11 self.display.onUpdate(hour, min,", "self.cfg = config self.makeWidgets(parent) # children are 
packed but self.labelOn = 0 #", "# defaults--override in instance or subclass size = 200 # width=height bg, fg", "def makeWidgets(self, parent): self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel =", "run N times per second # 3.x // trunc int div def onCountdownSec(self,", "mins for i in range(12): x, y = self.point(i, 12, radius-6, originX, originY)", "= math.pi / 180 pointX = int( round( radius * math.sin(angle * radiansPerDegree)", "or embedded (attached) in other GUIs that need a clock. New in 2.0:", "sec != self.lastSec: self.lastSec = sec ampm = ((hour >= 12) and 'PM')", "= Toplevel() msg = Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy')", "if name: title = appname + ' - ' + name self.master.title(title) #", "in 2.1: updated to run under Python 3.X (2.X no longer supported) ###############################################################################", "2 # 3.x // div for i in range(60): x, y = self.point(i,", "trunc int div def onCountdownSec(self, event): secs = askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds", "self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget() if self.display == self.analogDisplay:", "ref imgx = (cfg.size - self.image.width()) // 2 # center it imgy =", "originX+1), (originY+1 - pointY) def onUpdate(self, hour, mins, secs, ampm, cfg): # on", "# parent is Tk root if standalone #myclock = ClockPopup(ClockConfig(), 'popup') myclock =", "= radius = cfg.size // 2 # 3.x // div for i in", "'../gifs/ora-pp.gif' bg, hh, mh = 'white', 'blue', 'orange' ############################################################################### # Digital display object", "= time.time() timeTuple = time.localtime(secsSinceEpoch) hour, min, sec = timeTuple[3:6] if sec !=", "def 
onUpdate(self, hour, mins, secs, ampm, cfg): # on timer callback if self.cog:", "on timer callback if self.cog: # redraw hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand)", "self.display = self.digitalDisplay self.lastSec = self.lastMin = -1 self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer()", "# 12..11 self.display.onUpdate(hour, min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if self.countdownSeconds", "__init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) class", "modes, a pop-up date label, clock face images, general resizing, etc. May be", "New in 2.0: s/m keys set seconds/minutes timer for pop-up msg; window icon.", "\"\"\" ############################################################################### PyClock 2.1: a clock GUI in Python/tkinter. With both analog and", "photo file class PhotoClockConfig(ClockConfig): # sample configuration size = 320 picture = '../gifs/ora-pp.gif'", "and resize if cfg.picture: # draw ovals, picture try: self.image = PhotoImage(file=cfg.picture) #", "div for i in range(60): x, y = self.point(i, 60, radius-6, originX, originY)", "int( round( radius * math.sin(angle * radiansPerDegree) )) pointY = int( round( radius", "= AnalogDisplay(self, self.cfg) self.dateLabel = Label(self, bd=3, bg='red', fg='blue') parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel)", "width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth, newHeight, cfg): pass # nothing", "msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() # raise above", "attr, argv[ix+1]) #config = PhotoClockConfig() config = ClockConfig() if len(sys.argv) >= 
2: getOptions(config,", "(cfg.size - self.image.height()) // 2 # 3.x // div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image)", "originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm, END, ampm) def onResize(self, newWidth, newHeight,", "config self.makeWidgets(parent) # children are packed but self.labelOn = 0 # clients pack", "self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget() if self.display == self.analogDisplay: self.display =", "originY, mx, my, width=(cfg.size * .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = self.create_line(originX, originY,", "if newSize != cfg.size+4: cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate called next", "date label, clock face images, general resizing, etc. May be run both standalone,", "def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH)", "\"-attr val\" cmd args ix = argv.index('-' + attr) # will skip __x__", "pointX = int( round( radius * math.sin(angle * radiansPerDegree) )) pointY = int(", "units) radiansPerDegree = math.pi / 180 pointX = int( round( radius * math.sin(angle", "tkinter.simpledialog import askinteger import math, time, sys ############################################################################### # Option configuration classes ###############################################################################", "self.delete(self.secsHand) originX = originY = radius = cfg.size // 2 # 3.x div", "'PM') or 'AM' # 0...23 hour = (hour % 12) or 12 #", "clock face images, general resizing, etc. 
May be run both standalone, or embedded", "(radius * .80), originX, originY) mx, my = self.point(mins, 60, (radius * .90),", "# run N times per second # 3.x // trunc int div def", "# save ref imgx = (cfg.size - self.image.width()) // 2 # center it", "object ############################################################################### class DigitalDisplay(Frame): def __init__(self, parent, cfg): Frame.__init__(self, parent) self.hour = Label(self)", "= (hour % 12) or 12 # 12..11 self.display.onUpdate(hour, min, sec, ampm, self.cfg)", "60, (radius * .90), originX, originY) sx, sy = self.point(secs, 60, (radius *", "event): secs = askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds = secs def onCountdownMin(self, event):", "self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth, newHeight, cfg):", "appname = 'PyClock 2.1' # use new custom Tk, Toplevel for icons, etc.", "self.hourHand = self.create_line(originX, originY, hx, hy, width=(cfg.size * .04), arrow='last', arrowshape=(25,25,15), fill=cfg.hh) self.minsHand", "originY+cogsz, originX+cogsz, originY-cogsz, fill=cfg.cog) self.dchars(self.ampm, 0, END) self.insert(self.ampm, END, ampm) def onResize(self, newWidth,", "'orange' ############################################################################### # Digital display object ############################################################################### class DigitalDisplay(Frame): def __init__(self, parent, cfg):", "master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program run ############################################################################### if __name__ ==", "configuration classes ############################################################################### class ClockConfig: # 
defaults--override in instance or subclass size =", "except: self.image = BitmapImage(file=cfg.picture) # save ref imgx = (cfg.size - self.image.width()) //", "hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX = originY = radius = cfg.size", "parent=None): Frame.__init__(self, parent) self.cfg = config self.makeWidgets(parent) # children are packed but self.labelOn", "def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH)", "hx, hy, width=(cfg.size * .04), arrow='last', arrowshape=(25,25,15), fill=cfg.hh) self.minsHand = self.create_line(originX, originY, mx,", "-size n -bg 'blue'... #myclock = ClockWindow(config, Tk()) # parent is Tk root", "cfg): pass # nothing to redraw here ############################################################################### # Analog display object ###############################################################################", "range(12): x, y = self.point(i, 12, radius-6, originX, originY) self.create_rectangle(x-3, y-3, x+3, y+3,", "self.makeWidgets(parent) # children are packed but self.labelOn = 0 # clients pack or", "msg = Button(win, text='Timer Expired!', command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10)", "May be run both standalone, or embedded (attached) in other GUIs that need", "Label(self) self.secs = Label(self) self.ampm = Label(self) for label in self.hour, self.mins, self.secs,", "onToggleLabel(self, event): self.labelOn += 1 if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget()", "2 # 3.x // div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX = originY =", "############################################################################### if __name__ == '__main__': def getOptions(config, argv): for attr in 
dir(ClockConfig): #", "images, general resizing, etc. May be run both standalone, or embedded (attached) in", "onUpdate called next ############################################################################### # Clock composite object ############################################################################### ChecksPerSec = 10 #", "MainWindow class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self, appname, name) clock = Clock(config,", "originY) sx, sy = self.point(secs, 60, (radius * .95), originX, originY) self.hourHand =", "import * from tkinter.simpledialog import askinteger import math, time, sys ############################################################################### # Option", "longer supported) ############################################################################### \"\"\" from tkinter import * from tkinter.simpledialog import askinteger import", "# countdown timer self.after(1000 // ChecksPerSec, self.onTimer) # run N times per second", "' + name self.master.title(title) # master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program", "def getOptions(config, argv): for attr in dir(ClockConfig): # fill default config obj, try:", "math.sin(angle * radiansPerDegree) )) pointY = int( round( radius * math.cos(angle * radiansPerDegree)", "originY): angle = tick * (360.0 / units) radiansPerDegree = math.pi / 180", "secs, ampm, cfg): mins = str(mins).zfill(2) # or '%02d' % x self.hour.config(text=str(hour), width=4)", "fill=BOTH) title = appname if name: title = appname + ' - '", "= self.digitalDisplay self.lastSec = self.lastMin = -1 self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer() def", "type(getattr(ClockConfig, attr)) == int: setattr(config, attr, int(argv[ix+1])) else: setattr(config, attr, argv[ix+1]) #config =", "3.x div hour = hour + (mins / 60.0) hx, hy = 
self.point(hour,", "parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event): self.display.pack_forget() if", "* .03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = self.create_line(originX, originY, sx, sy, width=1, arrow='last',", "pointY) def onUpdate(self, hour, mins, secs, ampm, cfg): # on timer callback if", "# on timer callback if self.cog: # redraw hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand)", "name=''): MainWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) # b/w compat:", "+ name self.master.title(title) # master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program run", "one active, no progress indicator win = Toplevel() msg = Button(win, text='Timer Expired!',", "clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname, name) clock =", "= str(mins).zfill(2) # or '%02d' % x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4)", "# raise above siblings if sys.platform[:3] == 'win': # full screen on Windows", "skip __x__ internals except: continue else: if ix in range(1, len(argv)-1): if type(getattr(ClockConfig,", "mins, secs, ampm, cfg): # on timer callback if self.cog: # redraw hands,", "newSize != cfg.size+4: cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate called next ###############################################################################", "fill=cfg.fg) def point(self, tick, units, radius, originX, originY): angle = tick * (360.0", "clock = Clock(config, self) clock.pack(expand=YES, 
fill=BOTH) # b/w compat: manual window borders, passed-in", "(2.X no longer supported) ############################################################################### \"\"\" from tkinter import * from tkinter.simpledialog import", "window borders, passed-in parent class ClockWindow(Clock): def __init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self, config,", "'brown' # face, tick colors hh, mh, sh, cog = 'black', 'navy', 'blue',", "will skip __x__ internals except: continue else: if ix in range(1, len(argv)-1): if", "or '%02d' % x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4) def", "arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz = cfg.size * .01 self.cog = self.create_oval(originX-cogsz, originY+cogsz, originX+cogsz,", "radiansPerDegree = math.pi / 180 pointX = int( round( radius * math.sin(angle *", "= None def drawClockface(self, cfg): # on start and resize if cfg.picture: #", "self.image = BitmapImage(file=cfg.picture) # save ref imgx = (cfg.size - self.image.width()) // 2", "= self.create_text(3, 3, anchor=NW, fill=cfg.fg) def point(self, tick, units, radius, originX, originY): angle", "sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if self.countdownSeconds == 0: self.onCountdownExpire() #", "__name__ == '__main__': def getOptions(config, argv): for attr in dir(ClockConfig): # fill default", "self.update() def onResize(self, event): if event.widget == self.display: self.display.onResize(event.width, event.height, self.cfg) def onTimer(self):", "supported) ############################################################################### \"\"\" from tkinter import * from tkinter.simpledialog import askinteger import math,", "be run both standalone, or embedded (attached) in other GUIs that need a", "-bg 'blue'... 
#myclock = ClockWindow(config, Tk()) # parent is Tk root if standalone", "'blue'... #myclock = ClockWindow(config, Tk()) # parent is Tk root if standalone #myclock", "(radius * .95), originX, originY) self.hourHand = self.create_line(originX, originY, hx, hy, width=(cfg.size *", "makeWidgets(self, parent): self.digitalDisplay = DigitalDisplay(self, self.cfg) self.analogDisplay = AnalogDisplay(self, self.cfg) self.dateLabel = Label(self,", "for label in self.hour, self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) #", "class Clock(Frame): def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg = config self.makeWidgets(parent) #", "= newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate called next ############################################################################### # Clock composite object", "argv.index('-' + attr) # will skip __x__ internals except: continue else: if ix", "= Label(self) self.secs = Label(self) self.ampm = Label(self) for label in self.hour, self.mins,", "compat: manual window borders, passed-in parent class ClockWindow(Clock): def __init__(self, config=ClockConfig, parent=None, name=''):", "else: if ix in range(1, len(argv)-1): if type(getattr(ClockConfig, attr)) == int: setattr(config, attr,", "name=''): Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH) title = appname if name: title =", "self.drawClockface(cfg) self.hourHand = self.minsHand = self.secsHand = self.cog = None def drawClockface(self, cfg):", "DigitalDisplay(Frame): def __init__(self, parent, cfg): Frame.__init__(self, parent) self.hour = Label(self) self.mins = Label(self)", "ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''): MainWindow.__init__(self, appname, name) clock = Clock(config, self) clock.pack(expand=YES,", "b/w compat: manual window borders, passed-in parent class ClockWindow(Clock): def __init__(self, 
config=ClockConfig, parent=None,", "caveat: only one active, no progress indicator win = Toplevel() msg = Button(win,", "attr, int(argv[ix+1])) else: setattr(config, attr, argv[ix+1]) #config = PhotoClockConfig() config = ClockConfig() if", "radiansPerDegree) )) return (pointX + originX+1), (originY+1 - pointY) def onUpdate(self, hour, mins,", "face images, general resizing, etc. May be run both standalone, or embedded (attached)", "self.display: self.display.onResize(event.width, event.height, self.cfg) def onTimer(self): secsSinceEpoch = time.time() timeTuple = time.localtime(secsSinceEpoch) hour,", "= sec ampm = ((hour >= 12) and 'PM') or 'AM' # 0...23", "hour, min, sec = timeTuple[3:6] if sec != self.lastSec: self.lastSec = sec ampm", "in other GUIs that need a clock. New in 2.0: s/m keys set", "Label(self) self.ampm = Label(self) for label in self.hour, self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN,", "+ originX+1), (originY+1 - pointY) def onUpdate(self, hour, mins, secs, ampm, cfg): #", "arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = self.create_line(originX, originY, sx, sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz", "12..11 self.display.onUpdate(hour, min, sec, ampm, self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if self.countdownSeconds ==", "general resizing, etc. 
May be run both standalone, or embedded (attached) in other", "parent is Tk root if standalone #myclock = ClockPopup(ClockConfig(), 'popup') myclock = ClockMain(config)", "secs def onCountdownMin(self, event): secs = askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds = secs", "title = appname + ' - ' + name self.master.title(title) # master=parent or", "self.ampm = Label(self) for label in self.hour, self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg,", "- self.image.height()) // 2 # 3.x // div self.create_image(imgx+1, imgy+1, anchor=NW, image=self.image) originX", "= timeTuple[3:6] if sec != self.lastSec: self.lastSec = sec ampm = ((hour >=", "# use new custom Tk, Toplevel for icons, etc. from PP4E.Gui.Tools.windows import PopupWindow,", "face photo file class PhotoClockConfig(ClockConfig): # sample configuration size = 320 picture =", "if secs: self.countdownSeconds = secs * 60 def onCountdownExpire(self): # caveat: only one", "radius = cfg.size // 2 # 3.x // div for i in range(60):", "seconds/minutes timer for pop-up msg; window icon. 
New in 2.1: updated to run", "could expand, and scale font on resize def onUpdate(self, hour, mins, secs, ampm,", "name) clock = Clock(config, self) clock.pack(expand=YES, fill=BOTH) class ClockMain(MainWindow): def __init__(self, config=ClockConfig, name=''):", "= 200 # width=height bg, fg = 'beige', 'brown' # face, tick colors", "= self.minsHand = self.secsHand = self.cog = None def drawClockface(self, cfg): # on", "win.state('zoomed') ############################################################################### # Standalone clocks ############################################################################### appname = 'PyClock 2.1' # use new", "onTimer(self): secsSinceEpoch = time.time() timeTuple = time.localtime(secsSinceEpoch) hour, min, sec = timeTuple[3:6] if", "here ############################################################################### # Analog display object ############################################################################### class AnalogDisplay(Canvas): def __init__(self, parent, cfg):", "originX, originY) self.hourHand = self.create_line(originX, originY, hx, hy, width=(cfg.size * .04), arrow='last', arrowshape=(25,25,15),", "self.lastMin = -1 self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent): self.digitalDisplay =", "event): self.labelOn += 1 if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update()", "if cfg.picture: # draw ovals, picture try: self.image = PhotoImage(file=cfg.picture) # bkground except:", "== 'win': # full screen on Windows win.state('zoomed') ############################################################################### # Standalone clocks ###############################################################################", "PyClock 2.1: a clock GUI in Python/tkinter. 
With both analog and digital display", "self.delete(self.minsHand) self.delete(self.secsHand) originX = originY = radius = cfg.size // 2 # 3.x", "in instance or subclass size = 200 # width=height bg, fg = 'beige',", "= Label(self) self.ampm = Label(self) for label in self.hour, self.mins, self.secs, self.ampm: label.config(bd=4,", "# 3.x div hour = hour + (mins / 60.0) hx, hy =", "# clients pack or grid me self.display = self.digitalDisplay self.lastSec = self.lastMin =", "# caveat: only one active, no progress indicator win = Toplevel() msg =", "fill=cfg.fg) # mins for i in range(12): x, y = self.point(i, 12, radius-6,", "bg, hh, mh = 'white', 'blue', 'orange' ############################################################################### # Digital display object ###############################################################################", "try: # from \"-attr val\" cmd args ix = argv.index('-' + attr) #", "or 'AM' # 0...23 hour = (hour % 12) or 12 # 12..11", "= -1 self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent): self.digitalDisplay = DigitalDisplay(self,", "center picture = None # face photo file class PhotoClockConfig(ClockConfig): # sample configuration", "320 picture = '../gifs/ora-pp.gif' bg, hh, mh = 'white', 'blue', 'orange' ############################################################################### #", "for i in range(12): x, y = self.point(i, 12, radius-6, originX, originY) self.create_rectangle(x-3,", "or grid me self.display = self.digitalDisplay self.lastSec = self.lastMin = -1 self.countdownSeconds =", "units, radius, originX, originY): angle = tick * (360.0 / units) radiansPerDegree =", "60 def onCountdownExpire(self): # caveat: only one active, no progress indicator win =", "self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) # hours self.ampm = self.create_text(3, 3, anchor=NW, fill=cfg.fg)", "in Python/tkinter. 
With both analog and digital display modes, a pop-up date label,", "event): secs = askinteger('Countdown', 'Minutes') if secs: self.countdownSeconds = secs * 60 def", "int( round( radius * math.cos(angle * radiansPerDegree) )) return (pointX + originX+1), (originY+1", "etc. from PP4E.Gui.Tools.windows import PopupWindow, MainWindow class ClockPopup(PopupWindow): def __init__(self, config=ClockConfig, name=''): PopupWindow.__init__(self,", "self.labelOn += 1 if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update() def", "ampm, cfg): # on timer callback if self.cog: # redraw hands, cog self.delete(self.cog)", "x self.hour.config(text=str(hour), width=4) self.mins.config(text=str(mins), width=4) self.secs.config(text=str(secs), width=4) self.ampm.config(text=str(ampm), width=4) def onResize(self, newWidth, newHeight,", "Frame.__init__(self, parent) self.cfg = config self.makeWidgets(parent) # children are packed but self.labelOn =", "onResize(self, event): if event.widget == self.display: self.display.onResize(event.width, event.height, self.cfg) def onTimer(self): secsSinceEpoch =", "# second change timer class Clock(Frame): def __init__(self, config=ClockConfig, parent=None): Frame.__init__(self, parent) self.cfg", "cfg): Frame.__init__(self, parent) self.hour = Label(self) self.mins = Label(self) self.secs = Label(self) self.ampm", "under Python 3.X (2.X no longer supported) ############################################################################### \"\"\" from tkinter import *", "redraw hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX = originY = radius =", "fill=cfg.mh) self.secsHand = self.create_line(originX, originY, sx, sy, width=1, arrow='last', arrowshape=(5,10,5), fill=cfg.sh) cogsz =", "= appname if name: title = appname + ' - ' + name", ".03), arrow='last', arrowshape=(20,20,10), fill=cfg.mh) self.secsHand = 
self.create_line(originX, originY, sx, sy, width=1, arrow='last', arrowshape=(5,10,5),", "self.drawClockface(cfg) # onUpdate called next ############################################################################### # Clock composite object ############################################################################### ChecksPerSec =", "parent.bind('<ButtonPress-1>', self.onSwitchMode) parent.bind('<ButtonPress-3>', self.onToggleLabel) parent.bind('<Configure>', self.onResize) parent.bind('<KeyPress-s>', self.onCountdownSec) parent.bind('<KeyPress-m>', self.onCountdownMin) def onSwitchMode(self, event):", "config = ClockConfig() if len(sys.argv) >= 2: getOptions(config, sys.argv) # clock.py -size n", "class ClockWindow(Clock): def __init__(self, config=ClockConfig, parent=None, name=''): Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH) title", "msg.pack(expand=YES, fill=BOTH) win.lift() # raise above siblings if sys.platform[:3] == 'win': # full", "event): if event.widget == self.display: self.display.onResize(event.width, event.height, self.cfg) def onTimer(self): secsSinceEpoch = time.time()", "fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() # raise above siblings if sys.platform[:3]", "parent) self.hour = Label(self) self.mins = Label(self) self.secs = Label(self) self.ampm = Label(self)", "event.height, self.cfg) def onTimer(self): secsSinceEpoch = time.time() timeTuple = time.localtime(secsSinceEpoch) hour, min, sec", "self.onTimer) # run N times per second # 3.x // trunc int div", "object ############################################################################### ChecksPerSec = 10 # second change timer class Clock(Frame): def __init__(self,", "None def drawClockface(self, cfg): # on start and resize if cfg.picture: # draw", "packed but self.labelOn = 0 # clients pack or grid me self.display =", "1 if self.countdownSeconds == 0: self.onCountdownExpire() # countdown timer self.after(1000 
// ChecksPerSec, self.onTimer)", "val\" cmd args ix = argv.index('-' + attr) # will skip __x__ internals", "label in self.hour, self.mins, self.secs, self.ampm: label.config(bd=4, relief=SUNKEN, bg=cfg.bg, fg=cfg.fg) label.pack(side=LEFT) # TBD:", "callback if self.cog: # redraw hands, cog self.delete(self.cog) self.delete(self.hourHand) self.delete(self.minsHand) self.delete(self.secsHand) originX =", "size = 200 # width=height bg, fg = 'beige', 'brown' # face, tick", "__init__(self, parent, cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand =", "def onResize(self, event): if event.widget == self.display: self.display.onResize(event.width, event.height, self.cfg) def onTimer(self): secsSinceEpoch", "command=win.destroy) msg.config(font=('courier', 80, 'normal'), fg='white', bg='navy') msg.config(padx=10, pady=10) msg.pack(expand=YES, fill=BOTH) win.lift() # raise", "tick colors hh, mh, sh, cog = 'black', 'navy', 'blue', 'red' # clock", "# onUpdate called next ############################################################################### # Clock composite object ############################################################################### ChecksPerSec = 10", "self.cfg) self.dateLabel.config(text=time.ctime(secsSinceEpoch)) self.countdownSeconds -= 1 if self.countdownSeconds == 0: self.onCountdownExpire() # countdown timer", "= askinteger('Countdown', 'Seconds?') if secs: self.countdownSeconds = secs def onCountdownMin(self, event): secs =", "display object ############################################################################### class AnalogDisplay(Canvas): def __init__(self, parent, cfg): Canvas.__init__(self, parent, width=cfg.size, height=cfg.size,", "'black', 'navy', 'blue', 'red' # clock hands, center picture = None # face", "newSize) if newSize != cfg.size+4: cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate called", "config=ClockConfig, 
parent=None, name=''): Clock.__init__(self, config, parent) self.pack(expand=YES, fill=BOTH) title = appname if name:", "############################################################################### \"\"\" from tkinter import * from tkinter.simpledialog import askinteger import math, time,", "setattr(config, attr, argv[ix+1]) #config = PhotoClockConfig() config = ClockConfig() if len(sys.argv) >= 2:", "height=cfg.size, bg=cfg.bg) self.drawClockface(cfg) self.hourHand = self.minsHand = self.secsHand = self.cog = None def", "parent, cfg): Frame.__init__(self, parent) self.hour = Label(self) self.mins = Label(self) self.secs = Label(self)", "label.pack(side=LEFT) # TBD: could expand, and scale font on resize def onUpdate(self, hour,", "cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate called next ############################################################################### # Clock composite", "# nothing to redraw here ############################################################################### # Analog display object ############################################################################### class AnalogDisplay(Canvas):", "-1 self.countdownSeconds = 0 self.onSwitchMode(None) self.onTimer() def makeWidgets(self, parent): self.digitalDisplay = DigitalDisplay(self, self.cfg)", "1 if self.labelOn % 2: self.dateLabel.pack(side=BOTTOM, fill=X) else: self.dateLabel.pack_forget() self.update() def onResize(self, event):", "sys.platform[:3] == 'win': # full screen on Windows win.state('zoomed') ############################################################################### # Standalone clocks", "pointY = int( round( radius * math.cos(angle * radiansPerDegree) )) return (pointX +", "originY) mx, my = self.point(mins, 60, (radius * .90), originX, originY) sx, sy", "resize def onUpdate(self, hour, mins, secs, ampm, cfg): mins = str(mins).zfill(2) # or", "Digital display object 
############################################################################### class DigitalDisplay(Frame): def __init__(self, parent, cfg): Frame.__init__(self, parent) self.hour", "ampm) def onResize(self, newWidth, newHeight, cfg): newSize = min(newWidth, newHeight) #print('analog onResize', cfg.size+4,", "self.display = self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self, event): self.labelOn += 1 if", "originY) self.create_rectangle(x-3, y-3, x+3, y+3, fill=cfg.fg) # hours self.ampm = self.create_text(3, 3, anchor=NW,", "from tkinter import * from tkinter.simpledialog import askinteger import math, time, sys ###############################################################################", "= 'white', 'blue', 'orange' ############################################################################### # Digital display object ############################################################################### class DigitalDisplay(Frame): def", "newWidth, newHeight, cfg): pass # nothing to redraw here ############################################################################### # Analog display", "file class PhotoClockConfig(ClockConfig): # sample configuration size = 320 picture = '../gifs/ora-pp.gif' bg,", "= 'black', 'navy', 'blue', 'red' # clock hands, center picture = None #", "hh, mh = 'white', 'blue', 'orange' ############################################################################### # Digital display object ############################################################################### class", "cfg.size+4: cfg.size = newSize-4 self.delete('all') self.drawClockface(cfg) # onUpdate called next ############################################################################### # Clock", "self.hourHand = self.minsHand = self.secsHand = self.cog = None def drawClockface(self, cfg): #", "timer self.after(1000 // ChecksPerSec, self.onTimer) # run N times per second # 3.x", "60, (radius * .95), originX, originY) self.hourHand = 
self.create_line(originX, originY, hx, hy, width=(cfg.size", "- self.image.width()) // 2 # center it imgy = (cfg.size - self.image.height()) //", "in range(60): x, y = self.point(i, 60, radius-6, originX, originY) self.create_rectangle(x-1, y-1, x+1,", "# master=parent or default self.master.protocol('WM_DELETE_WINDOW', self.quit) ############################################################################### # Program run ############################################################################### if __name__", "self.analogDisplay self.display.pack(side=TOP, expand=YES, fill=BOTH) def onToggleLabel(self, event): self.labelOn += 1 if self.labelOn %", "other GUIs that need a clock. New in 2.0: s/m keys set seconds/minutes", "pack or grid me self.display = self.digitalDisplay self.lastSec = self.lastMin = -1 self.countdownSeconds", "a clock. New in 2.0: s/m keys set seconds/minutes timer for pop-up msg;" ]
[ ": print('no factorial exists') elif n == 0 : print('the factorial is egal", "factorial is egal to 1') else : factorial = 1 for i in", "0 : print('the factorial is egal to 1') else : factorial = 1", "1 for i in range (1, n + 1) : factorial *= i", "if n < 0 : print('no factorial exists') elif n == 0 :", "factoriel if n < 0 : print('no factorial exists') elif n == 0", "factorial exists') elif n == 0 : print('the factorial is egal to 1')", "le factoriel if n < 0 : print('no factorial exists') elif n ==", "# Calculer le factoriel if n < 0 : print('no factorial exists') elif", "## Calculer et afficher le factoriel # taper directement le clavier n =", "int(input()) # Calculer le factoriel if n < 0 : print('no factorial exists')", "afficher le factoriel # taper directement le clavier n = int(input()) # Calculer", "factorial = 1 for i in range (1, n + 1) : factorial", ": print('the factorial is egal to 1') else : factorial = 1 for", "= 1 for i in range (1, n + 1) : factorial *=", ": factorial = 1 for i in range (1, n + 1) :", "directement le clavier n = int(input()) # Calculer le factoriel if n <", "le factoriel # taper directement le clavier n = int(input()) # Calculer le", "print('no factorial exists') elif n == 0 : print('the factorial is egal to", "clavier n = int(input()) # Calculer le factoriel if n < 0 :", "is egal to 1') else : factorial = 1 for i in range", "else : factorial = 1 for i in range (1, n + 1)", "= int(input()) # Calculer le factoriel if n < 0 : print('no factorial", "n == 0 : print('the factorial is egal to 1') else : factorial", "n = int(input()) # Calculer le factoriel if n < 0 : print('no", "== 0 : print('the factorial is egal to 1') else : factorial =", "egal to 1') else : factorial = 1 for i in range (1,", "et afficher le factoriel # taper directement le clavier n = int(input()) #", "Calculer et afficher le factoriel # taper directement le clavier n = int(input())", "Calculer le factoriel if n < 0 : print('no factorial exists') elif n", "to 1') 
else : factorial = 1 for i in range (1, n", "for i in range (1, n + 1) : factorial *= i print(factorial)", "n < 0 : print('no factorial exists') elif n == 0 : print('the", "elif n == 0 : print('the factorial is egal to 1') else :", "0 : print('no factorial exists') elif n == 0 : print('the factorial is", "exists') elif n == 0 : print('the factorial is egal to 1') else", "# taper directement le clavier n = int(input()) # Calculer le factoriel if", "taper directement le clavier n = int(input()) # Calculer le factoriel if n", "< 0 : print('no factorial exists') elif n == 0 : print('the factorial", "print('the factorial is egal to 1') else : factorial = 1 for i", "le clavier n = int(input()) # Calculer le factoriel if n < 0", "1') else : factorial = 1 for i in range (1, n +", "factoriel # taper directement le clavier n = int(input()) # Calculer le factoriel" ]
[ "core.transport.gateways.rabbitmq import RabbitMQAgentGateway, RabbitMQServiceGateway, RabbitMQChannelGateway from core.connectors import ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP':", "RabbitMQChannelGateway from core.connectors import ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP': { 'agent': RabbitMQAgentGateway, 'service':", "{ 'AMQP': { 'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway } } CONNECTORS_MAP =", "RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway } } CONNECTORS_MAP = { 'AMQP': ServiceGatewayHTTPConnector }", "from core.transport.gateways.rabbitmq import RabbitMQAgentGateway, RabbitMQServiceGateway, RabbitMQChannelGateway from core.connectors import ServiceGatewayHTTPConnector GATEWAYS_MAP = {", "RabbitMQServiceGateway, RabbitMQChannelGateway from core.connectors import ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP': { 'agent': RabbitMQAgentGateway,", "from core.connectors import ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP': { 'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway,", "core.connectors import ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP': { 'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel':", "'AMQP': { 'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway } } CONNECTORS_MAP = {", "ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP': { 'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway }", "'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway } } CONNECTORS_MAP = { 'AMQP': ServiceGatewayHTTPConnector", "{ 'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway } } CONNECTORS_MAP = { 'AMQP':", "import ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP': { 'agent': RabbitMQAgentGateway, 'service': 
RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway", "import RabbitMQAgentGateway, RabbitMQServiceGateway, RabbitMQChannelGateway from core.connectors import ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP': {", "RabbitMQAgentGateway, RabbitMQServiceGateway, RabbitMQChannelGateway from core.connectors import ServiceGatewayHTTPConnector GATEWAYS_MAP = { 'AMQP': { 'agent':", "= { 'AMQP': { 'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway } } CONNECTORS_MAP", "GATEWAYS_MAP = { 'AMQP': { 'agent': RabbitMQAgentGateway, 'service': RabbitMQServiceGateway, 'channel': RabbitMQChannelGateway } }" ]
[ "Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf == test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\" +", "test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if", "point_ref=0.5) split_test = list() # Cut Square top line_list = list() line_list.append(Segment(begin=0, end=1j))", "@pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert", "1)) line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 +", "end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test", "} ) @pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"],", "list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=-1j + 1))", "Top surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf == test_dict[\"exp_bot_surf\"],", "surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf == test_dict[\"exp_bot_surf\"], (", "+ 1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 + 0.5j) #", "== test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\" + 
str(test_dict[\"exp_bot_surf\"]) )", "end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list, label=\"test\",", "\"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\": 2, \"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\", split_test) def", "end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j) split_test.append( { \"surf\": surf, \"exp_top_surf\":", "res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf == test_dict[\"exp_top_surf\"], (", "surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if __name__ == \"__main__\": for", "res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"])", "str(res_top_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente Bot", "+ str(res_top_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente", "0.5j) # Cut Square bottom line_list = list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1,", "from pyleecan.Classes.Segment import Segment from pyleecan.Classes.SurfLine import SurfLine import pytest line_list = list()", "1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j) split_test.append(", "SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 + 0.5j) # Cut Square bottom line_list = list() line_list.append(Segment(begin=-1j,", "end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list, 
label=\"test\", point_ref=0.5 + 0.5j) # Cut Square", "\"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\": 2, \"is_join\": True, }", "test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf == test_dict[\"exp_top_surf\"],", "assert res_top_surf == test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\" +", "pytest line_list = list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1,", "line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf", "import Segment from pyleecan.Classes.SurfLine import SurfLine import pytest line_list = list() line_list.append(Segment(begin=-1j, end=1j))", "split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf", "0, \"Z2\": 2, \"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf", "1, end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test = list() # Cut Square", "list() # Cut Square top line_list = list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j +", "+ \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if __name__ == \"__main__\": for test_dict in split_test:", "+ 1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j) split_test.append( { \"surf\":", "exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j) 
split_test.append( { \"surf\": surf, \"exp_top_surf\": exp_top_surf,", "exp_bot_surf, \"Z1\": 0, \"Z2\": 2, \"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict):", "line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=-1j + 1)) line_list.append(Segment(begin=-1j", "split_test.append( { \"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\": 2, \"is_join\":", "surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\": 2, \"is_join\": True, } )", "res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf == test_dict[\"exp_top_surf\"], ( \"Differente", "end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j))", "Cut Square bottom line_list = list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j +", "( \"Differente Top surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf", "point_ref=0.5 - 0.5j) split_test.append( { \"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0,", "= test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf == test_dict[\"exp_top_surf\"], ( \"Differente Top", "line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1,", "1)) line_list.append(Segment(begin=1j + 1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) surf =", "test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\" + 
str(test_dict[\"exp_top_surf\"]) ) assert", ") assert res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\"", "end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf =", "= SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 + 0.5j) # Cut Square bottom line_list = list()", "+ 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j)", "Cut Square top line_list = list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j", "split_test = list() # Cut Square top line_list = list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j,", "Square bottom line_list = list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j + 1))", "SurfLine import pytest line_list = list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j", "end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf =", "{ \"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\": 2, \"is_join\": True,", "+ str(res_bot_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if __name__ == \"__main__\": for test_dict", "+ 1, end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test = list() # Cut", "line_list.append(Segment(begin=1j + 1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) surf = SurfLine(line_list=line_list,", "line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, 
end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list,", "- 0.5j) split_test.append( { \"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\":", "\"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if __name__ ==", "SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test = list() # Cut Square top line_list = list()", "from pyleecan.Classes.SurfLine import SurfLine import pytest line_list = list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j", "line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list,", "line_list.append(Segment(begin=1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5", "Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf == test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\" + str(res_top_surf)", "def test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf ==", "+ 1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\",", "1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5)", "bottom line_list = list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j + 1)) line_list.append(Segment(begin=-1j", "0.5j) split_test.append( { \"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\": 2,", "== 
test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) )", "top line_list = list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1,", "+ str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf)", "\"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\" +", "Segment from pyleecan.Classes.SurfLine import SurfLine import pytest line_list = list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j,", "surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test = list() # Cut Square top line_list", "= list() # Cut Square top line_list = list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j", "+ 1)) line_list.append(Segment(begin=1j + 1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) surf", "+ 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test =", "= list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1,", "line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 + 0.5j)", "Square top line_list = list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j +", "1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 + 0.5j) # Cut", "list() 
line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j))", "assert res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\" +", "1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j) split_test.append( { \"surf\": surf,", "= list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1,", "label=\"test\", point_ref=0.5 - 0.5j) split_test.append( { \"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\":", "exp_top_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 + 0.5j) # Cut Square bottom line_list =", "\"Z2\": 2, \"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf =", "line_list.append(Segment(begin=-1j + 1, end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test = list() #", "point_ref=0.5 + 0.5j) # Cut Square bottom line_list = list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0,", "\"Differente Top surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf ==", "end=0)) exp_top_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 + 0.5j) # Cut Square bottom line_list", "line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j) split_test.append( {", "( \"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if __name__", "is_join=test_dict[\"is_join\"], ) assert res_top_surf == 
test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\" + str(res_top_surf) +", "line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf", "str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\" + str(res_bot_surf) +", "label=\"test\", point_ref=0.5) split_test = list() # Cut Square top line_list = list() line_list.append(Segment(begin=0,", "line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 + 0.5j) # Cut Square bottom", "\"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"],", "pyleecan.Classes.Segment import Segment from pyleecan.Classes.SurfLine import SurfLine import pytest line_list = list() line_list.append(Segment(begin=-1j,", "import pytest line_list = list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j +", "\"Z1\": 0, \"Z2\": 2, \"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf,", "res_top_surf == test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"])", "+ 1)) line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1, end=0)) exp_top_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5", "list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=1)) line_list.append(Segment(begin=1, end=0))", "= list() line_list.append(Segment(begin=-1j, end=1j)) 
line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=-1j +", "# Cut Square bottom line_list = list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j", "line_list = list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1)) line_list.append(Segment(begin=1, end=-1j + 1)) line_list.append(Segment(begin=-1j +", "+ \"\\nExpected:\\n\" + str(test_dict[\"exp_top_surf\"]) ) assert res_bot_surf == test_dict[\"exp_bot_surf\"], ( \"Differente Bot surface:\\nResult:\\n\"", "pyleecan.Classes.SurfLine import SurfLine import pytest line_list = list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j +", "import SurfLine import pytest line_list = list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1))", "end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test = list() # Cut Square top", "end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=-1j + 1)) line_list.append(Segment(begin=-1j +", "2, \"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line(", "Bot surface:\\nResult:\\n\" + str(res_bot_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if __name__ == \"__main__\":", "test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], ) assert res_top_surf == test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\"", "# Cut Square top line_list = list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1))", ") assert res_top_surf == test_dict[\"exp_top_surf\"], ( \"Differente Top surface:\\nResult:\\n\" + str(res_top_surf) + \"\\nExpected:\\n\"", 
"end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 -", "1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) surf = SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test = list()", "+ 0.5j) # Cut Square bottom line_list = list() line_list.append(Segment(begin=-1j, end=0)) line_list.append(Segment(begin=0, end=1))", "\"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if __name__ == \"__main__\": for test_dict in split_test: test_split_line(test_dict)", "label=\"test\", point_ref=0.5 + 0.5j) # Cut Square bottom line_list = list() line_list.append(Segment(begin=-1j, end=0))", "= SurfLine(line_list=line_list, label=\"test\", point_ref=0.5) split_test = list() # Cut Square top line_list =", "True, } ) @pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"],", "+ str(test_dict[\"exp_bot_surf\"]) ) if __name__ == \"__main__\": for test_dict in split_test: test_split_line(test_dict) print(\"Done\")", "= SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j) split_test.append( { \"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\":", "SurfLine(line_list=line_list, label=\"test\", point_ref=0.5 - 0.5j) split_test.append( { \"surf\": surf, \"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf,", "end=1)) line_list.append(Segment(begin=1, end=-1j + 1)) line_list.append(Segment(begin=-1j + 1, end=-1j)) exp_bot_surf = SurfLine(line_list=line_list, label=\"test\",", "exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\": 2, \"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\", split_test)", "line_list = list() line_list.append(Segment(begin=-1j, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=-1j", ") 
@pytest.mark.parametrize(\"test_dict\", split_test) def test_split_line(test_dict): res_top_surf, res_bot_surf = test_dict[\"surf\"].split_line( Z1=test_dict[\"Z1\"], Z2=test_dict[\"Z2\"], is_join=test_dict[\"is_join\"], )", "str(res_bot_surf) + \"\\nExpected:\\n\" + str(test_dict[\"exp_bot_surf\"]) ) if __name__ == \"__main__\": for test_dict in", "\"exp_top_surf\": exp_top_surf, \"exp_bot_surf\": exp_bot_surf, \"Z1\": 0, \"Z2\": 2, \"is_join\": True, } ) @pytest.mark.parametrize(\"test_dict\",", "line_list = list() line_list.append(Segment(begin=0, end=1j)) line_list.append(Segment(begin=1j, end=1j + 1)) line_list.append(Segment(begin=1j + 1, end=1))" ]
[ "_parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"] == \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert", "assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] == \"stop\" assert parsed_path[\"keys\"]", "assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"]", "= _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path =", "test__build_bluesky_document_path(): parsed_path = _parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"] == \"abc\" parsed_path", "assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\"", "== \"primary\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path =", "assert parsed_path[\"doc\"] == \"stop\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\"", "parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert", "suitcase.nxsas.utils import _parse_bluesky_document_path def test__build_bluesky_document_path(): parsed_path = _parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\" assert", "_parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"] == (\"abc\",", "parsed_path = 
_parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\",) parsed_path =", "parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"] == \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] ==", "assert parsed_path[\"keys\"] == (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"]", "== \"start\" assert parsed_path[\"keys\"] == (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\"", "_parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] == \"stop\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] ==", "parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] ==", "\"start\" assert parsed_path[\"keys\"] == (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\" assert", "\"primary\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\")", "from suitcase.nxsas.utils import _parse_bluesky_document_path def test__build_bluesky_document_path(): parsed_path = _parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\"", "parsed_path = _parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"] == \"abc\" parsed_path =", "\"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\" assert", 
"\"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\"", "parsed_path[\"attribute\"] == \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] ==", "def test__build_bluesky_document_path(): parsed_path = _parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"] == \"abc\"", "_parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\")", "\"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\",) parsed_path", "= _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] == \"stop\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"]", "== (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] ==", "parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] == \"stop\" assert parsed_path[\"keys\"] ==", "== (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\",", "\"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\")", "== \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\" assert 
parsed_path[\"keys\"] == (\"abc\",)", "\"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] == \"stop\" assert", "parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path", "== (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] ==", "_parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert", "\"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"] == \"primary\" assert", "= _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"] ==", "== \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"] == \"primary\"", "assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert", "assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"] == \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"]", "parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert", "== \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] == \"stop\" assert 
parsed_path[\"keys\"] == (\"abc\",", "(\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\"", "parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path", "parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"]", "== (\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] ==", "= _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\")", "parsed_path[\"keys\"] == (\"abc\",) parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] ==", "\"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] == \"stop\" assert parsed_path[\"keys\"] == (\"abc\", \"def\")", "assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert", "_parse_bluesky_document_path def test__build_bluesky_document_path(): parsed_path = _parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"] ==", "assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert", "parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert 
parsed_path[\"doc\"] == \"stop\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert", "_parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] ==", "(\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\",", "assert parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"] == (\"abc\", \"def\")", "= _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"]", "\"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\")", "parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"]", "assert parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\" assert", "== \"start\" assert parsed_path[\"attribute\"] == \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\"", "== \"desc\" assert parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"]", "== \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path =", "parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"]", 
"parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path", "assert parsed_path[\"attribute\"] == \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"]", "= _parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"] == \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\")", "parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"] ==", "(\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/stop/abc/def@ghi\") assert parsed_path[\"doc\"] == \"stop\"", "== \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] ==", "\"desc\" assert parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] ==", "parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"]", "assert parsed_path[\"stream\"] == \"primary\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") assert parsed_path[\"attribute\"] == \"ghi\"", "import _parse_bluesky_document_path def test__build_bluesky_document_path(): parsed_path = _parse_bluesky_document_path(\"#bluesky/start@abc\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"attribute\"]", "\"start\" assert parsed_path[\"attribute\"] == \"abc\" parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc\") assert parsed_path[\"doc\"] == \"start\" assert", "(\"abc\",) parsed_path 
= _parse_bluesky_document_path(\"#bluesky/start/abc/def\") assert parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\")", "assert parsed_path[\"attribute\"] == \"ghi\" parsed_path = _parse_bluesky_document_path(\"#bluesky/desc/primary/abc/def@ghi\") assert parsed_path[\"doc\"] == \"desc\" assert parsed_path[\"stream\"]", "parsed_path[\"doc\"] == \"start\" assert parsed_path[\"keys\"] == (\"abc\", \"def\") parsed_path = _parse_bluesky_document_path(\"#bluesky/start/abc/def@ghi\") assert parsed_path[\"doc\"]" ]
[ "= [] cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for i", "re.compile(r'\\[.*?\\]') for i in cont: s=[] line1 = re.sub(patt1, '', i) wordList =", "in cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return c def callog(X): return math.log(X+1) def", "wordWeightList[i] >= minVal and wordWeightList[i] <= minVal + stepVal: freqList[0]+=1 continue for j", "freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() #", "if i.word >= u'\\u4e00' and i.word <= u'\\u9fa5': if i.word.encode('utf8') in stop_words: pass", "re.sub(patt1, '', i) wordList = pseg.cut(line1) for i in wordList: if i.word >=", "from sklearn import metrics import matplotlib.pyplot as plt import math import jieba.posseg as", "jieba import codecs import re import numpy as np from sklearn.feature_extraction.text import CountVectorizer", "plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\")", ", 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for i in cont: s=[] line1 = re.sub(patt1,", "j in range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <= minVal + (j+1)*stepVal:", "i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return a def readexam(): with open('all.csv', 'rb') as csvfile:", "= tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in 
range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重", "pseg.cut(line1) for i in wordList: if i.word >= u'\\u4e00' and i.word <= u'\\u9fa5':", "wordList3=[] # wordWeightList3=[] # for i in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] < threshold2:", "weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分", "stepVal: freqList[0]+=1 continue for j in range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i]", "for i in cont: s=[] line1 = re.sub(patt1, '', i) wordList = pseg.cut(line1)", "segmentWord(exam) # 计算权重 vectorizer = CountVectorizer() tfidftransformer = TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) #", "from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer import xlwt from sklearn import", "= ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for i in cont: s=[]", "> minVal+j*stepVal and wordWeightList[i] <= minVal + (j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1) #画第一张图", "if wordWeightList3[orderwords3[i]] < threshold3: # break # wordList4.append(wordList3[orderwords3[i]]) # wordWeightList4.append(wordWeightList3[orderwords3[i]]) # print len(wordList4)", "wordList2=[] wordWeightList2=[] for i in range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]])", "matplotlib.pyplot as plt import math import jieba.posseg as pseg def read_tingyongci(): path =", "硕士 毕业 与 中国 科学院\"] stopwords = read_tingyongci() data_exam = readexam() exam =", "range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break 
f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3))", "b: try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return a def readexam(): with", "for i in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s 条句子' % len(content_exam) return content_exam", "cont: s=[] line1 = re.sub(patt1, '', i) wordList = pseg.cut(line1) for i in", "# if wordWeightList3[orderwords3[i]] < threshold3: # break # wordList4.append(wordList3[orderwords3[i]]) # wordWeightList4.append(wordWeightList3[orderwords3[i]]) # print", ">= u'\\u4e00' and i.word <= u'\\u9fa5': if i.word.encode('utf8') in stop_words: pass else: if", "if wordWeightList[i] >= minVal and wordWeightList[i] <= minVal + stepVal: freqList[0]+=1 continue for", "corpus = [\"我 来到 北京 清华大学\", \"他 来到 了 网易 杭研 大厦\", \"小明", "minVal + (j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\")", "column1 = [row for row in reader] content_exam = [i[3] for i in", "= segmentWord(exam) # 计算权重 vectorizer = CountVectorizer() tfidftransformer = TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content))", "#第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[] # for i in", "tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)): wordWeightList[j]+=weight[i][j]", "plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus = [\"我 来到 北京 清华大学\", \"他 来到 了", "= [i[3] for i in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s 条句子' % 
len(content_exam)", "import jieba.posseg as pseg def read_tingyongci(): path = \"123.txt\" #停用词分析 stop_words = []", "freqList[j] += 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图", "sklearn.feature_extraction.text import TfidfTransformer import xlwt from sklearn import metrics import matplotlib.pyplot as plt", "%s 条句子' % len(content_exam) return content_exam # 对列表进行分词并用空格连接 def segmentWord(cont): c = []", "wordWeightList[i] <= minVal + stepVal: freqList[0]+=1 continue for j in range(1,splitNum): if wordWeightList[i]", "c = [] cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for", "def segmentWord(cont): c = [] cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 =", "row in reader] content_exam = [i[3] for i in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有", "b=\" \".join(s) c.append(b) return c def callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList)", "and wordWeightList[i] <= minVal + stepVal: freqList[0]+=1 continue for j in range(1,splitNum): if", "了 网易 杭研 大厦\", \"小明 硕士 毕业 与 中国 科学院\"] stopwords = read_tingyongci()", "vectorizer = CountVectorizer() tfidftransformer = TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape", "将utf8的列表转换成unicode def changeListCode(b): a = [] for i in b: try: i1 =", "pseg def read_tingyongci(): path = \"123.txt\" #停用词分析 stop_words = [] csv_reader = codecs.open(path,\"r\",", "in range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) #", "毕业 与 中国 科学院\"] 
stopwords = read_tingyongci() data_exam = readexam() exam = changeListCode(data_exam)", "import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer import xlwt from sklearn import metrics import", "import TfidfTransformer import xlwt from sklearn import metrics import matplotlib.pyplot as plt import", "metrics import matplotlib.pyplot as plt import math import jieba.posseg as pseg def read_tingyongci():", "import codecs import re import numpy as np from sklearn.feature_extraction.text import CountVectorizer from", "[] for i in b: try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return", "minVal and wordWeightList[i] <= minVal + stepVal: freqList[0]+=1 continue for j in range(1,splitNum):", "reader = csv.reader(csvfile) column1 = [row for row in reader] content_exam = [i[3]", "content_exam = [i[3] for i in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s 条句子' %", "orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[] # for i in range(len(wordWeightList3)): #", "range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i in range(len(wordWeightList)): if", "import math import jieba.posseg as pseg def read_tingyongci(): path = \"123.txt\" #停用词分析 stop_words", "import xlwt from sklearn import metrics import matplotlib.pyplot as plt import math import", "read_tingyongci(): path = \"123.txt\" #停用词分析 stop_words = [] csv_reader = codecs.open(path,\"r\", \"gbk\") for", "as csvfile: reader = csv.reader(csvfile) column1 = [row for row in reader] content_exam", "a = [] for i in b: try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except:", "with open('all.csv', 'rb') as csvfile: reader = csv.reader(csvfile) column1 = [row for row", 
"content = segmentWord(exam) # 计算权重 vectorizer = CountVectorizer() tfidftransformer = TfidfTransformer() tfidf =", "CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer import xlwt from sklearn import metrics import matplotlib.pyplot", "i.word >= u'\\u4e00' and i.word <= u'\\u9fa5': if i.word.encode('utf8') in stop_words: pass else:", "stop_words.append(row) return stop_words stop_words = read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b): a = []", "wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <= minVal + (j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1)", "= pseg.cut(line1) for i in wordList: if i.word >= u'\\u4e00' and i.word <=", "print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)):", "\"gbk\") for row in csv_reader: row = row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return stop_words", "i in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] < threshold2: # break # wordList3.append(wordList2[orderwords2[i]]) #", "<= minVal + stepVal: freqList[0]+=1 continue for j in range(1,splitNum): if wordWeightList[i] >", "range(len(wordWeightList)): if wordWeightList[i] >= minVal and wordWeightList[i] <= minVal + stepVal: freqList[0]+=1 continue", "threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i in range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]])", "tfidftransformer = TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word)", "wordWeightList4=[] # for i in 
range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] < threshold3: # break", "def readexam(): with open('all.csv', 'rb') as csvfile: reader = csv.reader(csvfile) column1 = [row", "stop_words stop_words = read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b): a = [] for i", "codecs import re import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text", "= csv.reader(csvfile) column1 = [row for row in reader] content_exam = [i[3] for", "pass else: if i.flag in cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return c def", "tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in", "for i in b: try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return a", "plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show()", "a def readexam(): with open('all.csv', 'rb') as csvfile: reader = csv.reader(csvfile) column1 =", "plt.show() # corpus = [\"我 来到 北京 清华大学\", \"他 来到 了 网易 杭研", "range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for", "if wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] #", "print len(wordList3) f = 
codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2:", "csv.reader(csvfile) column1 = [row for row in reader] content_exam = [i[3] for i", "powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus = [\"我 来到 北京 清华大学\", \"他", "'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for i in cont: s=[] line1 =", "for i in wordList: if i.word >= u'\\u4e00' and i.word <= u'\\u9fa5': if", "u'\\u4e00' and i.word <= u'\\u9fa5': if i.word.encode('utf8') in stop_words: pass else: if i.flag", "i.word <= u'\\u9fa5': if i.word.encode('utf8') in stop_words: pass else: if i.flag in cixing:", "wordList4=[] # wordWeightList4=[] # for i in range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] < threshold3:", "# orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[] # for i in range(len(wordWeightList3)):", "stopwords = read_tingyongci() data_exam = readexam() exam = changeListCode(data_exam) content = segmentWord(exam) #", "wordWeightList2[orderwords2[i]] < threshold2: # break # wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f", "import jieba import codecs import re import numpy as np from sklearn.feature_extraction.text import", "if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) #", "# for i in range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] < threshold3: # break #", "= [row for row in reader] content_exam = [i[3] for i in column1[1:]]", "# wordWeightList3=[] # for i in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] < 
threshold2: #", "cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return c def callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200):", "= readexam() exam = changeListCode(data_exam) content = segmentWord(exam) # 计算权重 vectorizer = CountVectorizer()", "[i[3] for i in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s 条句子' % len(content_exam) return", "in b: try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return a def readexam():", "wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[] # for i in", "# -*- coding: utf-8 -*- import csv import jieba import codecs import re", "Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus = [\"我 来到 北京 清华大学\", \"他 来到", "= re.compile(r'\\[.*?\\]') for i in cont: s=[] line1 = re.sub(patt1, '', i) wordList", "plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)),", "科学院\"] stopwords = read_tingyongci() data_exam = readexam() exam = changeListCode(data_exam) content = segmentWord(exam)", "= CountVectorizer() tfidftransformer = TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语", "+ stepVal: freqList[0]+=1 continue for j in range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal and", "Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList)))", 
"orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i in range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold: break", "# wordList4=[] # wordWeightList4=[] # for i in range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] <", "for j in range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <= minVal +", "s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return c def callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList)", "in range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] < threshold3: # break # wordList4.append(wordList3[orderwords3[i]]) # wordWeightList4.append(wordWeightList3[orderwords3[i]])", "#第一列为文本内容,并去除列名 print '测试集有 %s 条句子' % len(content_exam) return content_exam # 对列表进行分词并用空格连接 def segmentWord(cont):", "def changeListCode(b): a = [] for i in b: try: i1 = i.decode('GB18030').encode('utf-8')", "'测试集有 %s 条句子' % len(content_exam) return content_exam # 对列表进行分词并用空格连接 def segmentWord(cont): c =", "[\"我 来到 北京 清华大学\", \"他 来到 了 网易 杭研 大厦\", \"小明 硕士 毕业", "\"123.txt\" #停用词分析 stop_words = [] csv_reader = codecs.open(path,\"r\", \"gbk\") for row in csv_reader:", "codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close()", "网易 杭研 大厦\", \"小明 硕士 毕业 与 中国 科学院\"] stopwords = read_tingyongci() data_exam", "\"w\", \"utf-8\") for i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() #", "wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[] # for i in range(len(wordWeightList2)):", "def 
drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in range(len(wordWeightList)): if wordWeightList[i] >=", "minVal + stepVal: freqList[0]+=1 continue for j in range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal", "patt1 = re.compile(r'\\[.*?\\]') for i in cont: s=[] line1 = re.sub(patt1, '', i)", "in wordList: if i.word >= u'\\u4e00' and i.word <= u'\\u9fa5': if i.word.encode('utf8') in", "= [\"我 来到 北京 清华大学\", \"他 来到 了 网易 杭研 大厦\", \"小明 硕士", "plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus = [\"我 来到 北京 清华大学\",", "freqList[0]+=1 continue for j in range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <=", "对列表进行分词并用空格连接 def segmentWord(cont): c = [] cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1", "range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] < threshold3: # break # wordList4.append(wordList3[orderwords3[i]]) # wordWeightList4.append(wordWeightList3[orderwords3[i]]) #", "i) wordList = pseg.cut(line1) for i in wordList: if i.word >= u'\\u4e00' and", "import metrics import matplotlib.pyplot as plt import math import jieba.posseg as pseg def", "import re import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import", "# 对列表进行分词并用空格连接 def segmentWord(cont): c = [] cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an']", "#停用词分析 stop_words = [] csv_reader = codecs.open(path,\"r\", \"gbk\") for row in csv_reader: row", "in reader] content_exam = [i[3] for i in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s", "jieba.posseg as pseg def read_tingyongci(): path = \"123.txt\" #停用词分析 stop_words = [] csv_reader", "read_tingyongci() data_exam = readexam() exam = changeListCode(data_exam) content = 
segmentWord(exam) # 计算权重 vectorizer", "threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[] # for i in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]]", "#画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus", "in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s 条句子' % len(content_exam) return content_exam # 对列表进行分词并用空格连接", "i in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s 条句子' % len(content_exam) return content_exam #", "a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return a def readexam(): with open('all.csv', 'rb') as csvfile: reader", "# 将utf8的列表转换成unicode def changeListCode(b): a = [] for i in b: try: i1", "for row in reader] content_exam = [i[3] for i in column1[1:]] #第一列为文本内容,并去除列名 print", "wordWeightList=[0]*len(word) for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList))", "TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i", "break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[] # for i", "\"他 来到 了 网易 杭研 大厦\", \"小明 硕士 毕业 与 中国 科学院\"] stopwords", "plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw", 
"wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f = codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i in range(len(wordWeightList2)):", "# wordList3=[] # wordWeightList3=[] # for i in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] <", "column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s 条句子' % len(content_exam) return content_exam # 对列表进行分词并用空格连接 def", "-*- import csv import jieba import codecs import re import numpy as np", "row = row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return stop_words stop_words = read_tingyongci() # 将utf8的列表转换成unicode", "'rb') as csvfile: reader = csv.reader(csvfile) column1 = [row for row in reader]", "return content_exam # 对列表进行分词并用空格连接 def segmentWord(cont): c = [] cixing = ['n', 'v'", "i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3))", "= read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b): a = [] for i in b:", "changeListCode(data_exam) content = segmentWord(exam) # 计算权重 vectorizer = CountVectorizer() tfidftransformer = TfidfTransformer() tfidf", "i.flag in cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return c def callog(X): return math.log(X+1)", "a.append(i) return a def readexam(): with open('all.csv', 'rb') as csvfile: reader = csv.reader(csvfile)", "else: if i.flag in cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return c def callog(X):", "先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in", "# 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 
weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j", "= re.sub(patt1, '', i) wordList = pseg.cut(line1) for i in wordList: if i.word", "in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] < threshold2: # break # wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]])", "大厦\", \"小明 硕士 毕业 与 中国 科学院\"] stopwords = read_tingyongci() data_exam = readexam()", "len(wordList3) f = codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break", "row in csv_reader: row = row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return stop_words stop_words =", "i.word.encode('utf8') in stop_words: pass else: if i.flag in cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b)", "= i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return a def readexam(): with open('all.csv', 'rb') as", "as plt import math import jieba.posseg as pseg def read_tingyongci(): path = \"123.txt\"", "来到 了 网易 杭研 大厦\", \"小明 硕士 毕业 与 中国 科学院\"] stopwords =", "except: a.append(i) return a def readexam(): with open('all.csv', 'rb') as csvfile: reader =", "import matplotlib.pyplot as plt import math import jieba.posseg as pseg def read_tingyongci(): path", "#画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\")", "<reponame>dsh651470774/dshAlgorithm<gh_stars>0 # -*- coding: utf-8 -*- import csv import jieba import codecs import", "re import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer", "\"小明 硕士 毕业 与 中国 科学院\"] stopwords = 
read_tingyongci() data_exam = readexam() exam", "bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus = [\"我 来到", "'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for i in cont: s=[] line1 = re.sub(patt1, '',", "range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <= minVal + (j+1)*stepVal: freqList[j] +=", "in range(len(wordWeightList)): if wordWeightList[i] >= minVal and wordWeightList[i] <= minVal + stepVal: freqList[0]+=1", "stop_words = read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b): a = [] for i in", "stop_words = [] csv_reader = codecs.open(path,\"r\", \"gbk\") for row in csv_reader: row =", "# 计算权重 vectorizer = CountVectorizer() tfidftransformer = TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值", "# threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[] # for i in range(len(wordWeightList3)): # if", "wordWeightList2=[] for i in range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分", "\".join(s) c.append(b) return c def callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum)", "readexam(): with open('all.csv', 'rb') as csvfile: reader = csv.reader(csvfile) column1 = [row for", "if i.flag in cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return c def callog(X): return", "['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for i in cont: s=[] line1", "try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: 
a.append(i) return a def readexam(): with open('all.csv',", "< threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[] #", "#看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i in range(len(wordWeightList)): if wordWeightList[orderwords[i]] <", "sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer import xlwt from sklearn import metrics", "f = codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]])", "stop_words: pass else: if i.flag in cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return c", "for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList))", "as pseg def read_tingyongci(): path = \"123.txt\" #停用词分析 stop_words = [] csv_reader =", "for j in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i", "= row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return stop_words stop_words = read_tingyongci() # 将utf8的列表转换成unicode def", "i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return a def readexam(): with open('all.csv', 'rb')", "# wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f = 
codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for", "[row for row in reader] content_exam = [i[3] for i in column1[1:]] #第一列为文本内容,并去除列名", "stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in range(len(wordWeightList)): if wordWeightList[i] >= minVal and wordWeightList[i] <=", "minVal+j*stepVal and wordWeightList[i] <= minVal + (j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121)", "= codecs.open(path,\"r\", \"gbk\") for row in csv_reader: row = row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row)", "return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in range(len(wordWeightList)): if", "i in cont: s=[] line1 = re.sub(patt1, '', i) wordList = pseg.cut(line1) for", "i in range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] < threshold3: # break # wordList4.append(wordList3[orderwords3[i]]) #", "word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布", "exam = changeListCode(data_exam) content = segmentWord(exam) # 计算权重 vectorizer = CountVectorizer() tfidftransformer =", "计算权重 vectorizer = CountVectorizer() tfidftransformer = TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print", "threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[] # for", "for i in range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] < threshold3: # break # 
wordList4.append(wordList3[orderwords3[i]])", "f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[]", "for i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分 #", "def read_tingyongci(): path = \"123.txt\" #停用词分析 stop_words = [] csv_reader = codecs.open(path,\"r\", \"gbk\")", "< threshold3: # break # wordList4.append(wordList3[orderwords3[i]]) # wordWeightList4.append(wordWeightList3[orderwords3[i]]) # print len(wordList4) # drawPWords(wordWeightList4,200)", "i in range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2))", "f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[] # for", ").encode(\"utf-8\") stop_words.append(row) return stop_words stop_words = read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b): a =", "in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) #", "wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i in range(len(wordWeightList)): if wordWeightList[orderwords[i]]", "read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b): a = [] for i in b: try:", "numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer 
import xlwt", "import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer import", "plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus =", "reader] content_exam = [i[3] for i in column1[1:]] #第一列为文本内容,并去除列名 print '测试集有 %s 条句子'", "minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in range(len(wordWeightList)): if wordWeightList[i] >= minVal and", "X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\")", "xlwt from sklearn import metrics import matplotlib.pyplot as plt import math import jieba.posseg", "= TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for", "wordWeightList3=[] # for i in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] < threshold2: # break", "print '测试集有 %s 条句子' % len(content_exam) return content_exam # 对列表进行分词并用空格连接 def segmentWord(cont): c", "line1 = re.sub(patt1, '', i) wordList = pseg.cut(line1) for i in wordList: if", "wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[]", "callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) 
stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in range(len(wordWeightList)):", "continue for j in range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <= minVal", "i in wordList: if i.word >= u'\\u4e00' and i.word <= u'\\u9fa5': if i.word.encode('utf8')", "#第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[] # for i in range(len(wordWeightList2)): #", "+= 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122)", "and i.word <= u'\\u9fa5': if i.word.encode('utf8') in stop_words: pass else: if i.flag in", "map(callog,map(float,freqList))) plt.show() # corpus = [\"我 来到 北京 清华大学\", \"他 来到 了 网易", "c def callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i", "in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i in range(len(wordWeightList)):", "= codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n')", "+ (j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X,", "plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus = [\"我", "in stop_words: pass else: if i.flag in cixing: s.append(i.word.encode('utf8')) b=\" \".join(s) c.append(b) return", 
"freqList=[0]*splitNum for i in range(len(wordWeightList)): if wordWeightList[i] >= minVal and wordWeightList[i] <= minVal", "[] csv_reader = codecs.open(path,\"r\", \"gbk\") for row in csv_reader: row = row.strip( '\\r\\n'", "csv_reader: row = row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return stop_words stop_words = read_tingyongci() #", "range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[]", "#第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i in range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold:", "plt import math import jieba.posseg as pseg def read_tingyongci(): path = \"123.txt\" #停用词分析", "if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <= minVal + (j+1)*stepVal: freqList[j] += 1", "codecs.open(path,\"r\", \"gbk\") for row in csv_reader: row = row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return", "cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for i in cont:", "in range(1,splitNum): if wordWeightList[i] > minVal+j*stepVal and wordWeightList[i] <= minVal + (j+1)*stepVal: freqList[j]", "# wordWeightList4=[] # for i in range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]] < threshold3: #", "np from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer import xlwt from sklearn", "CountVectorizer() tfidftransformer = TfidfTransformer() tfidf = tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 
weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重", "wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[]", "maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in range(len(wordWeightList)): if wordWeightList[i] >= minVal and wordWeightList[i]", "杭研 大厦\", \"小明 硕士 毕业 与 中国 科学院\"] stopwords = read_tingyongci() data_exam =", "来到 北京 清华大学\", \"他 来到 了 网易 杭研 大厦\", \"小明 硕士 毕业 与", "return stop_words stop_words = read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b): a = [] for", "1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF", "for i in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] < threshold2: # break # wordList3.append(wordList2[orderwords2[i]])", "'\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return stop_words stop_words = read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b): a", "wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f = codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i", "from sklearn.feature_extraction.text import TfidfTransformer import xlwt from sklearn import metrics import matplotlib.pyplot as", "in csv_reader: row = row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return stop_words stop_words = read_tingyongci()", "plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList) #画第二张图 plt.subplot(122) plt.xlabel(\"log(TF-IDF bins)\") plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF", "< threshold2: # break # wordList3.append(wordList2[orderwords2[i]]) # 
wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f =", "tfidftransformer.fit_transform(vectorizer.fit_transform(content)) # 先转换成词频矩阵,再计算TFIDF值 print tfidf.shape word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 wordWeightList=[0]*len(word) for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for", "utf-8 -*- import csv import jieba import codecs import re import numpy as", "北京 清华大学\", \"他 来到 了 网易 杭研 大厦\", \"小明 硕士 毕业 与 中国", "# print len(wordList3) f = codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i in range(len(wordWeightList2)): if", "清华大学\", \"他 来到 了 网易 杭研 大厦\", \"小明 硕士 毕业 与 中国 科学院\"]", "c.append(b) return c def callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum", "break # wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f = codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\")", "return c def callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for", "<= minVal + (j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF", "in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[]", "break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] #", 
"wordList: if i.word >= u'\\u4e00' and i.word <= u'\\u9fa5': if i.word.encode('utf8') in stop_words:", "j in range(len(word)): wordWeightList[j]+=weight[i][j] #看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[] wordWeightList2=[] for i in", "as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer import xlwt from", "i in b: try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i) return a def", "= read_tingyongci() data_exam = readexam() exam = changeListCode(data_exam) content = segmentWord(exam) # 计算权重", "= [] for i in b: try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8')) except: a.append(i)", "def callog(X): return math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in", "(j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\") plt.title(\"TF-IDF Explore\") plt.bar(X, freqList)", "wordList = pseg.cut(line1) for i in wordList: if i.word >= u'\\u4e00' and i.word", "sklearn import metrics import matplotlib.pyplot as plt import math import jieba.posseg as pseg", "threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[] # for i in range(len(wordWeightList3)): # if wordWeightList3[orderwords3[i]]", "# wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f = codecs.open(\"keywordsflag3.txt\", \"w\", \"utf-8\") for i in", "[] cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]') for i in", "i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 for j in range(len(word)): wordWeightList[j]+=weight[i][j] 
#看下划分结果下的词频分布 #第一次划分 orderwords=np.argsort(-np.array(wordWeightList)) threshold=np.mean(np.array(wordWeightList)) wordList2=[]", "= changeListCode(data_exam) content = segmentWord(exam) # 计算权重 vectorizer = CountVectorizer() tfidftransformer = TfidfTransformer()", "\"utf-8\") for i in range(len(wordWeightList2)): if wordWeightList2[orderwords2[i]]<threshold2: break f.write(wordList2[orderwords2[i]]) f.write('\\n') f.close() # #第三次划分", "'', i) wordList = pseg.cut(line1) for i in wordList: if i.word >= u'\\u4e00'", "# corpus = [\"我 来到 北京 清华大学\", \"他 来到 了 网易 杭研 大厦\",", "= [] csv_reader = codecs.open(path,\"r\", \"gbk\") for row in csv_reader: row = row.strip(", ">= minVal and wordWeightList[i] <= minVal + stepVal: freqList[0]+=1 continue for j in", "-*- coding: utf-8 -*- import csv import jieba import codecs import re import", "math import jieba.posseg as pseg def read_tingyongci(): path = \"123.txt\" #停用词分析 stop_words =", "TfidfTransformer import xlwt from sklearn import metrics import matplotlib.pyplot as plt import math", "path = \"123.txt\" #停用词分析 stop_words = [] csv_reader = codecs.open(path,\"r\", \"gbk\") for row", "# for i in range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] < threshold2: # break #", "f.write('\\n') f.close() # #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[] #", "# if wordWeightList2[orderwords2[i]] < threshold2: # break # wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print", "wordWeightList[i] <= minVal + (j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\") plt.ylabel(\"Frequance\")", "i in range(len(wordWeightList)): if wordWeightList[i] >= minVal and wordWeightList[i] <= minVal + stepVal:", "% len(content_exam) return content_exam # 对列表进行分词并用空格连接 def segmentWord(cont): c = [] cixing =", "row.strip( '\\r\\n' ).encode(\"utf-8\") 
stop_words.append(row) return stop_words stop_words = read_tingyongci() # 将utf8的列表转换成unicode def changeListCode(b):", "drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in range(len(wordWeightList)): if wordWeightList[i] >= minVal", "与 中国 科学院\"] stopwords = read_tingyongci() data_exam = readexam() exam = changeListCode(data_exam) content", "in cont: s=[] line1 = re.sub(patt1, '', i) wordList = pseg.cut(line1) for i", "return a def readexam(): with open('all.csv', 'rb') as csvfile: reader = csv.reader(csvfile) column1", "条句子' % len(content_exam) return content_exam # 对列表进行分词并用空格连接 def segmentWord(cont): c = [] cixing", "s=[] line1 = re.sub(patt1, '', i) wordList = pseg.cut(line1) for i in wordList:", "and wordWeightList[i] <= minVal + (j+1)*stepVal: freqList[j] += 1 X=range(1,splitNum+1) #画第一张图 plt.subplot(121) plt.xlabel(\"TF-IDF\")", "plt.ylabel(\"log(Frequance)\") plt.title(\"TF-IDF powerlaw Explore\") plt.plot(map(callog,map(float,X)), map(callog,map(float,freqList))) plt.show() # corpus = [\"我 来到 北京", "content_exam # 对列表进行分词并用空格连接 def segmentWord(cont): c = [] cixing = ['n', 'v' ,", "csv_reader = codecs.open(path,\"r\", \"gbk\") for row in csv_reader: row = row.strip( '\\r\\n' ).encode(\"utf-8\")", "coding: utf-8 -*- import csv import jieba import codecs import re import numpy", "u'\\u9fa5': if i.word.encode('utf8') in stop_words: pass else: if i.flag in cixing: s.append(i.word.encode('utf8')) b=\"", "math.log(X+1) def drawPWords(wordWeightList,splitNum=200): minVal=min(wordWeightList) maxVal=max(wordWeightList) stepVal=(maxVal-minVal)/float(splitNum) freqList=[0]*splitNum for i in range(len(wordWeightList)): if wordWeightList[i]", "<= u'\\u9fa5': if i.word.encode('utf8') in stop_words: pass else: if i.flag in cixing: s.append(i.word.encode('utf8'))", "for row in csv_reader: row = row.strip( '\\r\\n' ).encode(\"utf-8\") stop_words.append(row) return 
stop_words stop_words", "wordWeightList3[orderwords3[i]] < threshold3: # break # wordList4.append(wordList3[orderwords3[i]]) # wordWeightList4.append(wordWeightList3[orderwords3[i]]) # print len(wordList4) #", "threshold2: # break # wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f = codecs.open(\"keywordsflag3.txt\",", "range(len(wordWeightList2)): # if wordWeightList2[orderwords2[i]] < threshold2: # break # wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) #", "readexam() exam = changeListCode(data_exam) content = segmentWord(exam) # 计算权重 vectorizer = CountVectorizer() tfidftransformer", "import csv import jieba import codecs import re import numpy as np from", "# #第三次划分 # orderwords3=np.argsort(-np.array(wordWeightList3)) # threshold3=np.mean(np.array(wordWeightList3)) # wordList4=[] # wordWeightList4=[] # for i", "csvfile: reader = csv.reader(csvfile) column1 = [row for row in reader] content_exam =", "if wordWeightList2[orderwords2[i]] < threshold2: # break # wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3)", "# break # wordList3.append(wordList2[orderwords2[i]]) # wordWeightList3.append(wordWeightList2[orderwords2[i]]) # print len(wordList3) f = codecs.open(\"keywordsflag3.txt\", \"w\",", "open('all.csv', 'rb') as csvfile: reader = csv.reader(csvfile) column1 = [row for row in", "orderwords2=np.argsort(-np.array(wordWeightList2)) threshold2=np.mean(np.array(wordWeightList2)) # wordList3=[] # wordWeightList3=[] # for i in range(len(wordWeightList2)): # if", "csv import jieba import codecs import re import numpy as np from sklearn.feature_extraction.text", "中国 科学院\"] stopwords = read_tingyongci() data_exam = readexam() exam = changeListCode(data_exam) content =", "if i.word.encode('utf8') in stop_words: pass else: if i.flag in cixing: 
s.append(i.word.encode('utf8')) b=\" \".join(s)", "data_exam = readexam() exam = changeListCode(data_exam) content = segmentWord(exam) # 计算权重 vectorizer =", "for i in range(len(wordWeightList)): if wordWeightList[orderwords[i]] < threshold: break wordList2.append(word[orderwords[i]]) wordWeightList2.append(wordWeightList[orderwords[i]]) #第二次划分 orderwords2=np.argsort(-np.array(wordWeightList2))", "= \"123.txt\" #停用词分析 stop_words = [] csv_reader = codecs.open(path,\"r\", \"gbk\") for row in", "changeListCode(b): a = [] for i in b: try: i1 = i.decode('GB18030').encode('utf-8') a.append(i.decode('GB18030').encode('utf-8'))", "len(content_exam) return content_exam # 对列表进行分词并用空格连接 def segmentWord(cont): c = [] cixing = ['n',", "for i in range(len(wordWeightList)): if wordWeightList[i] >= minVal and wordWeightList[i] <= minVal +", "segmentWord(cont): c = [] cixing = ['n', 'v' , 'vn','a','nr','nt','l','nz','ns','i','f','d','t','s','eng','an'] patt1 = re.compile(r'\\[.*?\\]')" ]
[]
[ "[] flag = True while flag: topping = input(prompt) toppings.append(topping) if topping ==", "folling toppings: ') for topping in toppings[:-1]: print('\\t- ' + topping) toppings =", "while n < n_toppings: topping = input(prompt) toppings.append(topping) n += 1 print ('You", "flag = True while flag: topping = input(prompt) toppings.append(topping) if topping == 'quit':", "' + str(n_toppings) + ' following toppings: ') for topping in toppings: print('\\t-", "\"\\nHi, please enter pizza toppings you want\" prompt += \"\\n(Write 'quit' to exit)", "topping == 'quit': break print ('You choose those folling toppings: ') for topping", "the number of topping you would like to add: ') n_toppings = int(n_toppings)", "for topping in toppings[:-1]: print('\\t- ' + topping) toppings = [] flag =", "prompt = \"\\n Please enter pizza toppings you want: \" while n <", "= [] flag = True while flag: topping = input(prompt) toppings.append(topping) if topping", "= input(prompt) toppings.append(topping) if topping == 'quit': break print ('You choose those folling", "enter pizza toppings you want\" prompt += \"\\n(Write 'quit' to exit) \" toppings", "pizza toppings you want: \" while n < n_toppings: topping = input(prompt) toppings.append(topping)", "0 toppings = [] prompt = \"\\n Please enter pizza toppings you want:", "toppings you want: \" while n < n_toppings: topping = input(prompt) toppings.append(topping) n", "') n_toppings = int(n_toppings) n = 0 toppings = [] prompt = \"\\n", "of topping you would like to add: ') n_toppings = int(n_toppings) n =", "input(prompt) toppings.append(topping) if topping == 'quit': flag = False print ('You choose those", "= \"\\nHi, please enter pizza toppings you want\" prompt += \"\\n(Write 'quit' to", "topping) n_toppings = input('Enter the number of topping you would like to add:", "would like to add: ') n_toppings = int(n_toppings) n = 0 toppings =", "print ('You choose those ' + str(n_toppings) + ' following toppings: ') for", "topping in 
toppings[:-1]: print('\\t- ' + topping) n_toppings = input('Enter the number of", "topping you would like to add: ') n_toppings = int(n_toppings) n = 0", "= 0 toppings = [] prompt = \"\\n Please enter pizza toppings you", "in toppings[:-1]: print('\\t- ' + topping) toppings = [] flag = True while", "toppings.append(topping) n += 1 print ('You choose those ' + str(n_toppings) + '", "('You choose those folling toppings: ') for topping in toppings[:-1]: print('\\t- ' +", "<reponame>simonhoch/python_basics prompt = \"\\nHi, please enter pizza toppings you want\" prompt += \"\\n(Write", "please enter pizza toppings you want\" prompt += \"\\n(Write 'quit' to exit) \"", "toppings you want\" prompt += \"\\n(Write 'quit' to exit) \" toppings = []", "topping = input(prompt) toppings.append(topping) if topping == 'quit': flag = False print ('You", "= False print ('You choose those folling toppings: ') for topping in toppings[:-1]:", "= input('Enter the number of topping you would like to add: ') n_toppings", "for topping in toppings[:-1]: print('\\t- ' + topping) n_toppings = input('Enter the number", "n = 0 toppings = [] prompt = \"\\n Please enter pizza toppings", "+ topping) n_toppings = input('Enter the number of topping you would like to", "to exit) \" toppings = [] while True: topping = input(prompt) toppings.append(topping) if", "pizza toppings you want\" prompt += \"\\n(Write 'quit' to exit) \" toppings =", "n_toppings = input('Enter the number of topping you would like to add: ')", "those ' + str(n_toppings) + ' following toppings: ') for topping in toppings:", "' + topping) n_toppings = input('Enter the number of topping you would like", "[] while True: topping = input(prompt) toppings.append(topping) if topping == 'quit': break print", "topping = input(prompt) toppings.append(topping) if topping == 'quit': break print ('You choose those", "== 'quit': break print ('You choose those folling toppings: ') for topping in", "want: \" while n < n_toppings: topping = 
input(prompt) toppings.append(topping) n += 1", "+ ' following toppings: ') for topping in toppings: print('\\t- ' + topping)", "= True while flag: topping = input(prompt) toppings.append(topping) if topping == 'quit': flag", "\" while n < n_toppings: topping = input(prompt) toppings.append(topping) n += 1 print", "' + topping) toppings = [] flag = True while flag: topping =", "= [] prompt = \"\\n Please enter pizza toppings you want: \" while", "\" toppings = [] while True: topping = input(prompt) toppings.append(topping) if topping ==", "[] prompt = \"\\n Please enter pizza toppings you want: \" while n", "print('\\t- ' + topping) toppings = [] flag = True while flag: topping", "flag: topping = input(prompt) toppings.append(topping) if topping == 'quit': flag = False print", "toppings = [] flag = True while flag: topping = input(prompt) toppings.append(topping) if", "+ topping) toppings = [] flag = True while flag: topping = input(prompt)", "choose those ' + str(n_toppings) + ' following toppings: ') for topping in", "('You choose those ' + str(n_toppings) + ' following toppings: ') for topping", "= int(n_toppings) n = 0 toppings = [] prompt = \"\\n Please enter", "= \"\\n Please enter pizza toppings you want: \" while n < n_toppings:", "== 'quit': flag = False print ('You choose those folling toppings: ') for", "those folling toppings: ') for topping in toppings[:-1]: print('\\t- ' + topping) n_toppings", "'quit': break print ('You choose those folling toppings: ') for topping in toppings[:-1]:", "False print ('You choose those folling toppings: ') for topping in toppings[:-1]: print('\\t-", "exit) \" toppings = [] while True: topping = input(prompt) toppings.append(topping) if topping", "input(prompt) toppings.append(topping) n += 1 print ('You choose those ' + str(n_toppings) +", "Please enter pizza toppings you want: \" while n < n_toppings: topping =", "if topping == 'quit': break print ('You choose those folling toppings: ') for", "in toppings[:-1]: 
print('\\t- ' + topping) n_toppings = input('Enter the number of topping", "topping = input(prompt) toppings.append(topping) n += 1 print ('You choose those ' +", "\"\\n(Write 'quit' to exit) \" toppings = [] while True: topping = input(prompt)", "int(n_toppings) n = 0 toppings = [] prompt = \"\\n Please enter pizza", "to add: ') n_toppings = int(n_toppings) n = 0 toppings = [] prompt", "= input(prompt) toppings.append(topping) n += 1 print ('You choose those ' + str(n_toppings)", "topping == 'quit': flag = False print ('You choose those folling toppings: ')", "toppings[:-1]: print('\\t- ' + topping) toppings = [] flag = True while flag:", "number of topping you would like to add: ') n_toppings = int(n_toppings) n", "\"\\n Please enter pizza toppings you want: \" while n < n_toppings: topping", "add: ') n_toppings = int(n_toppings) n = 0 toppings = [] prompt =", "want\" prompt += \"\\n(Write 'quit' to exit) \" toppings = [] while True:", "') for topping in toppings[:-1]: print('\\t- ' + topping) toppings = [] flag", "topping in toppings[:-1]: print('\\t- ' + topping) toppings = [] flag = True", "print('\\t- ' + topping) n_toppings = input('Enter the number of topping you would", "+= 1 print ('You choose those ' + str(n_toppings) + ' following toppings:", "toppings.append(topping) if topping == 'quit': break print ('You choose those folling toppings: ')", "those folling toppings: ') for topping in toppings[:-1]: print('\\t- ' + topping) toppings", "flag = False print ('You choose those folling toppings: ') for topping in", "if topping == 'quit': flag = False print ('You choose those folling toppings:", "str(n_toppings) + ' following toppings: ') for topping in toppings: print('\\t- ' +", "= [] while True: topping = input(prompt) toppings.append(topping) if topping == 'quit': break", "toppings: ') for topping in toppings[:-1]: print('\\t- ' + topping) toppings = []", "') for topping in toppings[:-1]: print('\\t- ' + topping) n_toppings = input('Enter the", "print 
('You choose those folling toppings: ') for topping in toppings[:-1]: print('\\t- '", "n += 1 print ('You choose those ' + str(n_toppings) + ' following", "1 print ('You choose those ' + str(n_toppings) + ' following toppings: ')", "prompt += \"\\n(Write 'quit' to exit) \" toppings = [] while True: topping", "like to add: ') n_toppings = int(n_toppings) n = 0 toppings = []", "+ str(n_toppings) + ' following toppings: ') for topping in toppings: print('\\t- '", "'quit': flag = False print ('You choose those folling toppings: ') for topping", "n_toppings: topping = input(prompt) toppings.append(topping) n += 1 print ('You choose those '", "toppings = [] while True: topping = input(prompt) toppings.append(topping) if topping == 'quit':", "True: topping = input(prompt) toppings.append(topping) if topping == 'quit': break print ('You choose", "you want: \" while n < n_toppings: topping = input(prompt) toppings.append(topping) n +=", "input('Enter the number of topping you would like to add: ') n_toppings =", "n_toppings = int(n_toppings) n = 0 toppings = [] prompt = \"\\n Please", "input(prompt) toppings.append(topping) if topping == 'quit': break print ('You choose those folling toppings:", "you would like to add: ') n_toppings = int(n_toppings) n = 0 toppings", "= input(prompt) toppings.append(topping) if topping == 'quit': flag = False print ('You choose", "choose those folling toppings: ') for topping in toppings[:-1]: print('\\t- ' + topping)", "True while flag: topping = input(prompt) toppings.append(topping) if topping == 'quit': flag =", "break print ('You choose those folling toppings: ') for topping in toppings[:-1]: print('\\t-", "n < n_toppings: topping = input(prompt) toppings.append(topping) n += 1 print ('You choose", "toppings[:-1]: print('\\t- ' + topping) n_toppings = input('Enter the number of topping you", "folling toppings: ') for topping in toppings[:-1]: print('\\t- ' + topping) n_toppings =", "toppings.append(topping) if topping == 'quit': flag = 
False print ('You choose those folling", "'quit' to exit) \" toppings = [] while True: topping = input(prompt) toppings.append(topping)", "+= \"\\n(Write 'quit' to exit) \" toppings = [] while True: topping =", "topping) toppings = [] flag = True while flag: topping = input(prompt) toppings.append(topping)", "toppings: ') for topping in toppings[:-1]: print('\\t- ' + topping) n_toppings = input('Enter", "toppings = [] prompt = \"\\n Please enter pizza toppings you want: \"", "enter pizza toppings you want: \" while n < n_toppings: topping = input(prompt)", "while flag: topping = input(prompt) toppings.append(topping) if topping == 'quit': flag = False", "< n_toppings: topping = input(prompt) toppings.append(topping) n += 1 print ('You choose those", "prompt = \"\\nHi, please enter pizza toppings you want\" prompt += \"\\n(Write 'quit'", "while True: topping = input(prompt) toppings.append(topping) if topping == 'quit': break print ('You", "you want\" prompt += \"\\n(Write 'quit' to exit) \" toppings = [] while" ]
[ "if if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for", "continue if print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print if if else', '1-3', should_validate=False))", "'1-3', should_validate=False)) print(get_similarity('for print for if else if', '1-3', should_validate=False)) if __name__ ==", "if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print", "print(get_similarity('for print if', '1-1', should_validate=False)) print(get_similarity('for print if if if for', '1-1', should_validate=False))", "def test_samples(): print(\"1-1\") print(get_similarity('for print if', '1-1', should_validate=False)) print(get_similarity('for print if if if", "range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print if', '1-2', should_validate=False)) print(get_similarity('for print', '1-2', should_validate=False))", "test_samples(): print(\"1-1\") print(get_similarity('for print if', '1-1', should_validate=False)) print(get_similarity('for print if if if for',", "print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for print if if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i", "should_validate=False)) print(get_similarity('for print', '1-2', should_validate=False)) print(get_similarity('for break continue if print', '1-2', should_validate=False)) print(\"1-3\")", "'1-2', should_validate=False)) print(get_similarity('for break continue if print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print if", "if', '1-1', should_validate=False)) print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for print if", 
"print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for print if if if', '1-1',", "break continue if print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print if if else', '1-3',", "should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print if', '1-2', should_validate=False))", "print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print if', '1-2', should_validate=False)) print(get_similarity('for", "'1-1', should_validate=False)) print(get_similarity('for print if if if for', '1-1', should_validate=False)) print(get_similarity('for print if", "'1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print if if else', '1-3', should_validate=False)) print(get_similarity('for', '1-3', should_validate=False))", "'1-1')) print(\"1-2\") print(get_similarity('for print if', '1-2', should_validate=False)) print(get_similarity('for print', '1-2', should_validate=False)) print(get_similarity('for break", "print(get_similarity('for break continue if print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print if if else',", "print(get_similarity('for print', '1-2', should_validate=False)) print(get_similarity('for break continue if print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for", "'1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print if', '1-2',", "print(\"1-3\") print(get_similarity('for print if if else', '1-3', should_validate=False)) print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for print',", "else', '1-3', should_validate=False)) print(get_similarity('for', '1-3', 
should_validate=False)) print(get_similarity('for print', '1-3', should_validate=False)) print(get_similarity('for print for", "print(get_similarity('for print if if if if if', '1-1', should_validate=False)) print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for", "if else', '1-3', should_validate=False)) print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for print', '1-3', should_validate=False)) print(get_similarity('for print", "print(get_similarity('for print if if else', '1-3', should_validate=False)) print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for print', '1-3',", "'1-1', should_validate=False)) print(get_similarity('for print if if if if if', '1-1', should_validate=False)) print(get_similarity('for', '1-1',", "if if if for', '1-1', should_validate=False)) print(get_similarity('for print if if if if if',", "print if if if if if', '1-1', should_validate=False)) print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for print',", "'1-1', should_validate=False)) print(get_similarity('for print if if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n", "should_validate=False)) print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for print if if if',", "print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print if if else', '1-3', should_validate=False)) print(get_similarity('for', '1-3',", "print(get_similarity('for print', '1-3', should_validate=False)) print(get_similarity('for print for if else if', '1-3', should_validate=False)) if", "for', '1-1', should_validate=False)) print(get_similarity('for print if if if if if', '1-1', should_validate=False)) print(get_similarity('for',", "if', '1-2', should_validate=False)) print(get_similarity('for print', '1-2', 
should_validate=False)) print(get_similarity('for break continue if print', '1-2',", "should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for print if if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor", "'1-3', should_validate=False)) print(get_similarity('for print', '1-3', should_validate=False)) print(get_similarity('for print for if else if', '1-3',", "should_validate=False)) print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for print', '1-3', should_validate=False)) print(get_similarity('for print for if else", "should_validate=False)) print(get_similarity('for print if if if for', '1-1', should_validate=False)) print(get_similarity('for print if if", "if if if if', '1-1', should_validate=False)) print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False))", "'1-1', should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for print if if if', '1-1', should_validate=False))", "if if else', '1-3', should_validate=False)) print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for print', '1-3', should_validate=False)) print(get_similarity('for", "should_validate=False)) print(\"1-3\") print(get_similarity('for print if if else', '1-3', should_validate=False)) print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for", "print if', '1-1', should_validate=False)) print(get_similarity('for print if if if for', '1-1', should_validate=False)) print(get_similarity('for", "'1-1', should_validate=False)) print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for print if if", "'1-3', should_validate=False)) print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for 
print', '1-3', should_validate=False)) print(get_similarity('for print for if", "print(get_similarity('for print if', '1-2', should_validate=False)) print(get_similarity('for print', '1-2', should_validate=False)) print(get_similarity('for break continue if", "print', '1-2', should_validate=False)) print(get_similarity('for break continue if print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print", "print(get_similarity('for print for if else if', '1-3', should_validate=False)) if __name__ == \"__main__\": test_samples()", "should_validate=False)) print(get_similarity('for print if if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2',", "get_similarity def test_samples(): print(\"1-1\") print(get_similarity('for print if', '1-1', should_validate=False)) print(get_similarity('for print if if", "in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print if', '1-2', should_validate=False)) print(get_similarity('for print', '1-2',", "if for', '1-1', should_validate=False)) print(get_similarity('for print if if if if if', '1-1', should_validate=False))", "should_validate=False)) print(get_similarity('for print', '1-3', should_validate=False)) print(get_similarity('for print for if else if', '1-3', should_validate=False))", "<reponame>MOOC-Learner-Project/edx-extension-code-similarity from compare_trajectories import get_similarity def test_samples(): print(\"1-1\") print(get_similarity('for print if', '1-1', should_validate=False))", "compare_trajectories import get_similarity def test_samples(): print(\"1-1\") print(get_similarity('for print if', '1-1', should_validate=False)) print(get_similarity('for print", "should_validate=False)) print(get_similarity('for break continue if print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print if if", "print(get_similarity('for print if if if for', '1-1', should_validate=False)) 
print(get_similarity('for print if if if", "print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for print', '1-3', should_validate=False)) print(get_similarity('for print for if else if',", "print if', '1-2', should_validate=False)) print(get_similarity('for print', '1-2', should_validate=False)) print(get_similarity('for break continue if print',", "import get_similarity def test_samples(): print(\"1-1\") print(get_similarity('for print if', '1-1', should_validate=False)) print(get_similarity('for print if", "should_validate=False)) print(get_similarity('for print for if else if', '1-3', should_validate=False)) if __name__ == \"__main__\":", "if if', '1-1', should_validate=False)) print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for print", "'1-2', should_validate=False)) print(get_similarity('for print', '1-2', should_validate=False)) print(get_similarity('for break continue if print', '1-2', should_validate=False))", "i in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print if', '1-2', should_validate=False)) print(get_similarity('for print',", "from compare_trajectories import get_similarity def test_samples(): print(\"1-1\") print(get_similarity('for print if', '1-1', should_validate=False)) print(get_similarity('for", "if print', '1-2', should_validate=False)) print(\"1-3\") print(get_similarity('for print if if else', '1-3', should_validate=False)) print(get_similarity('for',", "print if if else', '1-3', should_validate=False)) print(get_similarity('for', '1-3', should_validate=False)) print(get_similarity('for print', '1-3', should_validate=False))", "print if if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\")", "if if if if if', '1-1', should_validate=False)) print(get_similarity('for', '1-1', 
should_validate=False)) print(get_similarity('for print', '1-1',", "if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print if',", "print(i)\\nr=2', '1-1')) print(\"1-2\") print(get_similarity('for print if', '1-2', should_validate=False)) print(get_similarity('for print', '1-2', should_validate=False)) print(get_similarity('for", "print(\"1-2\") print(get_similarity('for print if', '1-2', should_validate=False)) print(get_similarity('for print', '1-2', should_validate=False)) print(get_similarity('for break continue", "print(\"1-1\") print(get_similarity('for print if', '1-1', should_validate=False)) print(get_similarity('for print if if if for', '1-1',", "print if if if for', '1-1', should_validate=False)) print(get_similarity('for print if if if if", "if if if', '1-1', should_validate=False)) print(get_similarity('for', '1-1', should_validate=False)) print(get_similarity('for print', '1-1', should_validate=False)) print(get_similarity('for", "if if for', '1-1', should_validate=False)) print(get_similarity('for print if if if if if', '1-1',", "print(get_similarity('for print if if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in range(10):\\n print(i)\\nr=2', '1-1'))", "print', '1-3', should_validate=False)) print(get_similarity('for print for if else if', '1-3', should_validate=False)) if __name__", "should_validate=False)) print(get_similarity('for print if if if if if', '1-1', should_validate=False)) print(get_similarity('for', '1-1', should_validate=False))", "if', '1-1', should_validate=False)) print(get_similarity('for print if if if for', '1-1', should_validate=False)) print(get_similarity('for print", "print', '1-1', should_validate=False)) print(get_similarity('for print if if if', '1-1', should_validate=False)) print(get_similarity('a\\na\\np\\na\\nfor i in" ]
[ "def test_preprocess_mds(sess): data = np.arange(25, dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data =", "6], [3, 7, 9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]]),", "= \"data, is_distance, k, real_result\" test_params = [ ( np.arange(50, dtype=float).reshape((2, 5, 5)),", "value cannot be negative one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50,", "data matrices do not have the same number of rows one = np.arange(25,", "from numpy.testing import assert_array_almost_equal as array_eq from sklearn.utils.testing import assert_raises import pytest import", "from the original multiview library test_names = \"data, is_distance, k, real_result\" test_params =", "-0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) ) ]", "@pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data, is_distance, k, real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result", "tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20., 0., -20., -40.], [20.,", "7, 9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]]), [False, False],", "is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # These", ") ] @pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data, is_distance, k, real_result): data_tf = tf.convert_to_tensor(data,", "is_distance, k=2)) from multiview.mvmds import mvmds as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True) print(mds_cpu(data,", "( np.arange(256, dtype=float).reshape((4, 8, 8)), [False] * 4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362,", "is_distane do not have the same length. 
one = np.arange(25, dtype=float).reshape((5, 5)) two", "np.arange(50, dtype=float).reshape((2, 5, 5)), [False] * 2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0.,", "# Data and is_distane do not have the same length. one = np.arange(25,", "test_preprocess_mds(sess): data = np.arange(25, dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data))", "-0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363,", "= np.arange(25, 50, dtype=float).reshape((5, 5)) data = [one, two] is_distance = [False, False,", "dtype=float).reshape((5, 5)) data = [one, two] is_distance = [False, False, False] mvmds_est =", "= np.arange(25, dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim =", "= np.array([[40., 20., 0., -20., -40.], [20., 10., 0., -10., -20.], [0., 0.,", "is_distance = [False, False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) #", "load_data_tf def test_preprocess_mds(sess): data = np.arange(25, dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data", "-0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591],", "import load_data_tf def test_preprocess_mds(sess): data = np.arange(25, dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data, dtype=tf.float32)", "( np.arange(108, dtype=float).reshape((3, 6, 6)), [False] * 3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616],", "k value cannot be negative one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25,", 
"-0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) ) ] @pytest.mark.parametrize(test_names, test_params) def", "# k value cannot be negative one = np.arange(25, dtype=float).reshape((5, 5)) two =", "np.arange(25, 50, dtype=float).reshape((5, 5)) data = [one, two] is_distance = [False, False] mvmds_est", "np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]], [[1, 4, 7], [2,", "mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # Sample data matrices do not", "[0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4, 8,", "= mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # These test results come from the", "-0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857],", "[3, 6, 9]]]), [False, False], 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966,", "[one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance)", "original multiview library test_names = \"data, is_distance, k, real_result\" test_params = [ (", "matrices do not have the same number of rows one = np.arange(25, dtype=float).reshape((5,", "= np.arange(25, 50, dtype=float).reshape((5, 5)) data = [one, two] is_distance = [False, False]", "[-0.5976143047, 0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4, 8, 8)), [False] * 4, 5, np.array([[0.5400617249,", "dtype=float).reshape((3, 6, 6)), [False] * 3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591],", "from multiview_gpu.util import load_data_tf def test_preprocess_mds(sess): data = np.arange(25, 
dtype=float).reshape((5, 5)) data =", "mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # These test results come from", "real_result\" test_params = [ ( np.arange(50, dtype=float).reshape((2, 5, 5)), [False] * 2, 2,", "[False, False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # Sample data", "20., 0., -20., -40.], [20., 10., 0., -10., -20.], [0., 0., 0., 0.,", "test_params) def test_mvmds_multiple(sess, data, is_distance, k, real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result =", "-0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4, 8, 8)), [False] *", "[one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance)", "2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ),", "two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) #", "0., -10., -20.], [0., 0., 0., 0., 0.], [-20., -10., 0., 10., 20.],", "mvmds_est.fit, data, is_distance) # k value cannot be negative one = np.arange(25, dtype=float).reshape((5,", "import numpy as np import tensorflow as tf from numpy.testing import assert_array_almost_equal as", "0., 0., 0., 0.], [-20., -10., 0., 10., 20.], [-40., -20., 0., 20.,", "10., 20.], [-40., -20., 0., 20., 40.]]) array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error(): #", "multiview library test_names = \"data, is_distance, k, real_result\" test_params = [ ( np.arange(50,", "np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ), ( np.array([[[2,", "[2, 5, 8], [3, 6, 9]]]), [False, False], 3, np.array([[-0.740466335, 
0.344058532, 0.5773502692], [0.0722697384,", "6)), [False] * 3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591],", "[-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) ) ] @pytest.mark.parametrize(test_names,", "), ( np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]], [[1, 4,", "np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.array([[[2, 1,", "0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884,", "assert_array_almost_equal as array_eq from sklearn.utils.testing import assert_raises import pytest import multiview_gpu.mvmds as mvmds", "-0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ), ( np.array([[[2, 1, 8], [4, 5, 6],", "0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.array([[[2, 1, 8],", "data = np.arange(25, dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim", "-0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099,", "is_distance) # Sample data matrices do not have the same number of rows", "import tensorflow as tf from numpy.testing import assert_array_almost_equal as array_eq from sklearn.utils.testing import", "), ( np.arange(108, dtype=float).reshape((3, 6, 6)), [False] * 3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828,", "0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) ) ] 
@pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data, is_distance, k,", "5)) data = [one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError,", "= sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds import mvmds as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10,", "not have the same number of rows one = np.arange(25, dtype=float).reshape((5, 5)) two", "0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) )", "[2, 5, 8], [3, 6, 9]]]), [False] * 2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692],", "1, 8], [4, 5, 6], [3, 7, 9]], [[1, 4, 7], [2, 5,", "= mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # k value cannot be negative one", "49, dtype=float).reshape((4, 6)) data = [one, two] is_distance = [False, False] mvmds_est =", "= [one, two] is_distance = [False, False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit,", "0.1199497301, -0.1154634166, 0.1151282954]]) ) ] @pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data, is_distance, k, real_result):", "[False, False], 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ),", "data, is_distance) # Sample data matrices do not have the same number of", "9]]]), [False, False], 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]])", "[0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826,", "9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]]), [False, False], 3,", "preprocessed_data = 
sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20., 0., -20., -40.], [20., 10., 0.,", "= sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20., 0., -20., -40.], [20., 10., 0., -10.,", "mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # These test results come from the original", "two = np.arange(25, 49, dtype=float).reshape((4, 6)) data = [one, two] is_distance = [False,", "as tf from numpy.testing import assert_array_almost_equal as array_eq from sklearn.utils.testing import assert_raises import", "0.0994851846], [0.632455532, -0.5969111078]]) ), ( np.array([[[2, 1, 8], [4, 5, 6], [3, 7,", "), ( np.arange(256, dtype=float).reshape((4, 8, 8)), [False] * 4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134,", "0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.arange(108, dtype=float).reshape((3, 6,", "array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error(): # Data and is_distane do not have the", "0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.array([[[2, 1, 8], [4, 5, 6], [3,", "4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249,", "have the same number of rows one = np.arange(25, dtype=float).reshape((5, 5)) two =", "be negative one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5, 5))", "False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # k value cannot be", "] @pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data, is_distance, k, real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32)", "[-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4, 8, 8)), [False] * 4,", 
"sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20., 0., -20., -40.], [20., 10., 0., -10., -20.],", "np import tensorflow as tf from numpy.testing import assert_array_almost_equal as array_eq from sklearn.utils.testing", "* 4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226],", "one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 49, dtype=float).reshape((4, 6)) data =", "8], [3, 6, 9]]]), [False] * 2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227,", "do not have the same length. one = np.arange(25, dtype=float).reshape((5, 5)) two =", "0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.arange(108, dtype=float).reshape((3, 6, 6)), [False] * 3,", "0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) ) ] @pytest.mark.parametrize(test_names, test_params)", "rows one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 49, dtype=float).reshape((4, 6)) data", "50, dtype=float).reshape((5, 5)) data = [one, two] is_distance = [False, False] mvmds_est =", "test_mvmds_error(): # Data and is_distane do not have the same length. 
one =", "( np.arange(50, dtype=float).reshape((2, 5, 5)), [False] * 2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924],", "8], [3, 6, 9]]]), [False, False], 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692],", "= [one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data,", "4, 7], [2, 5, 8], [3, 6, 9]]]), [False, False], 3, np.array([[-0.740466335, 0.344058532,", "0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.array([[[2, 1, 8], [4,", "data = [one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit,", "dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5, 5)) data = [one, two] is_distance", "-0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084],", "= np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5, 5)) data = [one,", "assert_raises(ValueError, mvmds_est.fit, data, is_distance) # k value cannot be negative one = np.arange(25,", "[3, 7, 9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]]), [False]", "[0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346,", "False], 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), (", "0., 20., 40.]]) array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error(): # Data and is_distane do", "[4, 5, 6], [3, 7, 9]], [[1, 4, 7], [2, 5, 8], [3,", "mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # k value cannot be negative", 
"[False] * 3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828,", "-0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) ) ] @pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data,", "0., 0.], [-20., -10., 0., 10., 20.], [-40., -20., 0., 20., 40.]]) array_eq(preprocessed_data,", "test results come from the original multiview library test_names = \"data, is_distance, k,", "6)) data = [one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError,", "sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds import mvmds as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True)", "[3, 7, 9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]]), [False,", "[-40., -20., 0., 20., 40.]]) array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error(): # Data and", "data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds import mvmds", "* 2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ),", "6, 9]]]), [False] * 2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966,", "7], [2, 5, 8], [3, 6, 9]]]), [False] * 2, 3, np.array([[-0.740466335, 0.344058532,", "8)), [False] * 4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249,", "= np.arange(25, 49, dtype=float).reshape((4, 6)) data = [one, two] is_distance = [False, False]", "import mvmds as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True) print(mds_cpu(data, is_distance, k=k)) array_eq(np.abs(result[:, 0]),", "np.arange(25, 50, dtype=float).reshape((5, 
5)) data = [one, two] is_distance = [False, False, False]", "import assert_raises import pytest import multiview_gpu.mvmds as mvmds from multiview_gpu.util import load_data_tf def", "the same length. one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5,", "-0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177,", "multiview_gpu.util import load_data_tf def test_preprocess_mds(sess): data = np.arange(25, dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data,", "dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20., 0., -20., -40.], [20., 10.,", "4, 7], [2, 5, 8], [3, 6, 9]]]), [False] * 2, 3, np.array([[-0.740466335,", "0., 0., 0.], [-20., -10., 0., 10., 20.], [-40., -20., 0., 20., 40.]])", "8, 8)), [False] * 4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427,", "# Sample data matrices do not have the same number of rows one", "is_distance) # k value cannot be negative one = np.arange(25, dtype=float).reshape((5, 5)) two", "[-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ), ( np.array([[[2, 1, 8],", "-0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]])", "0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4, 8, 8)),", "is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # k", "number of rows one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 49, dtype=float).reshape((4,", 
"-0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ), ( np.array([[[2, 1,", "= np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 49, dtype=float).reshape((4, 6)) data = [one,", "two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) #", "sklearn.utils.testing import assert_raises import pytest import multiview_gpu.mvmds as mvmds from multiview_gpu.util import load_data_tf", "0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694,", "-10., 0., 10., 20.], [-40., -20., 0., 20., 40.]]) array_eq(preprocessed_data, sim, decimal=4) def", "dtype=float).reshape((5, 5)) two = np.arange(25, 49, dtype=float).reshape((4, 6)) data = [one, two] is_distance", "results come from the original multiview library test_names = \"data, is_distance, k, real_result\"", "as np import tensorflow as tf from numpy.testing import assert_array_almost_equal as array_eq from", "negative one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5, 5)) data", "( np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]], [[1, 4, 7],", "[0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ), ( np.array([[[2, 1, 8], [4, 5, 6], [3,", "-0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651,", "result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds import mvmds as mds_cpu print(\"Multiview Result\")", "-40.], [20., 10., 0., -10., -20.], [0., 0., 0., 0., 0.], [-20., -10.,", "5)) two = np.arange(25, 49, dtype=float).reshape((4, 6)) data = [one, two] is_distance =", "2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 
0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]])", "is_distance, k, real_result\" test_params = [ ( np.arange(50, dtype=float).reshape((2, 5, 5)), [False] *", "one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5, 5)) data =", "0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749,", "np.arange(25, dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40.,", "np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469,", "10., 0., -10., -20.], [0., 0., 0., 0., 0.], [-20., -10., 0., 10.,", "Sample data matrices do not have the same number of rows one =", "[20., 10., 0., -10., -20.], [0., 0., 0., 0., 0.], [-20., -10., 0.,", "mvmds from multiview_gpu.util import load_data_tf def test_preprocess_mds(sess): data = np.arange(25, dtype=float).reshape((5, 5)) data", "0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675,", "two = np.arange(25, 50, dtype=float).reshape((5, 5)) data = [one, two] is_distance = [False,", "# These test results come from the original multiview library test_names = \"data,", "0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762,", "[-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ), ( np.array([[[2, 1, 8], [4, 5,", "[0.0722697384, 
-0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.array([[[2, 1, 8], [4, 5,", "assert_raises(ValueError, mvmds_est.fit, data, is_distance) # Sample data matrices do not have the same", "do not have the same number of rows one = np.arange(25, dtype=float).reshape((5, 5))", "20.], [-40., -20., 0., 20., 40.]]) array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error(): # Data", "50, dtype=float).reshape((5, 5)) data = [one, two] is_distance = [False, False, False] mvmds_est", "real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds import", "8], [4, 5, 6], [3, 7, 9]], [[1, 4, 7], [2, 5, 8],", "0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249,", "5, 6], [3, 7, 9]], [[1, 4, 7], [2, 5, 8], [3, 6,", "[0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795,", "np.arange(108, dtype=float).reshape((3, 6, 6)), [False] * 3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609,", "[-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) ) ] @pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data, is_distance,", "= [False, False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # Sample", "= [False, False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # These test", "0.4692333907, 0.5773502692]]) ), ( np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]],", "5)), [False] * 2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846],", "from 
multiview.mvmds import mvmds as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True) print(mds_cpu(data, is_distance, k=k))", "= [one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data,", "-0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ), ( np.array([[[2, 1, 8], [4,", "[0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.arange(108, dtype=float).reshape((3, 6, 6)), [False] * 3, 2,", "as array_eq from sklearn.utils.testing import assert_raises import pytest import multiview_gpu.mvmds as mvmds from", "0.4692333907, 0.5773502692]]) ), ( np.arange(108, dtype=float).reshape((3, 6, 6)), [False] * 3, 2, np.array([[0.5976143047,", "data, is_distance) # k value cannot be negative one = np.arange(25, dtype=float).reshape((5, 5))", "-0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582],", "come from the original multiview library test_names = \"data, is_distance, k, real_result\" test_params", "[-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4, 8, 8)), [False]", "[False] * 4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924,", "-0.1020481616], [-0.5976143047, 0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4, 8, 8)), [False] * 4, 5,", "dtype=float).reshape((4, 8, 8)), [False] * 4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749,", "5, 8], [3, 6, 9]]]), [False] * 2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384,", "= [ ( np.arange(50, dtype=float).reshape((2, 5, 5)), [False] * 2, 2, np.array([[-0.632455532, -0.1989703693],", "not have 
the same length. one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25,", "[False, False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # These test results", "0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224,", "numpy as np import tensorflow as tf from numpy.testing import assert_array_almost_equal as array_eq", "5)) data = [one, two] is_distance = [False, False, False] mvmds_est = mvmds.MVMDS(k=2)", "* 3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616],", "-0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244, -0.0825078346, 0.0821366762, -0.0818364084], [-0.077151675, -0.3293040024,", "dtype=float).reshape((4, 6)) data = [one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=2)", "False] mvmds_est = mvmds.MVMDS(k=-2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # These test results come", "5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973,", "= tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20., 0., -20., -40.],", "-0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675, 0.0746392244,", "9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]]), [False] * 2,", "\"data, is_distance, k, real_result\" test_params = [ ( np.arange(50, dtype=float).reshape((2, 5, 5)), [False]", "np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, 
-0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.arange(108, dtype=float).reshape((3,", "0.5773502692]]) ), ( np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]], [[1,", "mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # k value cannot be negative one =", "False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # Sample data matrices do", "-0.1154634166, 0.1151282954]]) ) ] @pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data, is_distance, k, real_result): data_tf", "assert_raises(ValueError, mvmds_est.fit, data, is_distance) # These test results come from the original multiview", "5, 8], [3, 6, 9]]]), [False, False], 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227,", "is_distance, k, real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from", "data = [one, two] is_distance = [False, False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError,", "tensorflow as tf from numpy.testing import assert_array_almost_equal as array_eq from sklearn.utils.testing import assert_raises", "test_mvmds_multiple(sess, data, is_distance, k, real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance,", "numpy.testing import assert_array_almost_equal as array_eq from sklearn.utils.testing import assert_raises import pytest import multiview_gpu.mvmds", "test_params = [ ( np.arange(50, dtype=float).reshape((2, 5, 5)), [False] * 2, 2, np.array([[-0.632455532,", "assert_raises import pytest import multiview_gpu.mvmds as mvmds from multiview_gpu.util import load_data_tf def test_preprocess_mds(sess):", "decimal=4) def test_mvmds_error(): # Data and is_distane do not have the same length.", "as mvmds from multiview_gpu.util import load_data_tf def test_preprocess_mds(sess): data = 
np.arange(25, dtype=float).reshape((5, 5))", "5, 5)), [False] * 2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766,", "k, real_result\" test_params = [ ( np.arange(50, dtype=float).reshape((2, 5, 5)), [False] * 2,", "[False] * 2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532,", "-0.5969111078]]) ), ( np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]], [[1,", "from sklearn.utils.testing import assert_raises import pytest import multiview_gpu.mvmds as mvmds from multiview_gpu.util import", "7], [2, 5, 8], [3, 6, 9]]]), [False, False], 3, np.array([[-0.740466335, 0.344058532, 0.5773502692],", "-0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.array([[[2, 1, 8], [4, 5, 6],", "and is_distane do not have the same length. one = np.arange(25, dtype=float).reshape((5, 5))", "[0., 0., 0., 0., 0.], [-20., -10., 0., 10., 20.], [-40., -20., 0.,", "tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds import mvmds as mds_cpu", "3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.array([[[2,", "two] is_distance = [False, False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance)", "def test_mvmds_multiple(sess, data, is_distance, k, real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf,", "sim = np.array([[40., 20., 0., -20., -40.], [20., 10., 0., -10., -20.], [0.,", "pytest import multiview_gpu.mvmds as mvmds from multiview_gpu.util import load_data_tf def test_preprocess_mds(sess): data =", "np.array([[40., 20., 0., -20., -40.], [20., 10., 0., -10., -20.], [0., 0., 0.,", "np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 
49, dtype=float).reshape((4, 6)) data = [one, two]", "k=2)) from multiview.mvmds import mvmds as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True) print(mds_cpu(data, is_distance,", "* 2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]])", "k, real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds", "data = [one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit,", "40.]]) array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error(): # Data and is_distane do not have", "7, 9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]]), [False] *", "mvmds_est.fit, data, is_distance) # Sample data matrices do not have the same number", "dtype=float).reshape((5, 5)) data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20.,", "[-20., -10., 0., 10., 20.], [-40., -20., 0., 20., 40.]]) array_eq(preprocessed_data, sim, decimal=4)", "0.1151282954]]) ) ] @pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess, data, is_distance, k, real_result): data_tf =", "mvmds_est.fit, data, is_distance) # These test results come from the original multiview library", "0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.arange(108, dtype=float).reshape((3, 6, 6)),", "def test_mvmds_error(): # Data and is_distane do not have the same length. 
one", "test_names = \"data, is_distance, k, real_result\" test_params = [ ( np.arange(50, dtype=float).reshape((2, 5,", "0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166, 0.1151282954]]) ) ] @pytest.mark.parametrize(test_names, test_params) def test_mvmds_multiple(sess,", "data, is_distance) # These test results come from the original multiview library test_names", "of rows one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 49, dtype=float).reshape((4, 6))", "[[1, 4, 7], [2, 5, 8], [3, 6, 9]]]), [False, False], 3, np.array([[-0.740466335,", "9]]]), [False] * 2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907,", "np.arange(25, 49, dtype=float).reshape((4, 6)) data = [one, two] is_distance = [False, False] mvmds_est", "[-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301,", "0.3516817362, -0.3523045507], [0.3857583749, 0.1413371427, -0.1352081249, 0.1420639924, -0.1416263226], [0.2314550249, -0.045950973, -0.0591191469, 0.0594984884, -0.0601554582], [0.077151675,", "mvmds as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True) print(mds_cpu(data, is_distance, k=k)) array_eq(np.abs(result[:, 0]), np.abs(real_result[:,", "[ ( np.arange(50, dtype=float).reshape((2, 5, 5)), [False] * 2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766,", "[0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.array([[[2, 1, 8], [4, 5, 6], [3, 7,", "library test_names = \"data, is_distance, k, real_result\" test_params = [ ( np.arange(50, dtype=float).reshape((2,", "0., 10., 20.], [-40., -20., 0., 20., 40.]]) array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error():", "[0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( 
np.arange(108, dtype=float).reshape((3, 6, 6)), [False]", "np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5, 5)) data = [one, two]", "import multiview_gpu.mvmds as mvmds from multiview_gpu.util import load_data_tf def test_preprocess_mds(sess): data = np.arange(25,", "same length. one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5, 5))", "-20.], [0., 0., 0., 0., 0.], [-20., -10., 0., 10., 20.], [-40., -20.,", "the original multiview library test_names = \"data, is_distance, k, real_result\" test_params = [", "2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), (", "import assert_array_almost_equal as array_eq from sklearn.utils.testing import assert_raises import pytest import multiview_gpu.mvmds as", "These test results come from the original multiview library test_names = \"data, is_distance,", "np.arange(256, dtype=float).reshape((4, 8, 8)), [False] * 4, 5, np.array([[0.5400617249, 0.4806639344, -0.3528596134, 0.3516817362, -0.3523045507],", "0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644],", "dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds import mvmds as mds_cpu print(\"Multiview", "as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True) print(mds_cpu(data, is_distance, k=k)) array_eq(np.abs(result[:, 0]), np.abs(real_result[:, 0]),", "data, is_distance, k, real_result): data_tf = tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2))", "3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.arange(108,", "5)) 
data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20., 0.,", "[one, two] is_distance = [False, False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data,", "= [False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # k value", "dtype=float).reshape((5, 5)) data = [one, two] is_distance = [False, False] mvmds_est = mvmds.MVMDS(k=-2)", "0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4,", "Data and is_distane do not have the same length. one = np.arange(25, dtype=float).reshape((5,", "0., -20., -40.], [20., 10., 0., -10., -20.], [0., 0., 0., 0., 0.],", "6, 6)), [False] * 3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609,", "2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213], [0.316227766, 0.0994851846], [0.632455532, -0.5969111078]]) ), (", "3, 2, np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047,", "multiview_gpu.mvmds as mvmds from multiview_gpu.util import load_data_tf def test_preprocess_mds(sess): data = np.arange(25, dtype=float).reshape((5,", "-10., -20.], [0., 0., 0., 0., 0.], [-20., -10., 0., 10., 20.], [-40.,", "[0.632455532, -0.5969111078]]) ), ( np.array([[[2, 1, 8], [4, 5, 6], [3, 7, 9]],", "[False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # k value cannot", "cannot be negative one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5,", "the same number of rows one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25,", 
"[3, 6, 9]]]), [False] * 2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692],", "mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True) print(mds_cpu(data, is_distance, k=k)) array_eq(np.abs(result[:, 0]), np.abs(real_result[:, 0]), decimal=4)", "same number of rows one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 49,", "0.5773502692]]) ), ( np.arange(108, dtype=float).reshape((3, 6, 6)), [False] * 3, 2, np.array([[0.5976143047, 0.6346897855],", "-0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249,", "tf from numpy.testing import assert_array_almost_equal as array_eq from sklearn.utils.testing import assert_raises import pytest", "0.], [-20., -10., 0., 10., 20.], [-40., -20., 0., 20., 40.]]) array_eq(preprocessed_data, sim,", "6, 9]]]), [False, False], 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907,", "array_eq from sklearn.utils.testing import assert_raises import pytest import multiview_gpu.mvmds as mvmds from multiview_gpu.util", "= mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # Sample data matrices do not have", "0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327, 0.8368890363, -0.8369952644], [-0.5400617249, 0.0058598224, 0.1199497301, -0.1154634166,", "-20., 0., 20., 40.]]) array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error(): # Data and is_distane", "np.array([[0.5976143047, 0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ),", "have the same length. 
one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50,", "[0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ), ( np.arange(256,", "-0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]]) ), ( np.arange(108, dtype=float).reshape((3, 6, 6)), [False] *", "[-0.077151675, -0.3293040024, 0.3377397826, -0.3387822177, 0.3380904857], [-0.2314550249, 0.1219703099, -0.1324274795, 0.1317680694, -0.1318591591], [-0.3857583749, 0.7860987651, -0.8372055327,", "mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # Sample data matrices do not have the", "[False] * 2, 3, np.array([[-0.740466335, 0.344058532, 0.5773502692], [0.0722697384, -0.8132919227, 0.5773502692], [0.6681965966, 0.4692333907, 0.5773502692]])", "length. one = np.arange(25, dtype=float).reshape((5, 5)) two = np.arange(25, 50, dtype=float).reshape((5, 5)) data", "0.759138763]]) ), ( np.arange(256, dtype=float).reshape((4, 8, 8)), [False] * 4, 5, np.array([[0.5400617249, 0.4806639344,", "0.6346897855], [0.3585685828, 0.1020481616], [0.1195228609, 0.0049779591], [-0.1195228609, -0.0049779591], [-0.3585685828, -0.1020481616], [-0.5976143047, 0.759138763]]) ), (", "sim, decimal=4) def test_mvmds_error(): # Data and is_distane do not have the same", "data = tf.convert_to_tensor(data, dtype=tf.float32) preprocessed_data = sess.run(mvmds.preprocess_mvmds(data)) sim = np.array([[40., 20., 0., -20.,", "dtype=float).reshape((2, 5, 5)), [False] * 2, 2, np.array([[-0.632455532, -0.1989703693], [-0.316227766, -0.6963962924], [-0., -0.3305190213],", "import pytest import multiview_gpu.mvmds as mvmds from multiview_gpu.util import load_data_tf def test_preprocess_mds(sess): data", "multiview.mvmds import mvmds as mds_cpu print(\"Multiview Result\") np.set_printoptions(precision=10, suppress=True) print(mds_cpu(data, is_distance, k=k)) array_eq(np.abs(result[:,", 
"<gh_stars>1-10 import numpy as np import tensorflow as tf from numpy.testing import assert_array_almost_equal", "-20., -40.], [20., 10., 0., -10., -20.], [0., 0., 0., 0., 0.], [-20.,", "[[1, 4, 7], [2, 5, 8], [3, 6, 9]]]), [False] * 2, 3,", "20., 40.]]) array_eq(preprocessed_data, sim, decimal=4) def test_mvmds_error(): # Data and is_distane do not", "= tf.convert_to_tensor(data, dtype=tf.float32) result = sess.run(mvmds.mvmds(data_tf, is_distance, k=2)) from multiview.mvmds import mvmds as", "False, False] mvmds_est = mvmds.MVMDS(k=2) assert_raises(ValueError, mvmds_est.fit, data, is_distance) # Sample data matrices", "is_distance) # These test results come from the original multiview library test_names =", "5)) two = np.arange(25, 50, dtype=float).reshape((5, 5)) data = [one, two] is_distance =" ]
[ "variable\") data = store.load() yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR') fd,", "tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as f: f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd +", "store.load() yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR') fd, temp_path = tempfile.mkstemp(prefix='timefred.')", "from timefred.store import store def edit(): if \"EDITOR\" not in os.environ: raise NoEditor(\"Please", "os.close(fd) os.remove(temp_path) try: data = yaml.load(yml) except: raise InvalidYAML(\"Oops, that YAML doesn't appear", "allow_unicode=True) cmd = os.getenv('EDITOR') fd, temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as f:", "default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR') fd, temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as", "yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR') fd, temp_path = tempfile.mkstemp(prefix='timefred.') with", "in os.environ: raise NoEditor(\"Please set the 'EDITOR' environment variable\") data = store.load() yml", "subprocess.check_call(cmd + ' ' + temp_path, shell=True) yml = f.read() f.truncate() os.close(fd) os.remove(temp_path)", "f.seek(0) subprocess.check_call(cmd + ' ' + temp_path, shell=True) yml = f.read() f.truncate() os.close(fd)", "data = yaml.load(yml) except: raise InvalidYAML(\"Oops, that YAML doesn't appear to be valid!\")", "open(temp_path, \"r+\") as f: f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd + ' '", "store def edit(): if \"EDITOR\" not in os.environ: raise NoEditor(\"Please set the 'EDITOR'", "\"r+\") as f: f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd + ' ' +", "environment variable\") data = store.load() yml = yaml.safe_dump(data, 
default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR')", "subprocess import tempfile import yaml from timefred.error import NoEditor, InvalidYAML from timefred.store import", "import NoEditor, InvalidYAML from timefred.store import store def edit(): if \"EDITOR\" not in", "os.getenv('EDITOR') fd, temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as f: f.write(yml.replace('\\n- ', '\\n\\n-", "fd, temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as f: f.write(yml.replace('\\n- ', '\\n\\n- '))", "f.read() f.truncate() os.close(fd) os.remove(temp_path) try: data = yaml.load(yml) except: raise InvalidYAML(\"Oops, that YAML", "f.truncate() os.close(fd) os.remove(temp_path) try: data = yaml.load(yml) except: raise InvalidYAML(\"Oops, that YAML doesn't", "yml = f.read() f.truncate() os.close(fd) os.remove(temp_path) try: data = yaml.load(yml) except: raise InvalidYAML(\"Oops,", "= yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR') fd, temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path,", "import subprocess import tempfile import yaml from timefred.error import NoEditor, InvalidYAML from timefred.store", "temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as f: f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0)", "= tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as f: f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd", "shell=True) yml = f.read() f.truncate() os.close(fd) os.remove(temp_path) try: data = yaml.load(yml) except: raise", "import tempfile import yaml from timefred.error import NoEditor, InvalidYAML from timefred.store import store", "', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd + ' ' + temp_path, shell=True) yml =", "try: data = yaml.load(yml) except: raise InvalidYAML(\"Oops, that YAML doesn't appear to be", "import store def edit(): if \"EDITOR\" 
not in os.environ: raise NoEditor(\"Please set the", "NoEditor, InvalidYAML from timefred.store import store def edit(): if \"EDITOR\" not in os.environ:", "raise NoEditor(\"Please set the 'EDITOR' environment variable\") data = store.load() yml = yaml.safe_dump(data,", "yaml from timefred.error import NoEditor, InvalidYAML from timefred.store import store def edit(): if", "<filename>timefred/action/edit.py import os import subprocess import tempfile import yaml from timefred.error import NoEditor,", "from timefred.error import NoEditor, InvalidYAML from timefred.store import store def edit(): if \"EDITOR\"", "not in os.environ: raise NoEditor(\"Please set the 'EDITOR' environment variable\") data = store.load()", "os.environ: raise NoEditor(\"Please set the 'EDITOR' environment variable\") data = store.load() yml =", "import os import subprocess import tempfile import yaml from timefred.error import NoEditor, InvalidYAML", "'EDITOR' environment variable\") data = store.load() yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd =", "= os.getenv('EDITOR') fd, temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as f: f.write(yml.replace('\\n- ',", "import yaml from timefred.error import NoEditor, InvalidYAML from timefred.store import store def edit():", "def edit(): if \"EDITOR\" not in os.environ: raise NoEditor(\"Please set the 'EDITOR' environment", "edit(): if \"EDITOR\" not in os.environ: raise NoEditor(\"Please set the 'EDITOR' environment variable\")", "+ ' ' + temp_path, shell=True) yml = f.read() f.truncate() os.close(fd) os.remove(temp_path) try:", "+ temp_path, shell=True) yml = f.read() f.truncate() os.close(fd) os.remove(temp_path) try: data = yaml.load(yml)", "set the 'EDITOR' environment variable\") data = store.load() yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True)", "NoEditor(\"Please set the 'EDITOR' environment variable\") data = store.load() yml = yaml.safe_dump(data, 
default_flow_style=False,", "data = store.load() yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR') fd, temp_path", "= yaml.load(yml) except: raise InvalidYAML(\"Oops, that YAML doesn't appear to be valid!\") store.dump(data)", "os import subprocess import tempfile import yaml from timefred.error import NoEditor, InvalidYAML from", "if \"EDITOR\" not in os.environ: raise NoEditor(\"Please set the 'EDITOR' environment variable\") data", "temp_path, shell=True) yml = f.read() f.truncate() os.close(fd) os.remove(temp_path) try: data = yaml.load(yml) except:", "timefred.error import NoEditor, InvalidYAML from timefred.store import store def edit(): if \"EDITOR\" not", "\"EDITOR\" not in os.environ: raise NoEditor(\"Please set the 'EDITOR' environment variable\") data =", "tempfile import yaml from timefred.error import NoEditor, InvalidYAML from timefred.store import store def", "cmd = os.getenv('EDITOR') fd, temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\") as f: f.write(yml.replace('\\n-", "= f.read() f.truncate() os.close(fd) os.remove(temp_path) try: data = yaml.load(yml) except: raise InvalidYAML(\"Oops, that", "')) f.seek(0) subprocess.check_call(cmd + ' ' + temp_path, shell=True) yml = f.read() f.truncate()", "the 'EDITOR' environment variable\") data = store.load() yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd", "f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd + ' ' + temp_path, shell=True) yml", "timefred.store import store def edit(): if \"EDITOR\" not in os.environ: raise NoEditor(\"Please set", "= store.load() yml = yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR') fd, temp_path =", "'\\n\\n- ')) f.seek(0) subprocess.check_call(cmd + ' ' + temp_path, shell=True) yml = f.read()", "yaml.safe_dump(data, default_flow_style=False, allow_unicode=True) cmd = os.getenv('EDITOR') fd, 
temp_path = tempfile.mkstemp(prefix='timefred.') with open(temp_path, \"r+\")", "InvalidYAML from timefred.store import store def edit(): if \"EDITOR\" not in os.environ: raise", "f: f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd + ' ' + temp_path, shell=True)", "os.remove(temp_path) try: data = yaml.load(yml) except: raise InvalidYAML(\"Oops, that YAML doesn't appear to", "with open(temp_path, \"r+\") as f: f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd + '", "as f: f.write(yml.replace('\\n- ', '\\n\\n- ')) f.seek(0) subprocess.check_call(cmd + ' ' + temp_path,", "' + temp_path, shell=True) yml = f.read() f.truncate() os.close(fd) os.remove(temp_path) try: data =", "' ' + temp_path, shell=True) yml = f.read() f.truncate() os.close(fd) os.remove(temp_path) try: data" ]
[ "= new_node self.size += 1 def find(self, d): this_node = self.root while this_node", "new_node self.size += 1 def find(self, d): this_node = self.root while this_node is", "p=None): self.data = d self.next_node = n self.prev_node = p def __str__(self): return", "# data is in non-root prev_node.next_node = this_node.next_node else: # data is in", "self.size -= 1 return True # data removed else: prev_node = this_node this_node", "None def remove(self, d): this_node = self.root prev_node = None while this_node is", "= this_node this_node = this_node.next_node return False # data not found def print_list(self):", "this_node this_node = this_node.next_node return False # data not found def print_list(self): this_node", "prev_node.next_node = this_node.next_node else: # data is in root node self.root = this_node.next_node", "else: # data is in root node self.root = this_node.next_node self.size -= 1", "= None while this_node is not None: if this_node.data == d: if prev_node", "d): this_node = self.root prev_node = None while this_node is not None: if", "__init__(self, d, n=None, p=None): self.data = d self.next_node = n self.prev_node = p", "def __str__(self): return '(' + str(self.data) + ')' class LinkedList: def __init__(self, r=None):", "= self.root prev_node = None while this_node is not None: if this_node.data ==", "# data is in root node self.root = this_node.next_node self.size -= 1 return", "def remove(self, d): this_node = self.root prev_node = None while this_node is not", "non-root prev_node.next_node = this_node.next_node else: # data is in root node self.root =", "prev_node = None while this_node is not None: if this_node.data == d: if", "while this_node is not None: if this_node.data == d: if prev_node is not", "= this_node.next_node self.size -= 1 return True # data removed else: prev_node =", "n=None, p=None): self.data = d self.next_node = n self.prev_node = p def __str__(self):", "self.root while this_node is not None: if this_node.data == d: return 
d else:", "self.root = new_node self.size += 1 def find(self, d): this_node = self.root while", "add(self, d): new_node = Node(d, self.root) self.root = new_node self.size += 1 def", "d else: this_node = this_node.next_node return None def remove(self, d): this_node = self.root", "self.data = d self.next_node = n self.prev_node = p def __str__(self): return '('", "= self.root while this_node is not None: if this_node.data == d: return d", "data is in root node self.root = this_node.next_node self.size -= 1 return True", "print_list(self): this_node = self.root while this_node is not None: print(this_node, end='->') this_node =", "self.root = this_node.next_node self.size -= 1 return True # data removed else: prev_node", "def find(self, d): this_node = self.root while this_node is not None: if this_node.data", "d self.next_node = n self.prev_node = p def __str__(self): return '(' + str(self.data)", "node self.root = this_node.next_node self.size -= 1 return True # data removed else:", "prev_node = this_node this_node = this_node.next_node return False # data not found def", "1 def find(self, d): this_node = self.root while this_node is not None: if", "self.prev_node = p def __str__(self): return '(' + str(self.data) + ')' class LinkedList:", "data is in non-root prev_node.next_node = this_node.next_node else: # data is in root", "return '(' + str(self.data) + ')' class LinkedList: def __init__(self, r=None): self.root =", "this_node = self.root while this_node is not None: print(this_node, end='->') this_node = this_node.next_node", "this_node.next_node else: # data is in root node self.root = this_node.next_node self.size -=", "d: if prev_node is not None: # data is in non-root prev_node.next_node =", "__init__(self, r=None): self.root = r self.size = 0 def add(self, d): new_node =", "None: if this_node.data == d: return d else: this_node = this_node.next_node return None", "this_node = this_node.next_node return False # data not found def print_list(self): 
this_node =", "not found def print_list(self): this_node = self.root while this_node is not None: print(this_node,", "this_node = self.root while this_node is not None: if this_node.data == d: return", "class Node: def __init__(self, d, n=None, p=None): self.data = d self.next_node = n", "prev_node is not None: # data is in non-root prev_node.next_node = this_node.next_node else:", "class LinkedList: def __init__(self, r=None): self.root = r self.size = 0 def add(self,", "self.next_node = n self.prev_node = p def __str__(self): return '(' + str(self.data) +", "None: if this_node.data == d: if prev_node is not None: # data is", "# data removed else: prev_node = this_node this_node = this_node.next_node return False #", "else: this_node = this_node.next_node return None def remove(self, d): this_node = self.root prev_node", "return False # data not found def print_list(self): this_node = self.root while this_node", "def print_list(self): this_node = self.root while this_node is not None: print(this_node, end='->') this_node", "= Node(d, self.root) self.root = new_node self.size += 1 def find(self, d): this_node", "+= 1 def find(self, d): this_node = self.root while this_node is not None:", "return None def remove(self, d): this_node = self.root prev_node = None while this_node", "str(self.data) + ')' class LinkedList: def __init__(self, r=None): self.root = r self.size =", "+ str(self.data) + ')' class LinkedList: def __init__(self, r=None): self.root = r self.size", "found def print_list(self): this_node = self.root while this_node is not None: print(this_node, end='->')", "in non-root prev_node.next_node = this_node.next_node else: # data is in root node self.root", "False # data not found def print_list(self): this_node = self.root while this_node is", "= self.root while this_node is not None: print(this_node, end='->') this_node = this_node.next_node print('None')", "in root node self.root = this_node.next_node self.size -= 1 return True # data", "= 
this_node.next_node return False # data not found def print_list(self): this_node = self.root", "d, n=None, p=None): self.data = d self.next_node = n self.prev_node = p def", "is in root node self.root = this_node.next_node self.size -= 1 return True #", "return d else: this_node = this_node.next_node return None def remove(self, d): this_node =", "data not found def print_list(self): this_node = self.root while this_node is not None:", "self.size = 0 def add(self, d): new_node = Node(d, self.root) self.root = new_node", "def __init__(self, r=None): self.root = r self.size = 0 def add(self, d): new_node", "d: return d else: this_node = this_node.next_node return None def remove(self, d): this_node", "not None: if this_node.data == d: return d else: this_node = this_node.next_node return", "is not None: if this_node.data == d: if prev_node is not None: #", "== d: return d else: this_node = this_node.next_node return None def remove(self, d):", "if this_node.data == d: return d else: this_node = this_node.next_node return None def", "= 0 def add(self, d): new_node = Node(d, self.root) self.root = new_node self.size", "not None: if this_node.data == d: if prev_node is not None: # data", "'(' + str(self.data) + ')' class LinkedList: def __init__(self, r=None): self.root = r", "Node: def __init__(self, d, n=None, p=None): self.data = d self.next_node = n self.prev_node", "= this_node.next_node return None def remove(self, d): this_node = self.root prev_node = None", "d): new_node = Node(d, self.root) self.root = new_node self.size += 1 def find(self,", "= r self.size = 0 def add(self, d): new_node = Node(d, self.root) self.root", "= this_node.next_node else: # data is in root node self.root = this_node.next_node self.size", "')' class LinkedList: def __init__(self, r=None): self.root = r self.size = 0 def", "LinkedList: def __init__(self, r=None): self.root = r self.size = 0 def add(self, d):", "= d self.next_node = n self.prev_node = p def __str__(self): return '(' +", 
"n self.prev_node = p def __str__(self): return '(' + str(self.data) + ')' class", "this_node.data == d: if prev_node is not None: # data is in non-root", "while this_node is not None: if this_node.data == d: return d else: this_node", "p def __str__(self): return '(' + str(self.data) + ')' class LinkedList: def __init__(self,", "0 def add(self, d): new_node = Node(d, self.root) self.root = new_node self.size +=", "new_node = Node(d, self.root) self.root = new_node self.size += 1 def find(self, d):", "def __init__(self, d, n=None, p=None): self.data = d self.next_node = n self.prev_node =", "this_node is not None: if this_node.data == d: if prev_node is not None:", "this_node is not None: if this_node.data == d: return d else: this_node =", "not None: # data is in non-root prev_node.next_node = this_node.next_node else: # data", "self.root = r self.size = 0 def add(self, d): new_node = Node(d, self.root)", "True # data removed else: prev_node = this_node this_node = this_node.next_node return False", "# data not found def print_list(self): this_node = self.root while this_node is not", "== d: if prev_node is not None: # data is in non-root prev_node.next_node", "Node(d, self.root) self.root = new_node self.size += 1 def find(self, d): this_node =", "return True # data removed else: prev_node = this_node this_node = this_node.next_node return", "find(self, d): this_node = self.root while this_node is not None: if this_node.data ==", "this_node = this_node.next_node return None def remove(self, d): this_node = self.root prev_node =", "remove(self, d): this_node = self.root prev_node = None while this_node is not None:", "1 return True # data removed else: prev_node = this_node this_node = this_node.next_node", "__str__(self): return '(' + str(self.data) + ')' class LinkedList: def __init__(self, r=None): self.root", "if prev_node is not None: # data is in non-root prev_node.next_node = this_node.next_node", "this_node.next_node return False # data not found def 
print_list(self): this_node = self.root while", "is not None: if this_node.data == d: return d else: this_node = this_node.next_node", "is not None: # data is in non-root prev_node.next_node = this_node.next_node else: #", "this_node.next_node return None def remove(self, d): this_node = self.root prev_node = None while", "r self.size = 0 def add(self, d): new_node = Node(d, self.root) self.root =", "is in non-root prev_node.next_node = this_node.next_node else: # data is in root node", "= n self.prev_node = p def __str__(self): return '(' + str(self.data) + ')'", "self.root) self.root = new_node self.size += 1 def find(self, d): this_node = self.root", "root node self.root = this_node.next_node self.size -= 1 return True # data removed", "this_node.data == d: return d else: this_node = this_node.next_node return None def remove(self,", "None while this_node is not None: if this_node.data == d: if prev_node is", "-= 1 return True # data removed else: prev_node = this_node this_node =", "def add(self, d): new_node = Node(d, self.root) self.root = new_node self.size += 1", "data removed else: prev_node = this_node this_node = this_node.next_node return False # data", "r=None): self.root = r self.size = 0 def add(self, d): new_node = Node(d,", "d): this_node = self.root while this_node is not None: if this_node.data == d:", "removed else: prev_node = this_node this_node = this_node.next_node return False # data not", "if this_node.data == d: if prev_node is not None: # data is in", "self.root prev_node = None while this_node is not None: if this_node.data == d:", "this_node = self.root prev_node = None while this_node is not None: if this_node.data", "+ ')' class LinkedList: def __init__(self, r=None): self.root = r self.size = 0", "this_node.next_node self.size -= 1 return True # data removed else: prev_node = this_node", "self.size += 1 def find(self, d): this_node = self.root while this_node is not", "None: # data is in non-root prev_node.next_node = 
this_node.next_node else: # data is", "= p def __str__(self): return '(' + str(self.data) + ')' class LinkedList: def", "else: prev_node = this_node this_node = this_node.next_node return False # data not found" ]
[ "shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias", "Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is", "head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor`", "model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.", "this strategy only for a decoder, orphan tokens, i.e. those tokens which do", "= self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len,", "relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): \"\"\"Compute binned relative position bias\"\"\"", "and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] #", "\"\"\" Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads", "def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)", "encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if", "position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:]", "* self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else:", "`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states", "} return dummy_inputs def _init_weights(self, module): \"\"\"Initialize the weights\"\"\" factor = self.config.initializer_factor #", "else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value,", "= values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None,", "if past is used if past is not None: input_ids = input_ids[:, -1:]", "can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are", "layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1,", "LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0,", "past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape", "encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown", "and # limitations under the License. \"\"\" PyTorch LongT5 model.\"\"\" import copy import", "* block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads)", "= nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias =", "else key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states):", "> 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None", "dimension length is not a multiple of `block_len`, it will be padded first", "inf) # half of the buckets are for exact increments in positions max_exact", "the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this", "layer_outputs[4 if output_attentions else 3] # append next layer key value states if", "class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def", "= position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias =", "] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config)", "LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor *", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) #", "block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:,", "dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) -", "def project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects hidden states correctly to key/query states\"\"\" if", "self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == \"local\": attention_layer", "outputting raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel):", ") assert 
reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past +", "try a very long input. >>> input_ids = tokenizer( ... \"summarize: \" +", "blocks of a given `block_len` along the given `dim`. If the dimension length", "head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask", "states\" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None,", "used as a decoder\" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if", "_make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes 3-blocked relative position ids for local attention.\"\"\" position_ids", "try a very long encoder input. >>> input_ids = tokenizer( ... 100 *", "also be used by default. 
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`,", "attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if", "pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len) +", "global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size,", ") if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length,", ">>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model =", "the last `decoder_input_ids` (those that don't have their past key value states given", "keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states =", "_set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied from", "Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the", "int ) -> torch.Tensor: \"\"\"Compute individual block aggregates by summing over individual blocks.\"\"\"", "+ 100 * \"studies have shown that owning a dog is good for", "conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x:", "x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len]", "past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, 
decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache:", "position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and", "* block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)", "key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length,", "only the last query position bias if past_key_value is not None: position_bias =", "= position_bias + mask # (batch_size, n_heads, seq_length, key_length) scores += position_bias attn_weights", "= nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm =", "module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is", "** -0.5)) if hasattr(module.wi_1, \"bias\") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor", "to the PyTorch documentation for all matter related to general usage and behavior.", "If set to `True`, `past_key_values` key value states are returned and can be", "from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi =", "is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\" if self.is_decoder else", "mask is provided for the cross-attention # we need to make broadcastable to", "is not None: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError( f\"You", "the 
model with valid token embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape", "= LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... ) >>> # Let's try a very long", "relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional", "from_seq_length, to_seq_length] # ourselves in which case we just need to make it", "if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length,", "want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. \"\"\"", "LongT5 model.\"\"\" import copy import math import warnings from typing import Any, List,", "import checkpoint from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput,", "LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights initialization and a simple interface", "# self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is", "p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we", "specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds is None: assert self.embed_tokens is not", "hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return", "pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2,", "= None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1,", "value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( 
\"...qhd,...khd->...hqk\", query_states,", "not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size,", "global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids = (global_block_ids *", "not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add", "n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states", "global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position =", "Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states", "In LongT5 it is usually set to the\" \" pad_token_id. See LongT5 docs", "decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) #", "local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None if position_bias", "applied for a local attention.\"\"\" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len,", "for the whole fixed block, are assigned to the preceding block. Padding tokens", "`decoder_input_ids` generation. 
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to", "eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False,", "= LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers", "= shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states", "head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`,", "not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device,", "global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len,", "encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize", "( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past", "self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket", "# (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def", "`hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not", "self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position,", "attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device)", "*optional*): Whether or not to return the attentions tensors of all attention layers.", "**kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states)", "# shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) #", "have been shown that owning a dog is good for you \", return_tensors=\"pt\"", "sequence length will be a multiple of `block_len`\"\"\" pad_len = -x.shape[dim] % block_len", "position_bias + mask # (batch_size, n_heads, seq_length, key_length) scores += position_bias attn_weights =", "input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds:", "3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask =", "PyTorch documentation for all matter related to general usage and behavior. 
Parameters: config", "attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model", "that don't have their past key value states given to this model) of", "was separated into two input args - head_mask, decoder_head_mask if head_mask is not", ":] # If 0 is in output_shape, we cannot apply reshape because of", "None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to", "* \"studies have shown that owning a dog is good for you \",", "Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi", "output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] #", "config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids:", "*optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected", "if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states,", "_ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask =", "outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else:", "of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the", "in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None,", "num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of", "heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] =", "if inputs_embeds is None: assert self.embed_tokens is not None, \"You have to initialize", "is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to", "*optional*): Mask to nullify selected heads of the self-attention modules in the encoder.", "- 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is", "- 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or", "def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2]", "get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return", "device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias =", "return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask", "over individual blocks.\"\"\" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0,", "(batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask", "= attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,)", "self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads *", "isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling before", "2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set", "of a given `block_len` along the given `dim`. 
If the dimension length is", "= [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self,", "LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias =", "before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits", "use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask,", "= () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias =", "(batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size,", ") # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias),", "states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions", "but this feature is deprecated and will be removed in future versions. If", "hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states,", "then positive relative positions are invalid. We use smaller buckets for small absolute", "to each input token. 
This implementation is a simlified version of the original", "__init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model,", "is set to copy `head_mask`, but this feature is deprecated and will be", "_pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] +", "Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import T5Tokenizer, LongT5Model >>> tokenizer", "= torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound )", "we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask", "3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias", "elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\" if", "[batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make", "states to allow each token to attend global aggregated ones # New shape:", "decoder past is not included in output # speedy decoding is disabled and", "@staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from", "torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:", "It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. 
LongT5 model", "= loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output", "key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3", "torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound", "1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training:", "= \"decoder_\" if self.is_decoder else \"\" raise ValueError( f\"You cannot specify both {err_msg_prefix}input_ids", "= key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and \"side\"/\"global\"", "is not None and inputs_embeds is not None: err_msg_prefix = \"decoder_\" if self.is_decoder", "None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None,", "a config file does not load the weights associated with the model, only", "-> global attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions =", "if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions +", "config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len", "HuggingFace Inc. team. 
# # Licensed under the Apache License, Version 2.0 (the", "None: if not self.is_decoder: logger.warning(\"`past_key_values` is passed to the encoder. Please make sure", "module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with", "starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last", "self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states,", "See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or", "in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states =", "variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert", "a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case", "def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention", "are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact", "module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, \"bias\") and module.wi_1.bias is not", "Returns: a Tensor with the same shape as relative_position, containing int32 values in", "bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values =", "== 0)) 
for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout =", "position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids", "states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states", "is good for you\", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>> decoder_input_ids", "None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif", "https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor *", ") value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None", "= new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None,", "[batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len", "torch.ones(num_layers, num_heads)`. 
\"\"\" @add_start_docstrings( \"The bare LONGT5 Model transformer outputting raw hidden-states without", "of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can", "# shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None,", "return reordered_decoder_past @add_start_docstrings( \"The bare LONGT5 Model transformer outputting encoder's raw hidden-states without", "position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if", "> 0, 0.0, -1e10) # We need to adjust position bias shape to", "shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467", "mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2]", "the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)", "is None: # get decoder inputs from shifting lm labels to the right", "r\"\"\" Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer =", "which do not make for the whole fixed block, are assigned to the", "self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states", "+ global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask is not None: #", "instead of passing `input_ids` you can choose to directly pass an embedded representation.", "cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if", "The other half of the buckets are for logarithmically bigger bins in positions", ">>> last_hidden_states = outputs.last_hidden_state 
```\"\"\" return_dict = return_dict if return_dict is not None", "more control over how to convert `decoder_input_ids` indices into associated vectors than the", "calculated # w/o mean and there is no bias. Additionally we want to", "is None: logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\")", "resizing the input embeddings, pruning heads etc.) This model is also a PyTorch", "dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else", ") self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply", "self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0:", "= None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] =", "module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is", "= memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, #", "with gradient checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask,", "the library implements for all its model (such as downloading or saving, resizing", "provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which", "and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if", "Optional[Tuple[Tuple[torch.Tensor]]] = None, 
inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor]", "[ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int,", "Use it as a regular PyTorch Module and refer to the PyTorch documentation", "relative position ids for local attention.\"\"\" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids", "encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not", "to enforce that tokens are not allowed to attend tokens farther than ``local_radius.\"\"\"", "decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids`", "is None: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len)", "for transient-global attention extended_attention_mask = attention_mask # If a 2D or 3D attention", "torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with", "int) -> torch.Tensor: \"\"\"Makes 3-blocked relative position ids for local attention.\"\"\" position_ids =", "x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :] # If 0 is", "layer_past_state in layer_past_states: # need to set correct `past` for each of the", "key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1]", "torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value:", "bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow: 
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to", ":] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket(", "states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention # Obtain block_ids and", "We need to adjust position bias shape to be sum with mask position_bias", "not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and", "mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length,", "(batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias", "tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What", "+ 2, block_len] x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] = []", "output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs", "global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]),", "CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations import ACT2FN from ...modeling_outputs import (", "from torch.utils.checkpoint import checkpoint from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput,", "can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in", "Mask values selected in `[0, 1]`: - 1 for tokens that are **not", "# global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len)", "self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False,", "License. \"\"\" PyTorch LongT5 model.\"\"\" import copy import math import warnings from typing", "from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, )", "for the generic methods the library implements for all its model (such as", "0 indicates the head is **masked**. 
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers,", "torch.Tensor: \"\"\"Create the relative position tensor for local -> global attention.\"\"\" block_ids, global_segment_ids", "embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict,", "* key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if", "failed to load, falling back to LongT5LayerNorm\") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with", "[(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) #", "is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need", "self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius +", "embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len =", "used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask", "None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels", "`transient-global` attention type is expected, \" f\"but got {config.encoder_attention_type}.\" ) self.layer = nn.ModuleList()", "None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs", "into associated vectors than the model's internal embedding lookup matrix. 
output_attentions (`bool`, *optional*):", "from typing import Any, List, Optional, Tuple, Union import torch from torch import", "Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)", "self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model,", "** -0.5)) if hasattr(module.wi_0, \"bias\") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor", "= False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0:", "is not None, ( \"self.model.config.decoder_start_token_id has to be defined. In LongT5 it is", "Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool]", "-> torch.Tensor: \"\"\"Create the relative position tensor for local -> global attention.\"\"\" block_ids,", "position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3", "layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions", "for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket.", "global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids,", "not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\"", "block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self,", "(`bool`, *optional*): Whether or not to return the attentions tensors of all attention", "not load the weights associated with the model, only the configuration. 
Check out", "instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation.", "outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:", "[ r\"encoder.embed_tokens.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config", "int = 0) -> torch.Tensor: \"\"\"Concatenate three consecutive blocks for each input block", "# Warning message for FutureWarning: head_mask was separated into two input args -", "replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC", "# speedy decoding is disabled and no need to reorder if past is", "`shifted_input_ids` has only positive values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None):", "(d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))", "To know more on how to prepare `input_ids` for pretraining take a look", "set() self.gradient_checkpointing = False # Relativen attention bias & Layer norm for global", "use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output", "len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The bare", "def forward(self, hidden_states): # LongT5 uses a layer_norm which only scales and doesn't", "relative_position < max_exact # The other half of the buckets are for logarithmically", "of 
dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need", "3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states,", "as relative_position, containing int32 values in the range [0, num_buckets) \"\"\" relative_buckets =", "block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum(", "or {err_msg_prefix}inputs_embeds\") if inputs_embeds is None: assert self.embed_tokens is not None, \"You have", "context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length)", "for local -> global attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1]", "None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args -", "(batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward(", "global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids =", "the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. \"\"\"", "nullify selected heads of the self-attention modules in the encoder. Mask values selected", "= [] for i in range(3): # We use indexing approach here: #", "be used to speed up decoding (see `past_key_values`). 
output_attentions (`bool`, *optional*): Whether or", "None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) ->", "None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position #", "layer_past_states in past: # get the correct batch idx from layer past batch", "is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1](", "(present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions", "position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,)", "attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output =", "TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] +", "return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask,", "is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif", "x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad =", "else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs =", "the encoder. 
Please make sure this is intended.\") expected_num_past_key_values = 2 if encoder_hidden_states", "labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer,", "\"\"\" # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal)", "the range [0, inf) # half of the buckets are for exact increments", "weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states,", "attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past", "Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer", "all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias", "self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate)", "past, beam_idx): # if decoder past is not included in output # speedy", "attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias)", "key value states given to this model) of shape `(batch_size, 1)` instead of", "an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets:", "`block_len`\"\"\" pad_len = -x.shape[dim] % block_len # Handle cases when an empty input", "self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( \"self.model.config.decoder_start_token_id has to", "block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) 
# (batch_size, num_blocks, n_heads, block_len, 3 * block_len)", "tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try a very", "precomputed key and value hidden states of the attention blocks. Can be used", "= self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values", "outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention", "self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self,", "Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of shape `(batch_size,)`,", "= torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated #", "to make sure that the accumulation for # half-precision inputs is done in", "raise ValueError(f\"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds is None:", "= torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head", "mask = torch.where(mask > 0, 0.0, -1e10) # We need to adjust position", "relative_position is in the range [0, inf) # half of the 
buckets are", "context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] -", "side inputs across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads,", "side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids:", "LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder", "projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits =", "= nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self,", "nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)", "encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value", "num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 * block_len]", "(block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 *", "Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there", "shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, 
global_seq_len, n_heads,", "@staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate", "input_ids, \"decoder_attention_mask\": input_mask, } return dummy_inputs def _init_weights(self, module): \"\"\"Initialize the weights\"\"\" factor", "on top.\", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ] def __init__(self,", "value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else", "seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets,", "- the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position", "raise ValueError( f\"There should be {expected_num_past_key_values} past states. 
\" f\"{'2 (past / key)", "hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)):", "self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0,", "decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs", "self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute", "not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\",", "plain tuple. \"\"\" # Warning message for FutureWarning: head_mask was separated into two", "vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are", "self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index", "seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias =", "if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states =", "KIND, either express or implied. # See the License for the specific language", "< max_exact # The other half of the buckets are for logarithmically bigger", "Contains precomputed key and value hidden states of the attention blocks. Can be", "value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value", "LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... ) >>> # Let's try a very long input.", "- 0 indicates the head is **masked**. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length,", "-1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value", "(layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states:", "if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions,", "\"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\":", "- 1 indicates the head is **not masked**, - 0 indicates the head", "+ self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def", "if use_cache: use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions))", "== 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) #", "config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)", "output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning(\"`past_key_values` is", "relative_buckets def compute_bias(self, query_length, key_length, device=None): \"\"\"Compute binned relative position bias\"\"\" if device", "model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state 
```\"\"\" use_cache = use_cache if use_cache is", "None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else", "module): \"\"\"Initialize the weights\"\"\" factor = self.config.initializer_factor # Used for testing weights initialization", "if hasattr(module.wi_1, \"bias\") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff)", "torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range", "logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\"", "= torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len, 3", "def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size,", "nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] = [] for i in range(3): #", "2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position", "// global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max =", "[LONGT5 Training](./longt5#training). 
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing", "position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights", "= unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states,", "attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif", "super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder =", "4 else ''}\" f\"Got {len(past_key_value)} past key / value states\" ) self_attn_past_key_value =", "r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config):", "(batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) )", "Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded", "shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads,", "of incompatibility with ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device)", "to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for", "position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len,", "and relative position weights attention_outputs = attention_outputs + 
cross_attention_outputs[2:] # Apply Feed Forward", "tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a", "have shown that owning a dog is good for you \", return_tensors=\"pt\" ...", "which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus", "True else: position_bias = self.compute_bias(self.block_len) if mask is not None: # Replace masked", "= torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores ->", "all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: use_cache =", "from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 =", "normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states", "= self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None,", "None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise", "num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2)", "our scenario, as we use this strategy only for a decoder, orphan tokens,", "torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None if position_bias is None:", "self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared", "last_hidden_states = 
outputs.last_hidden_state ```\"\"\" return_dict = return_dict if return_dict is not None else", "decoder_input_ids if past is used if past is not None: input_ids = input_ids[:,", "= values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def", "will be a multiple of `block_len`\"\"\" pad_len = -x.shape[dim] % block_len # Handle", "* 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization #", "= self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else", ") elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if", "dog is good for you \", return_tensors=\"pt\" ... ).input_ids # Batch size 1", "side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block,", "config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model", "if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention", "self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim", "- 1]`. 
All labels set to `-100` are ignored (masked), the loss is", "prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): #", "+ (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions =", "attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5", "cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states =", "key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\" return", "std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, \"bias\") and module.wi.bias is not None:", "(batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and", "self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self,", "global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance,", "# (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1,", "of heads to prune in this layer} See base class PreTrainedModel \"\"\" for", "\"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use 
both", "__init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model,", "= None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] =", "# add attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self", "LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try a very long encoder input. >>> input_ids =", "() if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None", "Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed block\" global id corresponding to each input token.", "tokens farther than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask", "return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of", "decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" use_cache = use_cache if use_cache is not", "general usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration class with all the", "you\", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer(\"Studies show", "self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if", "\"You have to initialize the model with valid token embeddings\" inputs_embeds = self.embed_tokens(input_ids)", "*optional*): Labels for computing the sequence classification/regression loss. 
Indices should be in `[-100,", "num_heads, block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores", "= LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing = True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs", "key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module,", "None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, )", "heads of the self-attention modules in the encoder. Mask values selected in `[0,", "of `block_len`\"\"\" pad_len = -x.shape[dim] % block_len # Handle cases when an empty", ") if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if", "batch_size, seq_length = input_shape # required mask seq length can be calculated via", "(relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))", "input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[...,", "self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states", "index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q", "the model's internal embedding lookup matrix. 
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`,", "block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int )", "= self.config.pad_token_id assert decoder_start_token_id is not None, ( \"self.model.config.decoder_start_token_id has to be defined.", "def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.local_radius =", "torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations import ACT2FN from ...modeling_outputs", "Feed Forward layer hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,) if use_cache: outputs =", "output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before", "1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 #", "all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer", "f\"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)} past", "block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size,", "= project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None", "raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing", "as the starting token for `decoder_input_ids` generation. 
If `past_key_values` is used, optionally only", "is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*)", "input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is", "torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size)", "bucket. This should allow for more graceful generalization to longer sequences than the", "pad the inputs on both the right and the left. Indices can be", "This implementation is a simlified version of the original Flaxformr implementation adopted from:", "def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None,", "else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f\"There should be {expected_num_past_key_values} past", "nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)", "__init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff,", "LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ]", "return_tensors=\"pt\" ... 
).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states", "block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) #", "self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Relativen", ") relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): \"\"\"Compute", "None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) #", "self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask =", "\"studies have shown that owning a dog is good for you \", return_tensors=\"pt\"", "See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor", "def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self):", ">>> from transformers import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model =", "encoder. 
Please make sure this is intended.\") expected_num_past_key_values = 2 if encoder_hidden_states is", "torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return", "by applicable law or agreed to in writing, software # distributed under the", "None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states,", "> _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids =", "input_ids[:, -1:] return { \"decoder_input_ids\": input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\":", "hidden states correctly to key/query states\"\"\" if key_value_states is None: # self-attn #", "module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, \"bias\") and module.wi.bias is not", "and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for", "# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, \"bias\") and", "): \"\"\" Self-attention (if key_value_states is None) or attention over source sentence (provided", "pre-trained in a text-to-text denoising generative setting. 
LongT5 model is an extension of", "module.gradient_checkpointing = value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id", "= memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:,", "Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team. # # Licensed", "Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" return_dict", "-0.5)) if hasattr(module.wi_0, \"bias\") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor *", "left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail.", "not a multiple of `block_len`, it will be padded first with selected `pad_value`.", "layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output = attn_output[:, :seq_length,", "position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: \"\"\"Mask", "See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF", "from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\")", "_get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according to the 
original", "scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states),", "Combine self attn and cross attn key value states if present_key_value_state is not", "(encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)", "attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output =", "+ global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks,", "present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs", "\", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>>", "= real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size,", "_sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype,", "is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1]", "`decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):", "query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with onnx", "key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks,", "self.inner_dim) # Prepare components for transient-global attention # Obtain block_ids and global_segment_ids #", "num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor:", "cross attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device )", "is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length,", "= (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids =", "None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module,", "seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) #", "style. No bias and no subtraction of mean. 
\"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size))", "shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has only", "std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor", "hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states", "an integer Returns: a Tensor with the same shape as relative_position, containing int32", "matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of", "weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)):", "= tokenizer( ... 100 * \"Studies have been shown that owning a dog", "a dog is good for you \", return_tensors=\"pt\" ... ).input_ids # Batch size", "into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "relative position is defined as memory_position - query_position, i.e. 
the distance in tokens", "head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is", "as a regular PyTorch Module and refer to the PyTorch documentation for all", "base_model_prefix = \"transformer\" supports_gradient_checkpointing = True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self):", "__init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout", "encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with a `language modeling` head on top.\"\"\",", "Tensor with the same shape as relative_position, containing int32 values in the range", "weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings):", "super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo =", "torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size,", "return a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args:", "deprecated and will be removed in future versions. 
If you do not want", "want to make sure that the accumulation for # half-precision inputs is done", "return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions", "all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states", "to load the model weights. \"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of", "that\", return_tensors=\"pt\").input_ids # Batch size 1 >>> # forward pass >>> outputs =", "use_cache: use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return", "we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used in encoder\"\"\"", "None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,", "None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None,", "output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\"", "uses a layer_norm which only scales and doesn't shift, which is also known", "self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states ->", "output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is", "All labels set to `-100` are ignored (masked), the loss is only computed", "in which case we just need to make it broadcastable to all heads.", "torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes 3-blocked 
relative position ids for", "to `True`, `past_key_values` key value states are returned and can be used to", "Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output *", "all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and", "position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and", "] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared =", "extended_attention_mask = attention_mask # If a 2D or 3D attention mask is provided", "# Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or", "True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask =", "= hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if", "\"transformer\" supports_gradient_checkpointing = True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids =", "] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions,", "# LongT5 uses a layer_norm which only scales and doesn't shift, which is", "= attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) outputs", "# Prepare components for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len", "`True`, 
`past_key_values` key value states are returned and can be used to speed", "self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False", ":] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) +", "= block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len", "for cross attention # if using past key value states. Need to inject", "0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self,", "torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint", "LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)", "module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization", "num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def", "= None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in", "layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past =", "LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) 
forwarded_states =", "if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask", "key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0])", "local attention mask and standard extended mask for transient-global attention extended_attention_mask = attention_mask", "real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else:", ") # (batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias is None:", "forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states)", "values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values =", "self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model,", "torch.Tensor: \"\"\"Split an input tensor into blocks of a given `block_len` along the", "weights\"\"\" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, LongT5LayerNorm):", "LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states =", "num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) #", "file does not load the weights 
associated with the model, only the configuration.", "attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) outputs =", "input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The input argument `head_mask` was", "= self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) #", "hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied", "self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values", "assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)", "in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are", "sequence_dim=2) # Compute scores scores = torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states ) # (batch_size,", "+ (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module):", "returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`,", "+ (num_blocks, block_len) + x.shape[(dim + 1) :] # If 0 is in", "+ self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output", "None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)),", "more information, see: https://arxiv.org/pdf/2112.07916.pdf. 
\"\"\" num_blocks = x.shape[block_dim] pad = [(0, 0)] *", "torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return", "] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model)", "{err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds is None: assert self.embed_tokens is not None, \"You", "block_length: int): \"\"\"Compute binned relative position bias\"\"\" memory_position = torch.arange( 3 * block_length,", "fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) #", "return dummy_inputs def _init_weights(self, module): \"\"\"Initialize the weights\"\"\" factor = self.config.initializer_factor # Used", "relative attention. The relative position is defined as memory_position - query_position, i.e. the", "tuple. 
\"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices", "try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm -", "ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states)", "model with valid token embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape #", "from transformers import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\")", "= input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix", "values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None,", "_3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask,", "return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if", "attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):", "= position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias -", "config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config)", "the original sequence are represented by -1. 
\"\"\" batch_size, seq_len = attention_mask.shape[:2] def", "indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks,", "self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss", "Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set", "values. Got { len(past_key_value)} past states\" real_seq_length += past_key_value[0].shape[2] if query_length is None", "None: # We need to adjust position bias shape to be sum with", "full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask =", "= (1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size,", "# convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states =", "= model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to summarize", "model (such as downloading or saving, resizing the input embeddings, pruning heads etc.)", "`input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape", "**masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices", "`decoder_input_ids`. Causal mask will also be used by default. 
head_mask (`torch.FloatTensor` of shape", "0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim:", "(batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k,", "True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head =", "# hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)", "None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f\"There should be {expected_num_past_key_values}", "class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo", "real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if", "global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0,", "input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, }", "if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights,", "self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output =", "tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also be used", "position_bias = self.compute_bias(self.block_len) if mask is not None: # Replace masked positions with", "input. >>> input_ids = tokenizer( ... \"summarize: \" + 100 * \"studies have", "inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not", "{err_msg_prefix}inputs_embeds\") if inputs_embeds is None: assert self.embed_tokens is not None, \"You have to", "_concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Concatenate", "outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module):", ">>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try a very long encoder input.", "self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o", "one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) #", "attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states +", "encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds:", "the License for the specific language governing permissions and # limitations under 
the", "Example: ```python >>> from transformers import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>>", "+= (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position,", "attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights do_cross_attention =", "std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is not None:", "it will be padded first with selected `pad_value`. \"\"\" # pad tensor to", "not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) scores", "output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not None else", "n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states", ") def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs", "locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor:", "with all the parameters of the model. Initializing with a config file does", "head is **not masked**, - 0 indicates the head is **masked**. 
decoder_head_mask (`torch.FloatTensor`", "docs for more information\" ) # shift inputs to the right if is_torch_fx_proxy(input_ids):", "block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) #", "self.gradient_checkpointing and self.training: if use_cache: use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return", "`decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`,", "`(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in", "-0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense):", "be input (see `past_key_values`). This is useful if you want more control over", "past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0]", "output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask,", "self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len", "output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = ()", "1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values", 
"\"decoder_\" if self.is_decoder else \"\" raise ValueError( f\"You cannot specify both {err_msg_prefix}input_ids and", "Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs =", "assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has only positive values\" return shifted_input_ids", "T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's", "the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask =", "if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = ()", "hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary", "p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not", "the normal LongT5LayerNorm pass except Exception: logger.warning(\"discovered apex but it failed to load,", "config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout =", "for pretraining take a look at [LONGT5 Training](./longt5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size,", "self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self):", "key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length)", "hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we", "past is used if past is not None: input_ids = input_ids[:, -1:] return", "config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads", "key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else", "positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The", "block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward(", "expected, \" f\"but got {config.encoder_attention_type}.\" ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder:", "context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder),", "query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias:", "self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0,", "half of the buckets are for logarithmically bigger bins in positions up to", "This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods", "and relative position weights do_cross_attention = self.is_decoder and encoder_hidden_states is not None if", "the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head", "# We need to adjust position bias shape to be sum with mask", "-0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if", "use_cache is True: assert self.is_decoder, f\"`use_cache` can only be set to `True` if", "dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states", "checkpoint from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput,", "get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. heads_to_prune:", "be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions", "List[torch.Tensor] = [] for i in range(3): # We use indexing approach here:", "are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers,", "`decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. 
use_cache", "selected in `[0, 1]`: - 1 for tokens that are **not masked**, -", "target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly", "apply reshape because of incompatibility with ONNX conversion if 0 in output_shape: return", "* key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0,", ":] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device:", "and values are already calculated # we want only the last query position", "self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype,", "1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1,", "_3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size,", "encoder input. >>> input_ids = tokenizer( ... 
100 * \"Studies have been shown", "(`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a", "self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according to the original implementation)", ") class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\",", "= self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5", "): # cut decoder_input_ids if past is used if past is not None:", "- whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns:", "__init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache =", "decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None,", "layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(", "all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*):", "vocabulary. Indices can be obtained using [`T5Tokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.", "# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False):", "or 3D attention mask is provided for the cross-attention # we need to", "= prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads)", "LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo", "= outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias:", "in the range [0, inf) # half of the buckets are for exact", "from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained(", "associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to", "if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from", "transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder =", "position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias", "T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)", "to prune in this layer} See base class PreTrainedModel \"\"\" for layer, heads", "half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states", "+ attention_output[1:] # add attentions if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module):", "= None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] =", "= _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions", "else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict #", "**masked**. 
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify", "<= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) ->", "None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs", "seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) #", "indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`,", "( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config", "self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated # we", "layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether", "in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration", "* block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length)", "shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project(", "return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two", "in this layer} See base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items():", "= _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) #", "values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1,", "cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply", "a very long input. >>> input_ids = tokenizer( ... \"summarize: \" + 100", "under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to", "original implementation) mask = torch.where(mask > 0, 0.0, -1e10) # We need to", "`past_key_values`). 
To know more on how to prepare `decoder_input_ids` for pretraining take a", "are ignored (masked), the loss is only computed for labels in `[0, ...,", "ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def", "and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias,", "Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from", "id corresponding to each input token. This implementation is a simlified version of", "the [`~PreTrainedModel.from_pretrained`] method to load the model weights. \"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args:", "and a simple interface for downloading and loading pretrained models. \"\"\" config_class =", "loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions,", "hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision", "are used, the user can optionally input only the last `decoder_input_ids` (those that", "dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len)", "\"\"\" relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position >", "[LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)", "\" f\"but got {config.encoder_attention_type}.\" ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, 
has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config))", "Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels", "3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we", "OF ANY KIND, either express or implied. # See the License for the", "separated into two input args - head_mask, decoder_head_mask if head_mask is not None", "cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs", "need to adjust position bias shape to be sum with mask position_bias =", "3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor,", "project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None )", "self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\"", "return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention # Obtain block_ids", "= outputs.last_hidden_state ```\"\"\" use_cache = use_cache if use_cache is not None else self.config.use_cache", "self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Relativen attention bias & Layer", "self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, 
max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len,", "outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__()", "num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int", "if past_key_value is not None: if not self.is_decoder: logger.warning(\"`past_key_values` is passed to the", "head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is", "fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask", "# layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention", "the self-attention modules in the encoder. Mask values selected in `[0, 1]`: -", "global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2)", "past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\" The LongT5 model was proposed", "None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not", "dim # batch dim of `past` is at 2nd position reordered_layer_past_states = ()", "of the encoder. Used in the cross-attention of the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of", "global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids,", "self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None,", "= head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is", "FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) **", "to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is", "than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both", "0)] * x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x =", "transformers import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>>", "= [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model)", "inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError(f\"You have to", "model with relative position embeddings so you should be able to pad the", "to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance /", "# we need to use both local attention mask and standard extended mask", "and no subtraction of mean. 
\"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps", "the range [0, num_buckets) \"\"\" relative_buckets = 0 if bidirectional: num_buckets //= 2", "hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i]", "self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from", "(0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return", "= seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ),", "key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size,", "or (2) Transient-Global attention. This model inherits from [`PreTrainedModel`]. Check the superclass documentation", "bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets", "+= position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights", "initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): #", "real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1,", "use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned", "= None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] =", "a plain tuple. 
\"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size,", "mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len)", "next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if", "global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, )", "layer} See base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING)", "return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias,", ") else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask,", "\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... ) >>> # Let's try a very long input. >>> input_ids", "or agreed to in writing, software # distributed under the License is distributed", "first with selected `pad_value`. \"\"\" # pad tensor to multiple of block_len if", "inputs_embeds.device) else: # we need to use both local attention mask and standard", "incompatibility with ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return", "of passing `input_ids` you can choose to directly pass an embedded representation. 
This", "attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size #", "= self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100)", "_split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: \"\"\"Split an input tensor into", "present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if", "r\"encoder.embed_tokens.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config =", "(see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take", "be padded first with selected `pad_value`. \"\"\" # pad tensor to multiple of", "elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling", "the studies have shown that owning a dog ```\"\"\" use_cache = use_cache if", "is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output =", "share the position biases between the layers - the first layer store them", "position to a bucket number for relative attention. 
The relative position is defined", "BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import", "return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias),", "get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self):", "tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size,", "key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project(", "encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs =", "attention. 
' if expected_num_past_key_values == 4 else ''}\" f\"Got {len(past_key_value)} past key /", "> 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states", "value states given to this model) of shape `(batch_size, 1)` instead of all", "for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position", "you want more control over how to convert `decoder_input_ids` indices into associated vectors", "layer_outputs[:2] # We share the position biases between the layers - the first", ") hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values,", "(d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor", "self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor]", "accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states,", "2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len +", "integer Returns: a Tensor with the same shape as relative_position, containing int32 values", "[What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of", "= True decoder_config.is_encoder_decoder = False decoder_config.num_layers = 
config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head", "is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>>", "trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the", "sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels", "use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = ()", "3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3", "passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values`", "input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\"", "TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model)", "(position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from", "# get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) #", "input_ids = tokenizer( ... 
100 * \"Studies have been shown that owning a", "# set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask", "return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1]", "import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations import", "None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs", "class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer", "def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None,", "layer} See base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)", "look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to", "hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states =", "load the model weights. \"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape", "`input_ids` you can choose to directly pass an embedded representation. This is useful", "and \"side\"/\"global\" key/value states to allow each token to attend global aggregated ones", "to nullify selected heads of the self-attention modules in the decoder. 
Mask values", "super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout =", "is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)", "labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) #", "eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing", "None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None,", "self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias", "use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim]", "([`LongT5Config`]): Model configuration class with all the parameters of the model. Initializing with", "shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder =", "n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split", "to pad the inputs on both the right and the left. Indices can", "...] 
attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position", "to convert `input_ids` indices into associated vectors than the model's internal embedding lookup", "nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder =", "self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and", "self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim =", "past reordered_decoder_past = () for layer_past_states in past: # get the correct batch", "key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor", "3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes", "mask to be applied for a local attention.\"\"\" # [batch_size, num_blocks, block_len] _blocked_attention_mask", "labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), \"Verify that", "self.is_decoder else \"\" raise ValueError(f\"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if", "self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) #", "the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use", "pad tokens in `decoder_input_ids`. Causal mask will also be used by default. 
head_mask", "bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if", "encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs", "top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected =", "\"side\"/\"global\" key/value states to allow each token to attend global aggregated ones #", "prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past", "# Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team. 
# #", "add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__)", "1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training:", "beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past", "seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max", "position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with", "assert ( len(past_key_value) == 2 ), f\"past_key_value should have 2 past states: keys", "to the attended-to position. If bidirectional=False, then positive relative positions are invalid. 
We", "= torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible", "0)) for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate)", "is intended.\") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value)", "# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs", "= prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads", "torch.utils.checkpoint import checkpoint from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions,", "None with gradient checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states,", "or implied. # See the License for the specific language governing permissions and", "local attention in encoder self-attention, otherwise standard self & cross attentions are used", "return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask,", "# batch dim of `past` is at 2nd position reordered_layer_past_states = () for", ") hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and", "(`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you", "generate a tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also", "self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix =", "1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See", "Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor]", "nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu", "num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states =", "configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. \"\"\" LONGT5_INPUTS_DOCSTRING", "0, 0.0, -1e10) else: local_attention_mask = None if position_bias is None: # position_bias", "associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds`", ") sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab", "global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks", "seq_length, key_length) # Mask heads if we want to if layer_head_mask is not", "scores = torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states ) # (batch_size, num_block, n_heads, block_len, 3", "If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask", "set to `-100` are ignored (masked), the loss is only computed for labels", "Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS,", "matrix. 
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing", "# [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x,", "- 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound,", "sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not", "self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)", "model. Initializing with a config file does not load the weights associated with", "torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: \"\"\"Prepare attention mask to be applied", "config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate", "= self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states", "-0.5)) if hasattr(module.wi_1, \"bias\") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor *", "= torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to", "self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if 
len(heads) ==", "attention.\"\"\" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3", "weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and", "position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 *", "= torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states ) # (batch_size, num_block, n_heads, block_len, 3 *", "# Tile side inputs across local key/value blocks # New shape: (batch_size, num_blocks,", "them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self,", "mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask =", "tokenizer( ... \"summarize: \" + 100 * \"studies have shown that owning a", "to consider setting `use_cache=True` to speed up decoding\") return past reordered_decoder_past = ()", "instead of LongT5LayerNorm\") except ImportError: # using the normal LongT5LayerNorm pass except Exception:", "locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def", "# past_key_value is always None with gradient checkpointing ) else: layer_outputs = layer_module(", "return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask,", "global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None],", "encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, 
output_hidden_states=None, return_dict=None, ): use_cache = use_cache", "sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with", "decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids`", "is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience", "} def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if", "training=self.training) # Mask heads if we want to if layer_head_mask is not None:", "def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\" return", "position bias shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device)", "num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _", "case we just need to make it broadcastable to all heads. # We", "to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to", "are returned and can be used to speed up decoding (see `past_key_values`). 
output_attentions", "* block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 *", "will use it instead of LongT5LayerNorm\") except ImportError: # using the normal LongT5LayerNorm", "LongT5 is a model with relative position embeddings so you should be able", "decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with a `language modeling`", "input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask,", "self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None:", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "0).item(), \"Verify that `shifted_input_ids` has only positive values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def", "a tensor so that a sequence length will be a multiple of `block_len`\"\"\"", "up decoding. 
If `past_key_values` are used, the user can optionally input only the", "= list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] *", "Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and", "heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask:", "= outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias:", "return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids:", "head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states:", "side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, )", "LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)", "a local attention.\"\"\" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) #", "to the preceding block. 
Padding tokens from the original sequence are represented by", "is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the", "key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length =", "value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id", "T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value #", "it enables using one of the two different efficient attention mechanisms - (1)", "or not to return the hidden states of all layers. See `hidden_states` under", "logger.warning(\"discovered apex but it failed to load, falling back to LongT5LayerNorm\") pass #", "mask # (batch_size, n_heads, seq_length, key_length) scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(", "n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor", "if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None", "normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states", "abstractthe aim of this article is to summarize the studies have shown that", "_3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask", "() for layer_past_states in past: # get the correct batch idx from layer", "torch.ones( batch_size, 
encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past", ">>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer( ... 100 * \"Studies have", "layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output", "output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all", "Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is", "None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size,", "mask to enforce that tokens are not allowed to attend tokens farther than", "= 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise", "(LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor", "transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False)", "False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply", "output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config:", "-1, self.inner_dim) # Prepare components for transient-global attention # Obtain block_ids and global_segment_ids", "masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) decoder_input_ids", "T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias", "hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass", "def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def", "normal LongT5LayerNorm pass except Exception: logger.warning(\"discovered apex but it failed to load, falling", "If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for", "more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states", "shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert", "# shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not", "None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None,", "dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0]", "-> torch.Tensor: \"\"\"Pad a tensor so that a sequence length will be a", "self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]", "\"\"\" Self-attention (if key_value_states is None) or attention over source sentence (provided by", "BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, 
) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils", "(hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states,", "decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else", "<=-max_distance map to the same bucket. This should allow for more graceful generalization", "self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class", "and behavior. Parameters: config ([`LongT5Config`]): Model configuration class with all the parameters of", "** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False):", "be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len", "-> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration", "attention. This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic", "loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self,", "attention mask is provided for the cross-attention # we need to make broadcastable", "padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens", "if mask is not None: # We need to adjust position bias shape", "= CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if", "needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids,", "Any, List, Optional, Tuple, Union import torch from torch import nn from torch.nn", "encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else:", "relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :,", "return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if", "if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used in", "return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) 
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask:", "and standard extended mask for transient-global attention extended_attention_mask = attention_mask # If a", "so that a sequence length will be a multiple of `block_len`\"\"\" pad_len =", "aggregates by summing over individual blocks.\"\"\" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where(", "# if key and values are already calculated # we want only the", "use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # Combine self attn and cross attn", "good for you\", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>> decoder_input_ids =", "last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how", "we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module):", "def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied", "set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None,", "head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs =", "None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states,", "any specific head on top.\", LONGT5_START_DOCSTRING, ) class 
LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\",", "is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING", "# Concatenate \"local\" and \"side\"/\"global\" key/value states to allow each token to attend", "decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared)", "nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False,", "torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def", "False # Relativen attention bias & Layer norm for global attention if self.has_relative_attention_bias:", "components for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len", "# (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads,", "over source sentence (provided by key_value_states). \"\"\" # Input is (batch_size, seq_length, dim)", "with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense =", "supported natively for proxies. 
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids,", "output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias", "= LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For", "self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states):", "calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is", "1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int):", "softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads", "target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. 
Indices can", "(`bool`, *optional*): Whether or not to return the hidden states of all layers.", "hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm", "= hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention", "a `language modeling` head on top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\",", "get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states =", "= outputs.last_hidden_state ```\"\"\" return_dict = return_dict if return_dict is not None else self.config.use_return_dict", "1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] )", "LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance", "encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None:", "also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and", "model.\"\"\" import copy import math import warnings from typing import Any, List, Optional,", "is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads,", "return self.encoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. 
heads_to_prune: dict", "torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position", "= self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def", "logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids,", "self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction", "avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:", "= tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim)", "model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the", "Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model", "the parameters of the model. 
Initializing with a config file does not load", "else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id", "locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor,", "as downloading or saving, resizing the input embeddings, pruning heads etc.) This model", "def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: \"\"\"Prepare attention mask to", "* block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1,", "block_len, 3 * block_len + global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask", "self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs =", "from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\"", "self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias", ") # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training )", "any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
\"\"\" @add_start_docstrings( \"The bare", "only positive values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens", "key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side", "\"\"\" The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`.", "use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict", "print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to summarize the studies have", "all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [", "layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states", "# add attentions if we output them return outputs class LongT5Block(nn.Module): def __init__(self,", "* block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states,", "device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket =", "args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The input argument `head_mask` was split", "= False # Relativen attention bias & Layer norm for global attention if", "into two input args - head_mask, decoder_head_mask if head_mask is not None and", "((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))", "given `block_len` along the given `dim`. 
If the dimension length is not a", "self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1]", "want more control over how to convert `input_ids` indices into associated vectors than", "all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0,", "also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is", "_create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length,", "mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids)", "local_attention_mask is not None: # (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias", "device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor,", "-0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) #", "generic methods the library implements for all its model (such as downloading or", "input_ids = tokenizer( ... \"summarize: \" + 100 * \"studies have shown that", "return the hidden states of all layers. See `hidden_states` under returned tensors for", "the self-attention modules in the decoder. Mask values selected in `[0, 1]`: -", "head is **masked**. 
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask", "else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask,", "= torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids", "any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\",", "self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is not included in", "block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size,", "pad tensor to multiple of block_len if x.shape[dim] % block_len != 0: x", "states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape", "the distance in tokens from the attending position to the attended-to position. If", "has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate)", "should allow for more graceful generalization to longer sequences than the model has", "is usually set to the\" \" pad_token_id. See LongT5 docs for more information\"", "be {expected_num_past_key_values} past states. \" f\"{'2 (past / key) for cross attention. '", "pretrained models. 
\"\"\" config_class = LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing = True @property", ">>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is", "self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder", "int, dim: int) -> torch.Tensor: \"\"\"Split an input tensor into blocks of a", "attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state =", "if self.gradient_checkpointing and self.training: if use_cache: use_cache = False def create_custom_forward(module): def custom_forward(*inputs):", "block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position", "for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,", "None, ...] attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len)", "def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value):", "self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):", "correct `past` for each of the four key / value states reordered_layer_past_states =", "\"\"\" # pad tensor to multiple of block_len if x.shape[dim] % block_len !=", "hidden states at the output of the last layer of the encoder. 
Used", "that owning a dog ```\"\"\" use_cache = use_cache if use_cache is not None", "block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 *", "shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported", "tokens in the vocabulary. LongT5 is a model with relative position embeddings so", "and it enables using one of the two different efficient attention mechanisms -", "blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states", "if past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder,", "layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache,", "is to summarize the studies have shown that owning a dog ```\"\"\" use_cache", "not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states,", "hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with", "- 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return", "```python >>> from transformers import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model", "2022 Google LLC., 
LongT5 Authors and HuggingFace Inc. team. # # Licensed under", "want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output", "assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The", "def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ):", "sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`.", "value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) +", "if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs", "Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation(", "a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an", "side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias =", "vectors than the model's internal embedding lookup matrix. 
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size,", "LongT5LayerSelfAttention elif config.encoder_attention_type == \"local\": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\": attention_layer", "head on top.\", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ] def", "always None with gradient checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias,", "not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, \"bias\") and", "= None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] =", "memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] #", "_split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1)", "BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2]", "without any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [", "positive values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens =", "# get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states", "= self.DenseReluDense(forwarded_states) hidden_states = 
hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention", "= _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values ->", "return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config):", "final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings", "self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs", "# w/o mean and there is no bias. Additionally we want to make", "each input block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. \"\"\" num_blocks", "transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing =", "if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is", "tuple. \"\"\" # Warning message for FutureWarning: head_mask was separated into two input", "and refer to the PyTorch documentation for all matter related to general usage", "the model's internal embedding lookup matrix. 
output_attentions (`bool`, *optional*): Whether or not to", "class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def", "query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states))", "self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads", "`decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want", "def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor:", "new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)]", "used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). 
To", "self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim)", "+ attention_output[1:] # add attentions if we output them return outputs class LongT5Block(nn.Module):", "do not make for the whole fixed block, are assigned to the preceding", "self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket)", "encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing )", "`use_cache=True` to speed up decoding\") return past reordered_decoder_past = () for layer_past_states in", "self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593", "use smaller buckets for small absolute relative_position and larger buckets for larger absolute", "values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(),", "position embeddings so you should be able to pad the inputs on both", "hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads", "isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow", "get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, 
config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None,", "initialize the model with valid token embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length =", "attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError(", "of hidden states at the output of the last layer of the encoder.", "+ 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim", "T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm =", "new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None,", "all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else", "block_len, 3 * block_len) if position_bias is None: # position_bias shape: # (1,", "+ (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len", "LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... 
)", "value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: \"\"\"Split", "self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius", "if self.is_decoder else \"\" raise ValueError( f\"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds", "than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length,", "len(past_key_value) == 2 ), f\"past_key_value should have 2 past states: keys and values.", "it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length =", "values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward(", "attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention(", "input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\": input_ids, \"input_ids\": input_ids,", "this layer} See base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads)", "+ global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states],", "set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
\"\"\" @add_start_docstrings( \"The bare LONGT5 Model transformer outputting", "def __init__(self, hidden_size, eps=1e-6): \"\"\" Construct a layernorm module in the LongT5 style.", "is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]", "self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to", "past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder, f\"`use_cache`", "if past_key_value is not None else None ) # compute scores scores =", "num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the", "self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh", "(1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks", "strategy only for a decoder, orphan tokens, i.e. those tokens which do not", "# get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value", "of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds", "def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept", "and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model)", "position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs =", "None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import", "standard extended mask for transient-global attention extended_attention_mask = attention_mask # If a 2D", "# now relative_position is in the range [0, inf) # half of the", "LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm =", "Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. It's an encoder-decoder", "= x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :] # If 0", "self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def", "Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self,", "or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
\"\"\" #", "n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get", "for you \", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>> outputs =", "but it failed to load, falling back to LongT5LayerNorm\") pass # Copied from", "is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length =", "the attending position to the attended-to position. If bidirectional=False, then positive relative positions", "== 4 else ''}\" f\"Got {len(past_key_value)} past key / value states\" ) self_attn_past_key_value", "`(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices", "self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length", "scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights =", "heads of the cross-attention modules in the decoder. 
Mask values selected in `[0,", "self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1,", "global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def", "+ attention_output[1:] # add attentions if we output them return outputs # Copied", "- 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention", "if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask", "past_key_value is not None: if not self.is_decoder: logger.warning(\"`past_key_values` is passed to the encoder.", "Mask to nullify selected heads of the self-attention modules. Mask values selected in", "LONGT5_START_DOCSTRING = r\"\"\" The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer", "global aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len,", "attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on", "not None, \"You have to initialize the model with valid token embeddings\" inputs_embeds", "representation. 
This is useful if you want more control over how to convert", "= T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try a very long", "position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4", "Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool]", "self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if", "_mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def", "sequence is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return", "and {err_msg_prefix}inputs_embeds at the same time\" ) elif input_ids is not None: input_shape", "# (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states,", "attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state +", "\"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention # Obtain", "the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large", "shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary.", "# cut decoder_input_ids if past is used if past is not None: input_ids", "past_key_value is always None with gradient checkpointing ) else: layer_outputs = layer_module( hidden_states,", "be input (see `past_key_values`). 
To know more on how to prepare `decoder_input_ids` for", "attention_output[1:] # add attentions if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local", "query_length, key_length, device=None): \"\"\"Compute binned relative position bias\"\"\" if device is None: device", "torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max,", "mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according", "and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how", "normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache,", "not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output)", "= AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer( ... 
100 *", "0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now", "use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],)", "dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout,", "hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else", "inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\" if self.is_decoder", "= self.config.initializer_factor # Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor *", "// block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :]", "for each input block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. 
\"\"\"", "get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self,", "We share the position biases between the layers - the first layer store", "not included in output # speedy decoding is disabled and no need to", "initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh", "adjust position bias shape to be sum with mask position_bias = position_bias +", "if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try:", "- 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor`", "num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None,", "' if expected_num_past_key_values == 4 else ''}\" f\"Got {len(past_key_value)} past key / value", "if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide", "n_heads, block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias =", "= present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs", "used to speed up decoding. 
If `past_key_values` are used, the user can optionally", "None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states,", "by key_value_states). \"\"\" # Input is (batch_size, seq_length, dim) # Mask is (batch_size,", "!= 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound =", "labels set to `-100` are ignored (masked), the loss is only computed for", "weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method", "https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only for a decoder,", "forward(self, hidden_states): # LongT5 uses a layer_norm which only scales and doesn't shift,", "has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean -", "an integer max_distance: an integer Returns: a Tensor with the same shape as", "seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache)", "output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\"", "dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3", "torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] attention_side_bias = torch.where(side_attention_mask > 0, 0.0,", "[`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. 
To know more on how to", "None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias", "position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct", "logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\") return past", "side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks,", "self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias", "return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False,", "= [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim:", "* block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor,", "Got { len(past_key_value)} past states\" real_seq_length += past_key_value[0].shape[2] if query_length is None else", "seq_length, key_length) scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size,", "\", return_tensors=\"pt\" ... 
).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>>", "# Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\"", "sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab #", "seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1)", "== -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has only positive", "range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and", "1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape", "None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None,", "tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... 
) >>> #", "encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask =", "Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size,", "def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: \"\"\"Compute individual", "is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and", "for i in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices", "= position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len)", "PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging,", "# # Unless required by applicable law or agreed to in writing, software", "are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder", "and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*):", "eps=1e-6): \"\"\" Construct a layernorm module in the LongT5 style. No bias and", "None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions", "specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ]", "you can choose to directly pass an embedded representation. 
If `past_key_values` is used,", "`[0, 1]`: - 1 for tokens that are **not masked**, - 0 for", "# (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads,", "# [batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask =", "mask position_bias = position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks,", "* \"Studies have been shown that owning a dog is good for you", ") >>> # Let's try a very long input. >>> input_ids = tokenizer(", "either express or implied. # See the License for the specific language governing", "= sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return x def _split_into_blocks(x:", "\"decoder_attention_mask\": input_mask, } return dummy_inputs def _init_weights(self, module): \"\"\"Initialize the weights\"\"\" factor =", "None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,", "input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask`", "# we want only the last query position bias if past_key_value is not", "(past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}\"", "3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True", "is **not masked**, - 0 indicates the head is **masked**. 
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):", "with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1,", "encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is", "super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def", "present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel):", "was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>,", "* block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want", "elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states =", "hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module):", "the License. # You may obtain a copy of the License at #", "to be input (see `past_key_values`). 
To know more on how to prepare `decoder_input_ids`", "relative position bias\"\"\" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length,", "T5 model, and it enables using one of the two different efficient attention", "CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not", "if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position", "nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations import ACT2FN", "states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids`", "attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) #", "to return the hidden states of all layers. See `hidden_states` under returned tensors", "inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) #", "seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is", "= () if output_hidden_states else None all_attentions = () if output_attentions else None", "{err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time\" ) elif input_ids is not None:", "model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch", "_concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states )", "information, see: https://arxiv.org/pdf/2112.07916.pdf. \"\"\" num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim", "map to the same bucket. All relative positions <=-max_distance map to the same", "that a sequence length will be a multiple of `block_len`\"\"\" pad_len = -x.shape[dim]", "global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: \"\"\"Create the relative position tensor", "broadcastable to all heads. # We use local attention in encoder self-attention, otherwise", "(`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size,", "# (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: #", "sequence of hidden states at the output of the last layer of the", "torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask,", "def compute_bias(self, query_length, key_length, device=None): \"\"\"Compute binned relative position bias\"\"\" if device is", "generalization to longer sequences than the model has been trained on Args: relative_position:", "* self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else:", "encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None:", "self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, 
std=factor * (d_model**-0.5))", "normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states", "module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization #", "None: # Replace masked positions with -1e10 (according to the original implementation) mask", "to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup", "(`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in", "are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining", "1 indicates the head is **not masked**, - 0 indicates the head is", "self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights do_cross_attention", "= None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs =", "is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias =", "torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size,", "expected_num_past_key_values: raise ValueError( f\"There should be {expected_num_past_key_values} past states. 
\" f\"{'2 (past /", "has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states,", "new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim]", "dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad =", "attend tokens farther than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len", "global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids", "# (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if", "= locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) ->", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "standard self & cross attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask,", "= find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q =", "is not None: input_ids = input_ids[:, -1:] return { \"decoder_input_ids\": input_ids, \"past_key_values\": past,", ") hidden_states = cross_attention_outputs[0] # Combine self attn and cross attn key value", "shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size,", "a multiple of `block_len`, it will be padded first with selected `pad_value`. 
\"\"\"", "self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(", "= (0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value)", "module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) **", "# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] #", "defined.\" # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100,", "time\" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1,", "new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self,", "indices. Mask values selected in `[0, 1]`: - 1 for tokens that are", "* x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks,", "LongT5 style. No bias and no subtraction of mean. \"\"\" super().__init__() self.weight =", "decoder_start_token_id assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\" # replace", "up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance", "size 1 >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1", "attention mask to be applied for a local attention.\"\"\" # [batch_size, num_blocks, block_len]", "i.e. 
the distance in tokens from the attending position to the attended-to position.", "return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model", "torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends", "= self.compute_bias(self.block_len) if mask is not None: # Replace masked positions with -1e10", "dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None:", "= checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, #", "cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states,", "an embedded representation. This is useful if you want more control over how", "output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput(", "std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See", "<NAME>, <NAME> and <NAME>. 
It's an encoder-decoder transformer pre-trained in a text-to-text denoising", "relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in", "both local attention mask and standard extended mask for transient-global attention extended_attention_mask =", "superclass documentation for the generic methods the library implements for all its model", "past does not exist if past_key_values is None: past_key_values = [None] * len(self.block)", "get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. heads_to_prune:", "import warnings from typing import Any, List, Optional, Tuple, Union import torch from", "may not use this file except in compliance with the License. # You", "= nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear =", "use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask,", "= False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing", "the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1,", "cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states))", "# (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not", "LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = 
LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer(", "of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the", "not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions,", "layer past batch dim # batch dim of `past` is at 2nd position", "masked positions with -10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask >", "\"decoder_input_ids\": input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\":", "bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same", "int ) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed block\" global id corresponding to", "(batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)", "the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for", "detail. 
[What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids`", "= layer_outputs[4 if output_attentions else 3] # append next layer key value states", "past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states +", "head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: #", "in past: # get the correct batch idx from layer past batch dim", "tokens in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor`", "need to set correct `past` for each of the four key / value", "(position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def", "LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder", "to attend tokens farther than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) <", "def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed", "= all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last", "0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1]", "__init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens 
self.is_decoder = config.is_decoder self.local_radius = config.local_radius", "for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids:", "selected heads of the cross-attention modules in the decoder. Mask values selected in", "Check the superclass documentation for the generic methods the library implements for all", "self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype", "tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)`", "model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}", "Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): \"\"\" Construct", "sequence_dim=2) # Tile side inputs across local key/value blocks # New shape: (batch_size,", "__init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config)", "states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None", "specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time\" ) elif input_ids is", "more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead", "past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias,", "not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`.", "Can be used to speed up decoding. If `past_key_values` are used, the user", "hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): \"\"\" Self-attention (if", "= self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and", "[batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor]", "farther than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask =", "LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on top.\",", "which only scales and doesn't shift, which is also known as Root Mean", "If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input", "* \"Studies have been shown that owning a dog is good for you\",", "does not load the weights associated with the model, only the configuration. 
Check", "= _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :]", "f\"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time\" ) elif", "transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): \"\"\" Construct a layernorm", "1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size,", "behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will", "to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size,", "to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask,", "1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids,", "buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same", "attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None", "shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor`", "self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def", "self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout", "embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead", "None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else", "fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids >", "attention extended_attention_mask = attention_mask # If a 2D or 3D attention mask is", "of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we", "know more on how to prepare `input_ids` for pretraining take a look a", "# if using past key value states. 
Need to inject it here if", "self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from", "* layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output)", "relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer", "(cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] +", "from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None,", "in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights", "self-attention outputs and relative position weights do_cross_attention = self.is_decoder and encoder_hidden_states is not", "unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state =", "more control over how to convert `input_ids` indices into associated vectors than the", "and inputs_embeds is not None: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise", "transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>>", "copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize", "_split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, 
num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1,", "positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) /", "Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns:", "output_attentions=False, ): \"\"\" Self-attention (if key_value_states is None) or attention over source sentence", "blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len:", "None: # (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias = position_bias +", "of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5", "is not None, \"self.model.config.pad_token_id has to be defined.\" # replace possible -100 values", "output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim:", "if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads", "encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds:", "dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim", "for all its model (such as downloading or saving, resizing the input embeddings,", "the head is **masked**. 
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*,", "# We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] *", "= self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads)", "Mask heads if we want to if layer_head_mask is not None: attn_weights =", "== \"local\": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else:", "absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance", "center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: \"\"\"Mask local attention", "1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states,", "0.0, -1e10) else: local_attention_mask = None if position_bias is None: # position_bias shape:", "of `block_len`, it will be padded first with selected `pad_value`. 
\"\"\" # pad", "if mask is not None: # Replace masked positions with -1e10 (according to", "[slice(0, None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices)", "PyTorch Module and refer to the PyTorch documentation for all matter related to", "hasattr(module.wi_1, \"bias\") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) **", "Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import (", "for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape", "num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] =", "class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__()", "hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config):", "@add_start_docstrings( \"The bare LONGT5 Model transformer outputting raw hidden-states without any specific head", "None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance =", "defined as memory_position - query_position, i.e. the distance in tokens from the attending", "that tokens are not allowed to attend tokens farther than ``local_radius.\"\"\" relative_position_ids =", "pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it", "* (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps)", "than the model has been trained on Args: relative_position: an int32 Tensor bidirectional:", "self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) #", "prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update", "Please make sure this is intended.\") expected_num_past_key_values = 2 if encoder_hidden_states is None", "position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias", "(see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors", "in `decoder_input_ids`. Causal mask will also be used by default. head_mask (`torch.FloatTensor` of", "2 is_small = relative_position < max_exact # The other half of the buckets", "= LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def", "Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to", "return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1,", "(layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states =", "# Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads,", "{layer_num: list of heads to prune in this layer} See base class PreTrainedModel", "model, and it enables using one of the two different efficient attention mechanisms", "inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache:", "consider setting `use_cache=True` to speed up decoding\") return past reordered_decoder_past = () for", "bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head on", "return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: \"\"\"Mask local attention mask", "= [(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ())", "choose to directly pass an embedded representation. If `past_key_values` is used, optionally only", "= _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask,", "def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model.", "i.e. 
those tokens which do not make for the whole fixed block, are", "= LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states", "self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states))", "_make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions -", "global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states", "\"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For encoder attention mechanism, either `local`", "self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs", "encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed", "block_len) if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len,", "(those that don't have their past key value states given to this model)", "processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings)", "loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss 
https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict:", "typing import Any, List, Optional, Tuple, Union import torch from torch import nn", "before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads =", "= [(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ())", "n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1,", "+ (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool =", "None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is", "is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and", "past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels:", "None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states", "n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads", "values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor)", "self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, 
encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict,", "block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :] #", "\"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads,", "value hidden states of the attention blocks. Can be used to speed up", "\"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size,", "this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size,", "output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss", "\"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, } return dummy_inputs def _init_weights(self, module): \"\"\"Initialize the weights\"\"\"", "self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in", "individual blocks.\"\"\" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len,", "hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm", "convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.", "else \"\" raise ValueError(f\"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds", "# Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, 
max_distance=128): \"\"\" Adapted from Mesh", "not use this file except in compliance with the License. # You may", "will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers,", "super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # LongT5 uses", "(num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) )", "self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and", "() if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else", "device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding", "1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids =", "torch.Tensor: \"\"\"Concatenate three consecutive blocks for each input block for local attentiont. For", "`(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to", "else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask,", "None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None,", "of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states", "config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss", "hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype", "into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return", "# get decoder inputs from shifting lm labels to the right decoder_input_ids =", "takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values`", "self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor]", "block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len +", "# (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None,", "agreed to in writing, software # distributed under the License is distributed on", "bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights initialization", "attention_output[1:] # add attentions if we output them return outputs class LongT5Block(nn.Module): def", "= new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return", "decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions,", "if 
return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into", "tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and", "module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, \"bias\") and module.wi_0.bias is not", "position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated", "__init__(self, hidden_size, eps=1e-6): \"\"\" Construct a layernorm module in the LongT5 style. No", "`None` if past does not exist if past_key_values is None: past_key_values = [None]", "else 3] # append next layer key value states if use_cache: present_key_value_states =", "\"google/long-t5-local-base\" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\",", "int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Concatenate three consecutive blocks", "an empty input sequence is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim]", "= decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See", "to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked", "use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward", "message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask", "FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use it instead", "attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used", "model's 
internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds`", "scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores =", "(self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder", "block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask", "= model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" use_cache = use_cache if use_cache", "store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), #", "def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: \"\"\"Create the relative position tensor for", "None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask =", "that owning a dog is good for you \", return_tensors=\"pt\" ... 
).input_ids #", "+ encoder_outputs return ((loss,) + output) if loss is not None else output", "extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None", "local -> global attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions", "position bias\"\"\" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position =", "hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: #", "value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs", "block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len,", "use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs", "+ decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None", "to load, falling back to LongT5LayerNorm\") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5", "encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder", "reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The bare LONGT5 Model transformer", "dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None:", "Indices can be obtained using [`T5Tokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What", "None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None", "not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask", "mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global", "prune in this layer} See base class PreTrainedModel \"\"\" for layer, heads in", "shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output", "= shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head)", "-> torch.Tensor: \"\"\"Mask local attention mask to enforce that tokens are not allowed", "of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad", "Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]]", "None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import", "the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask", "2) def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states,", "layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,)", "LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention", "global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states, mask=None,", "else None all_attentions = () if output_attentions else None all_cross_attentions = () if", "not None if do_cross_attention: # the actual query length is unknown for cross", "attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position =", "x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1)", "2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size, num_heads,", "past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,)", "Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]", "LongT5 uses a layer_norm which only scales and doesn't shift, which is also", "== \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use", "relative_position and larger 
buckets for larger absolute relative_positions. All relative positions >=max_distance map", "from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings,", "use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions", "from layer past batch dim # batch dim of `past` is at 2nd", "): batch_size, seq_length = hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim)", "needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids", "LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states,", "LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention", "# Keep self-attention outputs and relative position weights do_cross_attention = self.is_decoder and encoder_hidden_states", "-> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2)", "DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, 
replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger =", "create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module),", "block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size,", "layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`", "to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape", "block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks,", "encoder attention mechanism, either `local` or `transient-global` attention type is expected, \" f\"but", "None: # get decoder inputs from shifting lm labels to the right decoder_input_ids", "transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size", "def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return", "states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) #", "1)[:, :, :-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5", "and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask", "position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): \"\"\" Self-attention (if key_value_states is None)", "None: 
position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None:", "downloading or saving, resizing the input embeddings, pruning heads etc.) This model is", "op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads,", "1 >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1 >>>", "self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids,", "attention in encoder self-attention, otherwise standard self & cross attentions are used if", "if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]", "key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value", "[`~PreTrainedModel.from_pretrained`] method to load the model weights. \"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids", "(query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length,", "for `decoder_input_ids` generation. 
If `past_key_values` is used, optionally only the last `decoder_input_ids` have", "= self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from", "__init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif", "return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] = (0, pad_len)", "-1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states =", "side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len)", "seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias", "`past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: #", "return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is not included", "return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1,", "for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states", "`(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask", "encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past is", "as we use this strategy only for a decoder, orphan tokens, i.e. 
those", "self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False,", "self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None,", "else seq_length if use_cache is True: assert self.is_decoder, f\"`use_cache` can only be set", "(cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights initialization and", "= None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask", "None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len:", "usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration class with all the parameters", "to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention(", "return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) ->", "num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals >", "- 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask =", "Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. 
It's an", "raise ValueError( f\"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time\"", "err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError(f\"You have to specify either", "hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module):", "else \"\" raise ValueError( f\"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the", "get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self,", "is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions,", "not None, ( \"self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually", "() if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions", "and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds)", "`(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask", "speed up decoding. 
If `past_key_values` are used, the user can optionally input only", "None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states", "outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None:", "decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with a `language modeling` head", "attentions if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used", "append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,)", "self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block =", "one of the two different efficient attention mechanisms - (1) Local attention, or", "(`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores", "with ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape)", "= nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder", "shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. 
Indices should be", "None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None,", "= self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads", "binned relative position bias\"\"\" if device is None: device = self.relative_attention_bias.weight.device context_position =", "past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias,", "cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds:", "forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from", "transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None,", "in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the", "a text-to-text denoising generative setting. LongT5 model is an extension of T5 model,", "else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix", "Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\"", "coding=utf-8 # Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team. #", "to the\" \" pad_token_id. 
See LongT5 docs for more information\" ) # shift", "...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices,", "inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions:", "See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses", "mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or", "+ (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if", "self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]", "key_value_states). \"\"\" # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length)", "that owning a dog is good for you\", return_tensors=\"pt\" ... 
).input_ids # Batch", "Keep self-attention outputs and relative position weights do_cross_attention = self.is_decoder and encoder_hidden_states is", "`optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence", "= return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not", "key / value states\" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value,", "we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used in encoder\"\"\"", "super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance", "Model configuration class with all the parameters of the model. Initializing with a", "Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What", "`past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see", "self.layer[-1](hidden_states) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs", "seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] +", ").input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe", "attention mask and standard extended mask for transient-global attention extended_attention_mask = attention_mask #", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position)", "class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights initialization and a simple", "= [None] * len(self.block) # We can provide a self-attention mask of dimensions", "is None: past_key_values = [None] * len(self.block) # We can provide a self-attention", "with mask position_bias = position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size,", "# (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias,", "is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm", "self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self):", "decoding. 
If `past_key_values` are used, the user can optionally input only the last", "num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks,", "LongT5LayerNorm = FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use it instead of", "self.is_decoder: logger.warning(\"`past_key_values` is passed to the encoder. Please make sure this is intended.\")", "= torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)", "(self-attention weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class", "pad_value: int = 0) -> torch.Tensor: \"\"\"Pad a tensor so that a sequence", "torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids", "# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2]", "binned relative position bias\"\"\" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device )", "output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def", "module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow", "speed up decoding (see `past_key_values`). 
output_attentions (`bool`, *optional*): Whether or not to return", "f\"Got {len(past_key_value)} past key / value states\" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value =", "= nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "for relative attention. The relative position is defined as memory_position - query_position, i.e.", "```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model", "in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>,", "if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh", "larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the", "global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs)", "nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)", "instead of a plain tuple. \"\"\" # Warning message for FutureWarning: head_mask was", "not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder", "LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return", "internal embedding lookup matrix. 
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes", "be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input", "the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias),", "def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: \"\"\"Split an input tensor", "-100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >=", "= layer_outputs[:2] # We share the position biases between the layers - the", "tensor for local -> global attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len =", "# position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if not", "We use smaller buckets for small absolute relative_position and larger buckets for larger", "\"local\": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise", "False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets", "allow for more graceful generalization to longer sequences than the model has been", "1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size #", "is not None: # (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias =", "= self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0])", 
"[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on", "[batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) ->", "relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf)", "heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1", "block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len)", "key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs))", "indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`,", "[0, inf) # half of the buckets are for exact increments in positions", "as a decoder\" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder", "hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config,", "if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states,", "tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def", "* torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in", "have to be input (see `past_key_values`). 
To know more on how to prepare", "return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput(", "0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets", "False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply", "config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout", "Union import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint", "((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif", "If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`.", "nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # LongT5 uses a layer_norm which", "None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is", "self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states", "is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention", "specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\",", "set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings", "Training](./longt5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor", "= None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] =", "Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool]", "None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def", "of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token", "hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass", "do_cross_attention: # the actual query length is unknown for cross attention # if", "or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules", "super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def", "possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids", "containing int32 values in the range [0, num_buckets) \"\"\" relative_buckets = 0 if", "is always None with gradient checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask,", "used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This", "int) -> torch.Tensor: \"\"\"Mask local attention mask to enforce that tokens are not", "`decoder_input_ids` you can choose to directly pass an embedded representation. 
If `past_key_values` is", "pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode=\"constant\",", "ImportError: # using the normal LongT5LayerNorm pass except Exception: logger.warning(\"discovered apex but it", "0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: \"\"\"Create the", "key_states ) # (batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias is", "...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5", "past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states))", "num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask", "layer_past_states: # need to set correct `past` for each of the four key", "or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)", "See base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput,", "> 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size,", "key) for cross attention. 
' if expected_num_past_key_values == 4 else ''}\" f\"Got {len(past_key_value)}", "`head_mask`, but this feature is deprecated and will be removed in future versions.", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder", "new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None,", "* 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56", "Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>>", "**masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`,", "attention # if using past key value states. Need to inject it here", "using one of the two different efficient attention mechanisms - (1) Local attention,", "self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states", "- 0 indicates the head is **masked**. 
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of", "and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from", "config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states =", "x.shape[(dim + 1) :] # If 0 is in output_shape, we cannot apply", "self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing =", "a regular PyTorch Module and refer to the PyTorch documentation for all matter", "buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large =", "module in the LongT5 style. No bias and no subtraction of mean. \"\"\"", "and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def", "encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with a `language modeling` head on top.\"\"\", LONGT5_START_DOCSTRING) class", "return_tensors=\"pt\").input_ids # Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids,", "position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias:", "* self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q =", "= self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states,", "not None: input_ids = input_ids[:, -1:] return { \"decoder_input_ids\": input_ids, \"past_key_values\": past, 
\"encoder_outputs\":", "<NAME> and <NAME>. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative", "behavior. Parameters: config ([`LongT5Config`]): Model configuration class with all the parameters of the", "dog ```\"\"\" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict", "// global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else:", "attention. The relative position is defined as memory_position - query_position, i.e. the distance", "torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids =", "config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return", "decoding is disabled and no need to reorder if past is None: logger.warning(\"You", "= present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias,", "sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the", "return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects hidden states", "*hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of", "else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0,", "lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value", "embedding lookup matrix. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions", "to directly pass an embedded representation. If `past_key_values` is used, optionally only the", "= (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,)", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "key and value hidden states of the attention blocks. Can be used to", "torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask is not None: # We need to adjust", "else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds,", "= LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self):", "layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, )", "key and values are already calculated # we want only the last query", "None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss", "config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights", "LongT5LayerSelfAttention(nn.Module): def __init__(self, config, 
has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model,", "if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)", "(cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is", "training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want", "Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v =", "r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens", "(batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)", "Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. \"\"\" LONGT5_INPUTS_DOCSTRING =", "None: input_ids = input_ids[:, -1:] return { \"decoder_input_ids\": input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs,", "is no bias. Additionally we want to make sure that the accumulation for", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "can choose to directly pass an embedded representation. 
This is useful if you", "len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The bare LONGT5 Model", "to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else:", "else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs =", "hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: \"\"\"Compute individual block aggregates", "is None: assert self.embed_tokens is not None, \"You have to initialize the model", "To know more on how to prepare `decoder_input_ids` for pretraining take a look", "position tensor for local -> global attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len", "= tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1 >>> # forward pass", "- 0 indicates the head is **masked**. 
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or", "is not None: # Replace masked positions with -1e10 (according to the original", "multiple of `block_len`\"\"\" pad_len = -x.shape[dim] % block_len # Handle cases when an", "is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not", "does not exist if past_key_values is None: past_key_values = [None] * len(self.block) #", "max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets +=", "# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and", "encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append", "`(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of", "len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True,", "copy import math import warnings from typing import Any, List, Optional, Tuple, Union", "nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states", "indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks) indices", "(batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder),", "and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) &", "into blocks of a given `block_len` along the given 
`dim`. If the dimension", "self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def", "layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions", "have been shown that owning a dog is good for you\", return_tensors=\"pt\" ...", "compliance with the License. # You may obtain a copy of the License", "class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared", "encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict", "self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False", "to LongT5LayerNorm\") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self,", "position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An", "simlified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. 
In our scenario,", "= hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if", "return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds", "approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i,", "# [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask:", "self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor]", "DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger", "= self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model *", "self_attention_outputs[2:] # Keep self-attention outputs and relative position weights do_cross_attention = self.is_decoder and", "# self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else:", "if use_cache is True: assert self.is_decoder, f\"`use_cache` can only be set to `True`", "from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = {", "last_hidden_states = outputs.last_hidden_state ```\"\"\" use_cache = use_cache if use_cache is not None else", "attention_layer = LongT5LayerSelfAttention elif 
config.encoder_attention_type == \"local\": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type ==", "query_states, key_states ) # (batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias", "We use local attention in encoder self-attention, otherwise standard self & cross attentions", "attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states)", "decoder input sequence tokens in the vocabulary. Indices can be obtained using [`T5Tokenizer`].", "size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" return_dict =", "< full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask", ") layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add", "last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you", "a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask", "This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular", "..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer", "__init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout =", "to multiple of block_len if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x,", "BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\" The LongT5 model", "*optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an", "config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type", "loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python", "set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads)", "is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )", "weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings", "self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True", "vectors than the model's internal embedding lookup matrix. 
output_attentions (`bool`, *optional*): Whether or", "self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def", "# [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int)", "\"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self,", "`[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates", "fixed block, are assigned to the preceding block. Padding tokens from the original", "2 ), f\"past_key_value should have 2 past states: keys and values. Got {", "- 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length,", "Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention", "base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)", "False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(", "use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not", "# Unless required by applicable law or agreed to in writing, software #", "= nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def 
forward(self, hidden_states): # LongT5 uses a layer_norm", "None: logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\") return", "the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: -", "= config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim", "mechanism, either `local` or `transient-global` attention type is expected, \" f\"but got {config.encoder_attention_type}.\"", "config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim", "# shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1,", "token indices. Mask values selected in `[0, 1]`: - 1 for tokens that", "attn and cross attn key value states if present_key_value_state is not None: present_key_value_state", "get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is", "new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def", "global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks,", "is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones(", "extension of T5 model, and it enables using one of the two different", "as memory_position - query_position, i.e. 
the distance in tokens from the attending position", "pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config):", "torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Pad a", "len(past_key_value)} past states\" real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length", "= attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) ==", "shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads,", "config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == \"local\": attention_layer = LongT5LayerLocalSelfAttention", "self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Relativen attention", "shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1)", "self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune):", "in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion if", "(batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states,", "unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects", "self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs 
= self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value,", "-0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_(", "= config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid", "key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache", "None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets,", "None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions,", "values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False,", "Mask to avoid performing attention on padding token indices. Mask values selected in", "present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs", "was split into two arguments `head_mask` and `decoder_head_mask`. 
Currently, `decoder_head_mask` is set to", "= embed_tokens self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1", "over how to convert `input_ids` indices into associated vectors than the model's internal", "to be sum with mask position_bias = position_bias + mask.transpose(1, 2) scores +=", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "mode=\"constant\", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor:", "None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None,", "first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings", "is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length,", ") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1])", "self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final", "other half of the buckets are for logarithmically bigger bins in positions up", "past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask,", "= None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask", "[What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):", "self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if 
self.gradient_checkpointing", "LongT5Stack)): module.gradient_checkpointing = value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids):", "f\"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else", "attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs", "input_ids is not None and inputs_embeds is not None: err_msg_prefix = \"decoder_\" if", "present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions", "get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput,", "= past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs =", "bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small", "relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets", "tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer( ... 
100", "LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For encoder attention mechanism, either `local` or `transient-global` attention", "= None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] =", "-0.5)) if hasattr(module.wi, \"bias\") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor *", "Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor]", "= None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] =", "LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing", "= position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor:", "self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads", "def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder =", "the same shape as relative_position, containing int32 values in the range [0, num_buckets)", "\"self.model.config.decoder_start_token_id has to be defined. 
In LongT5 it is usually set to the\"", "+ encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, )", "obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids)", "has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout", "# [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3", "Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] #", "in encoder self-attention, otherwise standard self & cross attentions are used if self.is_decoder:", "(batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if", "weights do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the", "matter related to general usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration class", "0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX", "load the weights associated with the model, only the configuration. Check out the", "can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. 
To know", "key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not", "of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally,", "= cross_attention_outputs[0] # Combine self attn and cross attn key value states if", "and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size,", "else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states):", "self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning:", "if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed", "`[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>>", "use_cache=False, output_attentions=False, ): \"\"\" Self-attention (if key_value_states is None) or attention over source", "key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\"", "We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim", "= torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training:", "= side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and \"side\"/\"global\" key/value states to", "Mean # Square Layer Normalization 
https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean", "get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states =", "\"\"\" Construct a layernorm module in the LongT5 style. No bias and no", ":, :-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class", "= LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) ==", "= self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, )", "the weights\"\"\" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module,", "side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] 
attention_side_bias = torch.where(side_attention_mask >", "get the correct batch idx from layer past batch dim # batch dim", "Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>>", "(batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states],", "= self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length,", ") # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len,", "dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into", "not None: # We need to adjust position bias shape to be sum", "attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device)", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "import Any, List, Optional, Tuple, Union import torch from torch import nn from", "[LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,", "config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization", "on both the right and the left. 
Indices can be obtained using [`T5Tokenizer`].", "device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids", "\"\"\" num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] = (1,", "return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def", "\"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor *", "relative position tensor for local -> global attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size)", "relative_position, containing int32 values in the range [0, num_buckets) \"\"\" relative_buckets = 0", "if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "those tokens which do not make for the whole fixed block, are assigned", "key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] #", "head is **masked**. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead", "last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\" The LongT5 model was", "individual block aggregates by summing over individual blocks.\"\"\" # (batch..., seq_len, global_seq_len)) block_ids", "weights. \"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices", "bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states))", "model is an extension of T5 model, and it enables using one of", "self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias", "reorder if past is None: logger.warning(\"You might want to consider setting `use_cache=True` to", "None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None,", "by summing over individual blocks.\"\"\" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids", "...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward,", "= torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[...,", "_sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = 
torch.cumsum(torch.ones(batch_size, num_globals), dim=-1)", "be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values", "head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers", "not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad =", "and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states,", "LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states,", "forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states", "return the attentions tensors of all attention layers. See `attentions` under returned tensors", "# half of the buckets are for exact increments in positions max_exact =", "+= pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] =", "0, 0.0, -1e10) # We need to adjust position bias shape to be", "heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune", "sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( \"...qhd,...khd->...hqk\",", "BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) >", "heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask:", "(present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class", "use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention", "scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training", "the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally,", "Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python >>> from", "`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
If", "hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output =", "past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask,", "= () if use_cache else None all_hidden_states = () if output_hidden_states else None", "0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: #", "z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return", "torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing", "memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None]", "a simlified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our", "self & cross attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape,", "to make it broadcastable to all heads. # We use local attention in", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args", "layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): \"\"\" Self-attention (if key_value_states is None) or attention", "self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer,", "side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len,", "# We share the position biases between the layers - the first layer", "Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is", "\"\"\"Pad a tensor so that a sequence length will be a multiple of", "== global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0)", "**not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape", "for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states +", "+ cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs", "= head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if", "by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to", "classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All", "= self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states =", "std=factor * ((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def", "= encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape,", "or `transient-global` attention type is expected, \" f\"but got {config.encoder_attention_type}.\" ) self.layer =", "dog is good for you\", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>>", "https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias.", "(1) Local attention, or (2) Transient-Global attention. This model inherits from [`PreTrainedModel`]. Check", "needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if", "encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)", "self.encoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. 
heads_to_prune: dict of", "global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor,", "if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states", "\"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [", "((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module,", "are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):", "control over how to convert `decoder_input_ids` indices into associated vectors than the model's", "hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states)", "config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask:", "you can choose to directly pass an embedded representation. This is useful if", "model's internal embedding lookup matrix. 
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):", "mask for transient-global attention extended_attention_mask = attention_mask # If a 2D or 3D", "# Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention", "either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds is None: assert self.embed_tokens is not None,", "Compute scores scores = torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states ) # (batch_size, num_block, n_heads,", "shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states,", "block_len if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0)", "**not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape", "over how to convert `decoder_input_ids` indices into associated vectors than the model's internal", "Model transformer outputting raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, )", "hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size,", "if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask", "sentence (provided by key_value_states). 
\"\"\" # Input is (batch_size, seq_length, dim) # Mask", "forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def", "position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2]", "half of the buckets are for exact increments in positions max_exact = num_buckets", "axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask", "= LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate)", "the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask", "use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs +", "indicates the head is **masked**. 
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`:", "their past key value states given to this model) of shape `(batch_size, 1)`", "apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared =", "position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias", "return relative_buckets def compute_bias(self, query_length, key_length, device=None): \"\"\"Compute binned relative position bias\"\"\" if", "= shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states", "return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states,", "ValueError(f\"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds is None: assert", "output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = (", "encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return", "LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See", "model = LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... 
) >>> # Let's try a very", "def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint(", "batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert", "sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x", "cut decoder_input_ids if past is used if past is not None: input_ids =", "else None all_hidden_states = () if output_hidden_states else None all_attentions = () if", "# (batch_size, n_heads, seq_length, key_length) scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores", "self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects hidden states correctly to key/query states\"\"\"", "= self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod", "Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool]", "Mask to nullify selected heads of the cross-attention modules in the decoder. Mask", "prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim", "prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of", "device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if", "use this strategy only for a decoder, orphan tokens, i.e. 
those tokens which", ") global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids =", "= (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is", "def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense =", "For more information, see: https://arxiv.org/pdf/2112.07916.pdf. \"\"\" num_blocks = x.shape[block_dim] pad = [(0, 0)]", "states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep", "transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens", "return { \"decoder_input_ids\": input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\":", "attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length =", "all heads. 
# We use local attention in encoder self-attention, otherwise standard self", "has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None,", "a sequence length will be a multiple of `block_len`\"\"\" pad_len = -x.shape[dim] %", "key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states))", "both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time\" ) elif input_ids is not", "dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Pad a tensor so that", "the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size -", "This is useful if you want more control over how to convert `input_ids`", "\"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim)", "query length is unknown for cross attention # if using past key value", "ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import", "sequence_length, hidden_size)` is a sequence of hidden states at the output of the", "\"\"\" config_class = LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing = True @property # Copied", "past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length", "[LONGT5 Training](./longt5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a", "block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states ) #", "= (layer_output,) + attention_output[1:] # add attentions if we output them return outputs", "key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)", "mask is not None: # Replace masked positions with -1e10 (according to the", "attentions if we output them return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False):", ") # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1,", "decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of", "device=scores.device) # if key and values are already calculated # we want only", "is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).", "decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights", "them return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used in encoder\"\"\" def __init__(self,", "int = 0) -> torch.Tensor: \"\"\"Pad a tensor so that a sequence length", "`(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in", "embedded representation. 
This is useful if you want more control over how to", "self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states", "self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None,", "n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads,", "tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias),", "masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists", "into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The input argument", "past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a", "Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>.", "for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import", "+ output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits,", "0) -> torch.Tensor: \"\"\"Concatenate three consecutive blocks for each input block for local", "be used to speed up decoding. If `past_key_values` are used, the user can", "num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return", "None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2)", "LongT5 it is usually set to the\" \" pad_token_id. 
See LongT5 docs for", "0)] * x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) # [batch_size,", "attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None,", "= global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def", "None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python", "# (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) #", "inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length", "Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool]", "config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads *", "internal embedding lookup matrix. 
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally,", "= \"google/long-t5-local-base\" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\",", "Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool]", "input_shape # required mask seq length can be calculated via length of past", "bias shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) #", "if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:", "= nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads", "(batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias],", "outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" return_dict = return_dict if return_dict", "n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is", "is disabled and no need to reorder if past is None: logger.warning(\"You might", "than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not", "forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None,", "make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not", "to directly pass an embedded representation. 
This is useful if you want more", "See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare", "expected_num_past_key_values == 4 else ''}\" f\"Got {len(past_key_value)} past key / value states\" )", "# Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask,", "all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: use_cache = False def", "= [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config:", "efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention. This model", "(batch_size, n_heads, seq_length, key_length) scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores )", "Translate relative position to a bucket number for relative attention. The relative position", "config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache =", "Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy", "self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask", "0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked),", "outputting encoder's raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, ) class", "position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights =", "heads of the self-attention modules in the decoder. Mask values selected in `[0,", "n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads,", "only for a decoder, orphan tokens, i.e. those tokens which do not make", "token to attend global aggregated ones # New shape: (batch_size, num_blocks, 3 *", "in `[0, 1]`: - 1 indicates the head is **not masked**, - 0", "num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask", "= self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):", "up decoding (see `past_key_values`). 
output_attentions (`bool`, *optional*): Whether or not to return the", "2, block_len] x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] = [] for", "with valid token embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required", "= torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length,", "set to copy `head_mask`, but this feature is deprecated and will be removed", "output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\"", "dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout,", "Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs =", "is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\")", "= past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size,", "else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is", "real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2", "https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, \"bias\") and module.wi.bias is", "[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the", "tokens, i.e. 
those tokens which do not make for the whole fixed block,", "None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length,", "under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to", "if you want more control over how to convert `input_ids` indices into associated", "use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict", "speedy decoding is disabled and no need to reorder if past is None:", "1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks.", "1, n_heads, block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias", "\"\"\"Concatenate three consecutive blocks for each input block for local attentiont. For more", "unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to", "(batch_size, 1, n_heads, block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2)", "lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct =", "take a look at [LONGT5 Training](./longt5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):", "has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None,", "isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied from", "get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels)", "= cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training:", "for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length,", "the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds,", "def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head =", "LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing = True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def", "torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()", "self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None:", "super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = 
copy.deepcopy(config) encoder_config.is_decoder =", "last `decoder_input_ids` (those that don't have their past key value states given to", "Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\")", "use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states,", "to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim =", "in future versions. If you do not want to use any `decoder_head_mask` now,", "+= position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(),", "of block_len if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len, dim,", "heads. # We use local attention in encoder self-attention, otherwise standard self &", "how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask", "handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals", "/ max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets", "if not self.is_decoder: logger.warning(\"`past_key_values` is passed to the encoder. 
Please make sure this", "-> torch.Tensor: \"\"\"Makes 3-blocked relative position ids for local attention.\"\"\" position_ids = torch.arange(3", "memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 *", "= \"decoder_\" if self.is_decoder else \"\" raise ValueError(f\"You have to specify either {err_msg_prefix}input_ids", "bucket number for relative attention. The relative position is defined as memory_position -", "hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states", "owning a dog is good for you \", return_tensors=\"pt\" ... ).input_ids # Batch", "testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration,", "self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config,", "side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and \"side\"/\"global\" key/value states to allow each", "torch.device) -> torch.Tensor: \"\"\"Prepare attention mask to be applied for a local attention.\"\"\"", "(hidden_states,) + attention_output[1:] # add attentions if we output them return outputs #", "_relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "None: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError( f\"You cannot specify", "is also a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module", "`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at", "(global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids)", "dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] =", "can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether", "passing `input_ids` you can choose to directly pass an embedded representation. This is", "LongT5 docs for more information\" ) # shift inputs to the right if", "required mask seq length can be calculated via length of past mask_seq_length =", "or not to return a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING", "unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length,", "block_len: int, device: torch.device) -> torch.Tensor: \"\"\"Prepare attention mask to be applied for", "`decoder_head_mask = torch.ones(num_layers, num_heads)`. 
\"\"\" @add_start_docstrings( \"The bare LONGT5 Model transformer outputting raw", "attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position", "= nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def", "not None: assert ( len(past_key_value) == 2 ), f\"past_key_value should have 2 past", "0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks)", "+= past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states", "biases between the layers - the first layer store them # layer_outputs =", "be defined. In LongT5 it is usually set to the\" \" pad_token_id. See", ">>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" use_cache = use_cache", "query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states))", "key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and \"side\"/\"global\" key/value", "global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) #", "Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and", "# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__()", "_mask_local_attention_mask(local_attention_mask: 
torch.Tensor, block_len: int) -> torch.Tensor: \"\"\"Mask local attention mask to enforce that", "output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput(", "return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm #", "is not None: # We need to adjust position bias shape to be", "seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks,", "mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if", "config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config,", "# Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 *", "def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward(", "= torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if", "torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:,", "+ mask # (batch_size, n_heads, seq_length, key_length) scores += position_bias attn_weights = nn.functional.softmax(scores.float(),", ") -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import T5Tokenizer,", ":-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0]", "= None, labels: 
Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] =", "= self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state =", "not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None", "position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights", "output_shape, we cannot apply reshape because of incompatibility with ONNX conversion if 0", "are invalid. We use smaller buckets for small absolute relative_position and larger buckets", "normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions,", "= self_attention_outputs[2:] # Keep self-attention outputs and relative position weights do_cross_attention = self.is_decoder", "attention mechanism, either `local` or `transient-global` attention type is expected, \" f\"but got", "hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF", "limitations under the License. 
\"\"\" PyTorch LongT5 model.\"\"\" import copy import math import", "global_seq_len + 1)[:, :, :-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm", "position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: #", "mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states +", "if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0,", "config.encoder_attention_type == \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For encoder attention mechanism,", "return past reordered_decoder_past = () for layer_past_states in past: # get the correct", "shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states =", "intended.\") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) !=", "checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask,", "has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, 
eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate)", "[What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for", "has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv", "side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and \"side\"/\"global\" key/value states", "*attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states", "= config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len =", "== \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For encoder attention mechanism, either", "if mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len)", "same bucket. All relative positions <=-max_distance map to the same bucket. This should", "each token to attend global aggregated ones # New shape: (batch_size, num_blocks, 3", "need to adjust position bias shape to be sum with mask local_attention_mask =", "that `shifted_input_ids` has only positive values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config,", "logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" #", "https://arxiv.org/pdf/2112.07916.pdf. 
\"\"\" num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] =", "_global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids = (global_block_ids", "range [0, inf) # half of the buckets are for exact increments in", "reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The bare LONGT5 Model transformer outputting encoder's", "LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input", "= torch.ones(num_layers, num_heads)`. \"\"\" @add_start_docstrings( \"The bare LONGT5 Model transformer outputting raw hidden-states", "the given `dim`. If the dimension length is not a multiple of `block_len`,", "states. \" f\"{'2 (past / key) for cross attention. ' if expected_num_past_key_values ==", "decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the", "query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value", "global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask is not None: # We", "math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large,", "cases when an empty input sequence is given if not all(x.shape): new_shape =", "= global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor,", "1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype)", ") one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return 
torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype))", "(hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs =", "only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.", "LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder", "make for the whole fixed block, are assigned to the preceding block. Padding", "+= torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): \"\"\"Compute binned", "= None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache,", "use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not None", "-> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import T5Tokenizer, LongT5Model", "self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for", "past is None: logger.warning(\"You might want to consider setting `use_cache=True` to speed up", "scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length,", "= self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask", "internal embedding lookup matrix. 
output_attentions (`bool`, *optional*): Whether or not to return the", "torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self,", "return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder", "(batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1]", "0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of", "is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input", ") return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids", "= nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask", "head on top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ]", "ids for local attention.\"\"\" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len]", "[`~utils.ModelOutput`] instead of a plain tuple. \"\"\" # Warning message for FutureWarning: head_mask", "position is defined as memory_position - query_position, i.e. 
the distance in tokens from", "None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None,", "torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large", "\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\"", "# add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention", "`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds", "n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if", "__init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout", "relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) ->", "return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear", "self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate)", "LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling before softmax #", "and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask", "not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not", "a dog ```\"\"\" 
use_cache = use_cache if use_cache is not None else self.config.use_cache", "(LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling before softmax", "T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try a very long encoder", ") # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index)", "LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 =", "hidden_states = cross_attention_outputs[0] # Combine self attn and cross attn key value states", "the\" \" pad_token_id. See LongT5 docs for more information\" ) # shift inputs", "attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed block\" global", "to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds is None: assert self.embed_tokens is", "LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ] def __init__(self, config: LongT5Config):", "\"\"\"Create the relative position tensor for local -> global attention.\"\"\" block_ids, global_segment_ids =", "= { \"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, } return dummy_inputs def _init_weights(self,", "torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size", "+ (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module):", "except Exception: logger.warning(\"discovered apex but it failed to load, falling 
back to LongT5LayerNorm\")", "hasattr(module.wi, \"bias\") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) **", "on top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected", "memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape", "= config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim =", "self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape =", "(non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1,", "block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]:", "self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the", "(hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class", "(1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return", "set to the\" \" pad_token_id. See LongT5 docs for more information\" ) #", "model weights. 
\"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):", "module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo,", "= project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None", "!= expected_num_past_key_values: raise ValueError( f\"There should be {expected_num_past_key_values} past states. \" f\"{'2 (past", "= config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i ==", "for more graceful generalization to longer sequences than the model has been trained", "__init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets =", "((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, \"bias\") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0,", "TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model =", "is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1]", "dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if", "selected heads of the self-attention modules in the encoder. Mask values selected in", "# ourselves in which case we just need to make it broadcastable to", "of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in", "seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ =", "= self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = ()", "len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads )", "= sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is", "elif config.encoder_attention_type == \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For encoder attention", "related to general usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration class with", "= config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size", "(key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,)", "return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder", "# Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__()", "see: https://arxiv.org/pdf/2112.07916.pdf. 
\"\"\" num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim]", "\"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, }", "None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position", "and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to", "of the two different efficient attention mechanisms - (1) Local attention, or (2)", "shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices.", "= config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing", "query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None,", "can choose to directly pass an embedded representation. If `past_key_values` is used, optionally", "# (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) #", "with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff,", "self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1", "values selected in `[0, 1]`: - 1 for tokens that are **not masked**,", "def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states =", "`dim`. 
If the dimension length is not a multiple of `block_len`, it will", "self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated", "is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, \"bias\")", "# Encode if needed (training, first prediction pass) if encoder_outputs is None: #", "shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1,", "1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" return_dict = return_dict", "is a sequence of hidden states at the output of the last layer", "if key_value_states is None else key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads,", "-1e10) else: local_attention_mask = None if position_bias is None: # position_bias shape: #", "need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states", "class with all the parameters of the model. Initializing with a config file", "self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states", "head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\"", "to avoid performing attention on padding token indices. 
Mask values selected in `[0,", "encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs", "config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius", "tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states,", "= side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias", "not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer", "lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors", "ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`", "encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared)", "We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves", "attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :]", "= 0) -> torch.Tensor: \"\"\"Concatenate three consecutive blocks for each input block for", "the superclass documentation for the generic methods the library implements for all its", "= nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False", "size 1 >>> outputs = model.generate(input_ids) >>> 
print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this", "transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\":", "return a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\" # Warning message for", "((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss,", "= ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict =", "it broadcastable to all heads. # We use local attention in encoder self-attention,", "has only positive values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config)", "layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),", "at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to", "self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear", "= nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None,", "context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position", "not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, 
encoder_last_hidden_state=encoder_outputs.last_hidden_state,", "and values. Got { len(past_key_value)} past states\" real_seq_length += past_key_value[0].shape[2] if query_length is", "make sure this is intended.\") expected_num_past_key_values = 2 if encoder_hidden_states is None else", "encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings:", "None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, \"bias\") and module.wi_1.bias", "outputs and relative position weights do_cross_attention = self.is_decoder and encoder_hidden_states is not None", "of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states", "# Relativen attention bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias", "key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias", "self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o", "side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len", "scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads", "# Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:]", "r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [ 
r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared", "the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use", "nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder =", "return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder(", "* hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info(\"Discovered", "key_length, device=scores.device) # if key and values are already calculated # we want", "can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are", "are represented by -1. 
\"\"\" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) ->", "None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask =", "# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id", "else ''}\" f\"Got {len(past_key_value)} past key / value states\" ) self_attn_past_key_value = past_key_value[:2]", "*optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( \"self.model.config.decoder_start_token_id has to be", "bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) +", "num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. 
Mask values", "set to `True` if {self} is used as a decoder\" if attention_mask is", "checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value", "global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask:", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels", "extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == \"local\": extended_attention_mask =", "in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact #", "+ seq_length if past_key_values is not None else seq_length if use_cache is True:", "is in the range [0, inf) # half of the buckets are for", "dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes 3-blocked relative position ids for local", "past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states", "instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size,", "= -x.shape[dim] % block_len # Handle cases when an empty input sequence is", "reordered_decoder_past @add_start_docstrings( \"The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any", "the vocabulary. 
LongT5 is a model with relative position embeddings so you should", "block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. \"\"\" num_blocks = x.shape[block_dim]", "= nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout(", "adjust position bias shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len,", "key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions", "vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss", "+ 1)[:, :, :-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with", "value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values", ") return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\" The", "+ attention_output[1:] # add attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module):", "LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder", "None, \"self.model.config.pad_token_id has to be defined.\" # replace possible -100 values in labels", "equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with 
onnx op>9 if position_bias is None:", "\"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of", "only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from", "to handle weights initialization and a simple interface for downloading and loading pretrained", "config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias", "elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, \"bias\") and", "past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states,", "bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :]", "torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask", "type is expected, \" f\"but got {config.encoder_attention_type}.\" ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias))", "`(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in", "all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states,", "Parameters: config ([`LongT5Config`]): Model configuration class with all the parameters of the model.", "encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)", "attn_weights = attn_weights * layer_head_mask attn_weights = 
attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states))", "self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values =", "right and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and", "hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module):", "return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length", "Padding tokens from the original sequence are represented by -1. \"\"\" batch_size, seq_len", "either `local` or `transient-global` attention type is expected, \" f\"but got {config.encoder_attention_type}.\" )", "def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states", "= self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query", "raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys", ") self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self,", "_concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local key/value blocks # New", "modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is", "__HEAD_MASK_WARNING_MSG = \"\"\" The input argument `head_mask` was split into two arguments `head_mask`", "cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if", "it instead of LongT5LayerNorm\") except ImportError: # using the normal LongT5LayerNorm pass except", "length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length", "= block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1", "= global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return", "and loading pretrained models. \"\"\" config_class = LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing =", "layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings(", "last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2", "not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if", "= self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0])", "relative positions >=max_distance map to the same bucket. 
All relative positions <=-max_distance map", "@add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] =", "= torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\":", "convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype)", "decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask:", "self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states)", "of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0,", "new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of", "choose to directly pass an embedded representation. 
This is useful if you want", "self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 *", "range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)]", "eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None,", "make sure that the accumulation for # half-precision inputs is done in fp32", "output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is", "proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>,", "the generic methods the library implements for all its model (such as downloading", "setting. LongT5 model is an extension of T5 model, and it enables using", "inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs", "have 2 past states: keys and values. Got { len(past_key_value)} past states\" real_seq_length", "in the LongT5 style. No bias and no subtraction of mean. \"\"\" super().__init__()", "0] = decoder_start_token_id assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"", "seq_length if use_cache is True: assert self.is_decoder, f\"`use_cache` can only be set to", "bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def", "passed to the encoder. 
Please make sure this is intended.\") expected_num_past_key_values = 2", "If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`", "\"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value:", "if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied", "This should allow for more graceful generalization to longer sequences than the model", "+ (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The bare LONGT5 Model transformer outputting encoder's raw", "the License. \"\"\" PyTorch LongT5 model.\"\"\" import copy import math import warnings from", "1]`: - 1 indicates the head is **not masked**, - 0 indicates the", "return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values,", "two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention.", "(batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads,", "it failed to load, falling back to LongT5LayerNorm\") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense", "torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where(", "masked positions with -1e10 (according to the original implementation) mask = torch.where(mask >", "(batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, 
dim=-2).transpose(1, 2) side_position_bias =", "input tensor into blocks of a given `block_len` along the given `dim`. If", "if past_key_value is not None else None ) value_states = project( hidden_states, self.v,", "only be set to `True` if {self} is used as a decoder\" if", "pad = [(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1],", "self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(", "= input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape =", "attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) +", "load, falling back to LongT5LayerNorm\") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class", "= self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states)", "return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int,", "transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config,", "relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length,", "r\"\"\" Returns: Example: ```python >>> from transformers import T5Tokenizer, LongT5Model >>> tokenizer =", "= LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def 
get_input_embeddings(self):", "is not None: assert ( len(past_key_value) == 2 ), f\"past_key_value should have 2", "self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is not None:", "LLC., LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache", "outputs.last_hidden_state ```\"\"\" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict", "config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)", "head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs:", "sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local key/value", "Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool]", "attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied", "__init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder =", "layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We", "tokenizer( ... 
100 * \"Studies have been shown that owning a dog is", "global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed block\" global id corresponding", "dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states =", "input only the last `decoder_input_ids` (those that don't have their past key value", "torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states,", "# forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\"", "= nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ):", "global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is None:", "not to return the hidden states of all layers. See `hidden_states` under returned", "for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG", "`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will", "embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to", "for proxies. 
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]],", "value states\" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value =", "whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a", "_CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\",", "relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): \"\"\"Compute binned relative position", "the output of the last layer of the encoder. Used in the cross-attention", "hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm", "sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings", "compute_bias(self, query_length, key_length, device=None): \"\"\"Compute binned relative position bias\"\"\" if device is None:", "encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with:", "The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916)", "language governing permissions and # limitations under the License. 
\"\"\" PyTorch LongT5 model.\"\"\"", "with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000", "top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected =", "-torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half", "is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long", "block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0)", "head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected", "+ 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate", "return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used in encoder\"\"\" def __init__(self, config,", "[`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as", "weights initialization and a simple interface for downloading and loading pretrained models. \"\"\"", "inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the generic methods the library", "= torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size:", "= None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] =", "absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative", "are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for", "def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens =", "optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is", "torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1", "def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ):", "shape to be sum with mask position_bias = position_bias + mask.transpose(1, 2) scores", "the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor,", "* ((self.config.d_model) ** -0.5)) if hasattr(module.wi, \"bias\") and module.wi.bias is not None: module.wi.bias.data.zero_()", "set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask -", ") @add_start_docstrings(\"\"\"LONGT5 Model with a `language modeling` head on 
top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel):", "-1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position,", "scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask is not None: # We need", "cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\" The LongT5 model was proposed in [LongT5: Efficient", "the buckets are for exact increments in positions max_exact = num_buckets // 2", "\"\"\" PyTorch LongT5 model.\"\"\" import copy import math import warnings from typing import", "cross_attn_head_mask, \"use_cache\": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past,", "None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None,", "Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).", "been shown that owning a dog is good for you\", return_tensors=\"pt\" ... 
).input_ids", "past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): \"\"\" Self-attention (if key_value_states is None) or", "is useful if you want more control over how to convert `input_ids` indices", "elif self.config.encoder_attention_type == \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need", "class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ]", "query_length is None else query_length key_length = real_seq_length if key_value_states is None else", "device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position", "torch.Tensor: \"\"\"Makes 3-blocked relative position ids for local attention.\"\"\" position_ids = torch.arange(3 *", "to nullify selected heads of the cross-attention modules in the decoder. Mask values", "of the self-attention modules in the decoder. Mask values selected in `[0, 1]`:", "will be removed in future versions. If you do not want to use", "w/o mean and there is no bias. Additionally we want to make sure", "tokens are not allowed to attend tokens farther than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len)", "https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. 
The relative", "torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad", "input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids", "dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position =", "past_key_value is not None: assert ( len(past_key_value) == 2 ), f\"past_key_value should have", "[batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 *", "is useful if you want more control over how to convert `decoder_input_ids` indices", "(block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket =", "# replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)", "been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether", "self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad =", "config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)", "and [`PreTrainedTokenizer.__call__`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id`", "\"License\"); # you may not use this file except in compliance with the", "= None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] =", "encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5", "local attention mask to enforce that tokens are not allowed to attend tokens", "if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight *", "team. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "to nullify selected heads of the self-attention modules. 
Mask values selected in `[0,", "dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0)", "self.is_decoder, f\"`use_cache` can only be set to `True` if {self} is used as", "+ cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,)", "value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with", "self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def", "logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + (", "an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have", "def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size,", "shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None,", "attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals =", "isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,", "dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and", "here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None", "initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k =", "global 
attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len,", "transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model,", "values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens", "bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position =", "Handle cases when an empty input sequence is given if not all(x.shape): new_shape", "All relative positions <=-max_distance map to the same bucket. This should allow for", "instead of a plain tuple. \"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of", "-1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len]", "# Handle cases when an empty input sequence is given if not all(x.shape):", "have their past key value states given to this model) of shape `(batch_size,", "# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0,", "*optional*): Mask to nullify selected heads of the self-attention modules. 
Mask values selected", "= (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs", "device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None,", "allow each token to attend global aggregated ones # New shape: (batch_size, num_blocks,", "value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len,", "self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output =", "of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`,", "key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) **", "good for you \", return_tensors=\"pt\" ... 
).input_ids # Batch size 1 >>> outputs", "implementation) mask = torch.where(mask > 0, 0.0, -1e10) # We need to adjust", "1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size,", "`optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden", "= shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn", "return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) -", "n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states =", "hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states)", "- (1) Local attention, or (2) Transient-Global attention. 
This model inherits from [`PreTrainedModel`].", "output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias,", "not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if", "list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim", "block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length)", "# New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim", "of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else", "head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps)", "want to consider setting `use_cache=True` to speed up decoding\") return past reordered_decoder_past =", "governing permissions and # limitations under the License. 
\"\"\" PyTorch LongT5 model.\"\"\" import", "is used as a decoder\" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)", "None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask", "def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor]", "f\"but got {config.encoder_attention_type}.\" ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config))", "\"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number", "2) ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with onnx op>9 if", "heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers", "device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states,", "if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size,", "decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586", "using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. 
[What are input IDs?](../glossary#input-ids) To", "dummy_inputs = { \"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, } return dummy_inputs def", "library implements for all its model (such as downloading or saving, resizing the", "is a simlified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In", "@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None,", "pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\" # replace possible -100", "block_len] -> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value)", "be sum with mask position_bias = position_bias + mask.transpose(1, 2) scores += position_bias", "super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def", "nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False #", "hidden states of the attention blocks. Can be used to speed up decoding.", "num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] = (1, 1)", "* block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 *", "has to be defined. In LongT5 it is usually set to the\" \"", "returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return", "class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\",", "to summarize the studies have shown that owning a dog ```\"\"\" use_cache =", "with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load", "sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 *", "torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 *", "Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import T5Tokenizer, LongT5Model >>>", "bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout =", "sure that the accumulation for # half-precision inputs is done in fp32 variance", "= self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask", "else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states", "module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right", "know more on how to prepare `decoder_input_ids` for pretraining take a look at", "= config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv 
self.n_heads = config.num_heads self.local_radius =", "-1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device)", "from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings):", "** -0.5)) if hasattr(module.wi, \"bias\") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor", "std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias:", "is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position =", "_keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model", "global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states", "implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None if", "outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights),", "attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output", "in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias )", "LongT5LayerNorm\") except ImportError: # using the normal LongT5LayerNorm pass except Exception: logger.warning(\"discovered apex", "output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],)", "allowed to attend tokens farther than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids)", "layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] #", "{config.encoder_attention_type}.\" ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward(", "each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + (", "New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states =", "elif config.encoder_attention_type == \"local\": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\": attention_layer =", "if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False #", "# Batch size 1 >>> outputs = model.generate(input_ids) >>> 
print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim", "device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long,", "cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state", "[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids`", "Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output", "(self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if", "< block_len locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask,", "past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder:", "owning a dog is good for you\", return_tensors=\"pt\" ... ).input_ids # Batch size", "[ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config", "linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v,", "adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only for", "this article is to summarize the studies have shown that owning a dog", "each input token. 
This implementation is a simlified version of the original Flaxformr", "2D or 3D attention mask is provided for the cross-attention # we need", "= new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads", "bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o =", "tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if", "(batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size,", "self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them", "cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,) if", "self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh", "key_value_states is None else key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1,", "encoder's raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel):", "Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0", "key/query states\"\"\" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head)", "device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask !=", "denoising 
generative setting. LongT5 model is an extension of T5 model, and it", "output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all", "1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size:", "= set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return", "on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention", "is not None else self.config.use_cache return_dict = return_dict if return_dict is not None", "None: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if", "is True: assert self.is_decoder, f\"`use_cache` can only be set to `True` if {self}", "None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length,", "cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1", "LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings", "**not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask)", "layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state", "# half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states =", "cross-attention of the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having", "_concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across", "hidden states of all layers. See `hidden_states` under returned tensors for more detail.", "def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING)", "base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)", "bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads", "else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self,", "hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and", "LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\"", "to use both local attention mask and standard extended mask for transient-global attention", "the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor", "Module and refer to the PyTorch documentation for all matter related to general", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states ) # 
(batch_size, num_block, n_heads, block_len, 3 * block_len)", "torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: \"\"\"Compute individual block aggregates by", "def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative", "self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted", "dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile", "to `-100` are ignored (masked), the loss is only computed for labels in", "heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim,", "module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0,", "inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions,", ") # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is", "version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. 
In our scenario, as", "output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: use_cache", "masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence", ">= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids,", "positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to", "be able to pad the inputs on both the right and the left.", "shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden", "# Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def", "\"use_cache\": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx):", "decoding\") return past reordered_decoder_past = () for layer_past_states in past: # get the", "= position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len,", "self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens #", "<NAME>, <NAME>, <NAME> and <NAME>. 
It's an encoder-decoder transformer pre-trained in a text-to-text", "dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute", "()) x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len:", "position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size,", "self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense", "\"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int =", "is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of", "attention over source sentence (provided by key_value_states). 
\"\"\" # Input is (batch_size, seq_length,", "device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int =", "num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len,", "encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers)", "batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids", "dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids", "default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify", "output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting", "increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact", "global_block_size: int) -> torch.Tensor: \"\"\"Create the relative position tensor for local -> global", "detail. To know more on how to prepare `input_ids` for pretraining take a", "only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful", "\"Studies have been shown that owning a dog is good for you \",", "self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states):", "to be input (see `past_key_values`). 
This is useful if you want more control", "key_states.transpose(3, 2) ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with onnx op>9", "cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale", "self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False", "= None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):", "self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings)", "vocabulary. LongT5 is a model with relative position embeddings so you should be", "Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self,", "= copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder =", "v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not", "scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states,", "T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm", "of the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4", "from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder", "\"\"\"Compute binned relative position bias\"\"\" if device is None: device = self.relative_attention_bias.weight.device context_position", "Unless required by applicable law or agreed to in writing, software # distributed", "arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this", "LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def", "on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output)", "layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:]", "one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6):", "= position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return", "its model (such as downloading or saving, resizing the input embeddings, pruning heads", "Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( 
self, input_ids=None,", "# Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor", "self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers", "present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs =", "decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask:", "how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask", "cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with a `language modeling` head on", "control over how to convert `input_ids` indices into associated vectors than the model's", "= self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax", "eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states)", "( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from", "assert self.embed_tokens is not None, \"You have to initialize the model with valid", "of all 
`decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length,", "position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is not None: #", "is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode", "add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with", "for a local attention.\"\"\" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1)", "attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if", "attention bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets,", "= False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from", "a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default", "*optional*): Whether or not to return the hidden states of all layers. 
See", "Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example: ```python >>> from", "\"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, } return dummy_inputs def _init_weights(self, module): \"\"\"Initialize", "inputs on both the right and the left. Indices can be obtained using", "output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value,", "= copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) #", "tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1 >>> # forward pass >>>", "will be padded first with selected `pad_value`. \"\"\" # pad tensor to multiple", "relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position,", "if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention", "= self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2,", "+ (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape)", "`attentions` under returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not", "self.variance_epsilon = eps def forward(self, hidden_states): # LongT5 uses a layer_norm which only", "self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for", "model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return", "past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads,", "bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.\",", ">>> # Let's try a very long encoder input. >>> input_ids = tokenizer(", "TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense):", "mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): \"\"\" Self-attention (if key_value_states", "[ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config:", "(self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs =", "avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv", "decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, 
encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None,", "not None: if not self.is_decoder: logger.warning(\"`past_key_values` is passed to the encoder. Please make", "last query position bias if past_key_value is not None: position_bias = position_bias[:, :,", "TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ]", "true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask", "Relativen attention bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias =", "and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length,", "relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): \"\"\"Compute binned", "of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of", "is_small = relative_position < max_exact # The other half of the buckets are", "`block_len`, it will be padded first with selected `pad_value`. 
\"\"\" # pad tensor", "attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, )", "head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG,", "new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def", "# add attentions if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self", "# (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)", "dim=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask", "have to be input (see `past_key_values`). 
This is useful if you want more", "0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape", "beam_idx): # if decoder past is not included in output # speedy decoding", "self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2]", "key_length) # Mask heads if we want to if layer_head_mask is not None:", "length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length", "embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can", "locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: \"\"\"Prepare attention mask", "valid token embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask", "Tile side inputs across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len,", "self.is_decoder else \"\" raise ValueError( f\"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at", "`(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to", "`decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*):", "masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask", "... 
\"summarize: \" + 100 * \"studies have shown that owning a dog", "(batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias is None: # position_bias", "@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None,", "self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask,", "cannot apply reshape because of incompatibility with ONNX conversion if 0 in output_shape:", "`pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally", "is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion", "self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head", "is an extension of T5 model, and it enables using one of the", "= global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "_split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for", "= self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))", "seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] 
attention_side_bias =", "* block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3", "- will use it instead of LongT5LayerNorm\") except ImportError: # using the normal", "has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states,", "and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, \"bias\") and module.wi.bias", "the whole fixed block, are assigned to the preceding block. Padding tokens from", "are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask", "past_key_value is not None else None ) # compute scores scores = torch.matmul(", "https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization", "= torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states #", "(query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length,", "is deprecated and will be removed in future versions. 
If you do not", "def _init_weights(self, module): \"\"\"Initialize the weights\"\"\" factor = self.config.initializer_factor # Used for testing", "attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is", "= set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if", "simple interface for downloading and loading pretrained models. \"\"\" config_class = LongT5Config base_model_prefix", "pad_value: int = 0) -> torch.Tensor: \"\"\"Concatenate three consecutive blocks for each input", "torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 *", "positions with -1e10 (according to the original implementation) mask = torch.where(mask > 0,", "if output_attentions else 3] # append next layer key value states if use_cache:", "output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5)", "max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min(", "v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v", "Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool]", "+ mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 *", "# Mask heads if we want to if layer_head_mask is not None: attn_weights", "of the last layer of the encoder. 
Used in the cross-attention of the", "layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: #", "self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state", "None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) scores +=", "the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the", "optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know", "states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states", "seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), f\"past_key_value", "past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not", "import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> #", "loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666", "the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a", "hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions", "get query states query_states = shape(self.q(hidden_states)) 
# (batch_size, n_heads, seq_length, dim_per_head) # get", "ValueError( f\"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time\" )", "apex but it failed to load, falling back to LongT5LayerNorm\") pass # Copied", "None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block,", "pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has only positive values\" return", "def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets", "(block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length,", "value states are returned and can be used to speed up decoding (see", "removed in future versions. If you do not want to use any `decoder_head_mask`", "= new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return", "same shape as relative_position, containing int32 values in the range [0, num_buckets) \"\"\"", "it is usually set to the\" \" pad_token_id. 
See LongT5 docs for more", "- 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size", "self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward(", "self.embed_tokens is not None, \"You have to initialize the model with valid token", "seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size,", "is not None if do_cross_attention: # the actual query length is unknown for", "if query_length is None else query_length key_length = real_seq_length if key_value_states is None", "LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [", "Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index", "if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad", "thus varience is calculated # w/o mean and there is no bias. Additionally", "is not a multiple of `block_len`, it will be padded first with selected", "self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention(", "which case we just need to make it broadcastable to all heads. 
#", "heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] =", "No bias and no subtraction of mean. \"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon", "seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0]", "original sequence are represented by -1. \"\"\" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids:", "if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device)", "num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode=\"constant\",", "past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length", "self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: #", "attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "local_attention_mask = None if position_bias is None: # position_bias shape: # (1, 1,", "self-attention modules in the decoder. 
Mask values selected in `[0, 1]`: - 1", "self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius +", "if expected_num_past_key_values == 4 else ''}\" f\"Got {len(past_key_value)} past key / value states\"", "encoder_outputs return ((loss,) + output) if loss is not None else output return", "= LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self,", "labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss.", "shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead", "Model with a `language modeling` head on top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing =", "all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states,", "is not supported natively for proxies. 
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids", "torch.Tensor: \"\"\"Compute individual block aggregates by summing over individual blocks.\"\"\" # (batch..., seq_len,", "class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm =", "cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not", "seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids", "used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias", "* ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, \"bias\") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_()", "LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input", "past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states,", "Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id =", "buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. 
All", "= config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList(", "= has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim =", "\"...qhd,...khd->...hqk\", query_states, key_states ) # (batch_size, num_block, n_heads, block_len, 3 * block_len) if", "and self.training: if use_cache: use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs,", "block_len + global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask is not None:", ":] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length)", "= LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For encoder attention mechanism, either `local` or `transient-global`", "(batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length) #", "encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of", "* ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor *", "is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) #", "masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)`", "Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor]", "encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, } def", "- max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets", "dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size,", "to in writing, software # distributed under the License is distributed on an", "hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get query", "+ global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask", "= torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, } return", "has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias", "\"\"\" @add_start_docstrings( \"The bare LONGT5 Model transformer outputting raw hidden-states without any specific", "block aggregates by summing over individual blocks.\"\"\" # (batch..., seq_len, global_seq_len)) block_ids =", "Transient-Global attention. This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for the", "_get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use both local attention mask", "= _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask", "apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm\") except ImportError: # using the", "layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor]", "int) -> torch.Tensor: \"\"\"Create the relative position tensor for local -> global attention.\"\"\"", "labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: #", "= torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None if position_bias is", "config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "+ 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]", "Initializing with a config file does not load the weights associated with the", "int): \"\"\"Compute binned relative position bias\"\"\" memory_position = torch.arange( 3 * block_length, dtype=torch.long,", "block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads,", "use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not", "global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) 
side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2)", ").to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small,", "states correctly to key/query states\"\"\" if key_value_states is None: # self-attn # (batch_size,", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class", "hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\"", "* ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, \"bias\") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_()", "= nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim:", "= value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id =", "None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,", "mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states =", "**masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify", "to set correct `past` for each of the four key / value states", "__init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config =", "```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model", "(attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self,", "+ ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states)", "both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set", "seq_length if past_key_values is not None else seq_length if use_cache is True: assert", "{len(past_key_value)} past key / value states\" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:]", "from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index =", "None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if", "you may not use this file except in compliance with the License. #", "the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`]", "self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not", "a layer_norm which only scales and doesn't shift, which is also known as", "Returns: Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\")", "seq_length = input_shape # required mask seq length can be calculated via length", "if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache:", "for you\", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer(\"Studies", ">>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" return_dict = return_dict if", "embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): #", "torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[...,", "= self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def", "cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None,", "encoder_config = 
copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared)", "self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions:", "def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor:", "dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum(\"...nd,...ng->...gd\",", "n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) #", "hidden_states): # LongT5 uses a layer_norm which only scales and doesn't shift, which", "+ (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states,", "return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: \"\"\"Split an", "pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" use_cache =", "None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:", "# (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(),", ":-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, \"self.model.config.pad_token_id has to", "if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == \"local\":", "use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:]", "not 
None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization", "3-blocked relative position ids for local attention.\"\"\" position_ids = torch.arange(3 * block_len, dtype=torch.int32)", "value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head)", "past key / value states\" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else:", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1,", "(`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices", ":] if mask is not None: position_bias = position_bias + mask # (batch_size,", "associated vectors than the model's internal embedding lookup matrix. 
decoder_inputs_embeds (`torch.FloatTensor` of shape", "(self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct", "\"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels)", "cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds:", "seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids =", "block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids", "with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention", "not to return the attentions tensors of all attention layers. See `attentions` under", "= nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask", "past: # get the correct batch idx from layer past batch dim #", "std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model)", "encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask =", "`head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. 
Currently, `decoder_head_mask` is set", "Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention.", "on top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected", "Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size,", "doesn't shift, which is also known as Root Mean # Square Layer Normalization", "= LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try a very long encoder input. >>> input_ids", "global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids", "len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, )", "past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0](", "indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`,", "None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None,", "with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id", "= torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len]", "position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)", "to the same bucket. This should allow for more graceful generalization to longer", "selected in `[0, 1]`: - 1 indicates the head is **not masked**, -", "= outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention", "key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length", "): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache,", "final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return", "at the output of the last layer of the encoder. 
Used in the", "outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False):", "(1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and", "= torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids", "In our scenario, as we use this strategy only for a decoder, orphan", "(output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states =", "to all heads. # We use local attention in encoder self-attention, otherwise standard", "hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)", "pad_len = -x.shape[dim] % block_len # Handle cases when an empty input sequence", "torch.Tensor: \"\"\"Prepare attention mask to be applied for a local attention.\"\"\" # [batch_size,", "input. >>> input_ids = tokenizer( ... 100 * \"Studies have been shown that", "= 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) *", "a very long encoder input. >>> input_ids = tokenizer( ... 100 * \"Studies", "is not None: if not self.is_decoder: logger.warning(\"`past_key_values` is passed to the encoder. Please", "values selected in `[0, 1]`: - 1 indicates the head is **not masked**,", "there is no bias. Additionally we want to make sure that the accumulation", "key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias", "= self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2])", "if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. 
shifted_input_ids =", "relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position,", "keys and values. Got { len(past_key_value)} past states\" real_seq_length += past_key_value[0].shape[2] if query_length", "device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias =", "generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be", "return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain", "LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None,", "outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs +", "If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input", "num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values", "with `None` if past does not exist if past_key_values is None: past_key_values =", "you want more control over how to convert `input_ids` indices into associated vectors", "_sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor:", "-0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads", "attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*):", "seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()", "head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache", "num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len)", "None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key", ":, :] if mask is not None: position_bias = position_bias + mask #", "Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)):", "def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ):", "# required mask seq length can be calculated via length of past mask_seq_length", "output of the last layer of the encoder. 
Used in the cross-attention of", "args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None:", "prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if", "# Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,) if use_cache:", "optionally input only the last `decoder_input_ids` (those that don't have their past key", "if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output", "-> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states =", "= (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs", "( len(past_key_value) == 2 ), f\"past_key_value should have 2 past states: keys and", "# Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v", "set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune):", "Mask to nullify selected heads of the self-attention modules in the decoder. 
Mask", "if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length =", "pass except Exception: logger.warning(\"discovered apex but it failed to load, falling back to", "be set to `True` if {self} is used as a decoder\" if attention_mask", "See the License for the specific language governing permissions and # limitations under", "= return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was", "in layer_past_states: # need to set correct `past` for each of the four", "(1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[...,", "The relative position is defined as memory_position - query_position, i.e. the distance in", "hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we", "transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert", "the last `decoder_input_ids` have to be input (see `past_key_values`). 
To know more on", "full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1)", "torch.Tensor: \"\"\"Pad a tensor so that a sequence length will be a multiple", "outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return", "mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just", "None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None #", "self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout =", "- 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor`", "hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return", "attention, or (2) Transient-Global attention. This model inherits from [`PreTrainedModel`]. Check the superclass", "= None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] =", "None: assert self.embed_tokens is not None, \"You have to initialize the model with", "* block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len:", "blocks. Can be used to speed up decoding. If `past_key_values` are used, the", "heads_to_prune): \"\"\" Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of", "(batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length", "- len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position,", "] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim =", "key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1]", "without any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [", "= _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size,", "# (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket", "from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id", "if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions =", "nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model,", "block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Pad a tensor", "for downloading and loading pretrained models. 
\"\"\" config_class = LongT5Config base_model_prefix = \"transformer\"", "super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius", "block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to", "inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict:", "if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len),", "1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not", "torch.Tensor, block_len: int) -> torch.Tensor: \"\"\"Mask local attention mask to enforce that tokens", "not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask", "if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype )", "source sentence (provided by key_value_states). \"\"\" # Input is (batch_size, seq_length, dim) #", "block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: \"\"\"Compute individual block aggregates by summing", "relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions", ">=max_distance map to the same bucket. 
All relative positions <=-max_distance map to the", "(attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False)", "def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config", "shape as relative_position, containing int32 values in the range [0, num_buckets) \"\"\" relative_buckets", "nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states)", "if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions +", "transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model,", "transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense", "target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`.", "shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the", "# Batch size 1 >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch", "add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC", "[(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x", "reshape because of incompatibility with ONNX conversion if 0 in output_shape: return torch.empty(output_shape,", 
"the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs", ") -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import AutoTokenizer,", "): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, )", "torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return", "* hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from", "dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in", "\"self.model.config.pad_token_id has to be defined.\" # replace possible -100 values in labels by", "PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward(", ">>> input_ids = tokenizer( ... 
\"summarize: \" + 100 * \"studies have shown", "side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size,", ") -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for", "(batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states))", "= () for layer_past_state in layer_past_states: # need to set correct `past` for", "block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device)", "-> torch.Tensor: \"\"\"Prepare attention mask to be applied for a local attention.\"\"\" #", "from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative", "`config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length -", "a plain tuple. 
\"\"\" # Warning message for FutureWarning: head_mask was separated into", "1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: \"\"\"Create", "else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) )", "needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs", "**kwargs ): # cut decoder_input_ids if past is used if past is not", "shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] =", "two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask", "return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python >>>", "LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init()", "number for relative attention. 
The relative position is defined as memory_position - query_position,", "hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs", "by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids`", "position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype )", "= self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states", "int, pad_value: int = 0) -> torch.Tensor: \"\"\"Concatenate three consecutive blocks for each", "** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention):", "is not None, \"You have to initialize the model with valid token embeddings\"", "ANY KIND, either express or implied. # See the License for the specific", "`local` or `transient-global` attention type is expected, \" f\"but got {config.encoder_attention_type}.\" ) self.layer", "shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace", "tokens in the vocabulary. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and", "`decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated", "etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a", "= LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None,", "weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle weights initialization and a", "use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning(\"`past_key_values`", "attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights =", "dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side", "r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config)", "when an empty input sequence is given if not all(x.shape): new_shape = list(x.shape)", "out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
\"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\"", "FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm\") except", "((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, \"bias\") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0,", "key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores", "= \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO: Update before the", "= layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions,", "each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):", "block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int)", "= nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None,", "if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs", "not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None", "key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, 
output_attentions=False, ): \"\"\" Self-attention (if key_value_states is", "past key value states given to this model) of shape `(batch_size, 1)` instead", "tensor so that a sequence length will be a multiple of `block_len`\"\"\" pad_len", "# The other half of the buckets are for logarithmically bigger bins in", "side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and \"side\"/\"global\" key/value states to allow each token to", "return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\" The LongT5", "in a text-to-text denoising generative setting. LongT5 model is an extension of T5", "# (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None:", "# using the normal LongT5LayerNorm pass except Exception: logger.warning(\"discovered apex but it failed", "int, device: torch.device) -> torch.Tensor: \"\"\"Prepare attention mask to be applied for a", ") self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None", "= shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len,", "Exception: logger.warning(\"discovered apex but it failed to load, falling back to LongT5LayerNorm\") pass", "plain tuple. 
\"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):", "heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index)", "d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model", "self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask =", "* (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0,", "LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, \"bias\") and module.wi_0.bias is", "# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow", "is expected, \" f\"but got {config.encoder_attention_type}.\" ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if", "(batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None else", "corresponding to each input token. 
This implementation is a simlified version of the", "config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size", "hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states +", "relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): \"\"\"Compute binned relative position bias\"\"\" memory_position", "nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want", "key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert", "the model. Initializing with a config file does not load the weights associated", "position. If bidirectional=False, then positive relative positions are invalid. 
We use smaller buckets", "self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self,", "block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends", "for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact +", "Additionally we want to make sure that the accumulation for # half-precision inputs", "def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads,", "= nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states =", "hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return", "values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor:", "config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init()", "used if past is not None: input_ids = input_ids[:, -1:] return { \"decoder_input_ids\":", "input sequence is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len", "= self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim #", "enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states", "config_class = LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing = True @property # Copied from", "cross_attn_head_mask=None, 
past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is", "return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions,", "assigned to the preceding block. Padding tokens from the original sequence are represented", "an extension of T5 model, and it enables using one of the two", "= input_shape # required mask seq length can be calculated via length of", "of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If", "Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of", "This is useful if you want more control over how to convert `decoder_input_ids`", "this is intended.\") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if", "(lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not", "future versions. 
If you do not want to use any `decoder_head_mask` now, please", "((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))", "cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and", "Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) ->", "writing, software # distributed under the License is distributed on an \"AS IS\"", "layer hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,) if use_cache: outputs = outputs +", "None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask =", "Warning message for FutureWarning: head_mask was separated into two input args - head_mask,", "torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed block\" global id", "# pad tensor to multiple of block_len if x.shape[dim] % block_len != 0:", "1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states =", "merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor, block_len:", "{err_msg_prefix}inputs_embeds at the same time\" ) elif input_ids is not None: input_shape =", "query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # Combine self attn and cross", "multiple of `block_len`, it will be padded first with selected `pad_value`. 
\"\"\" #", "/ value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape", "x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape =", "= LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer( ... 100 * \"Studies have been shown", "] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) ->", "tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains", "fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask", "values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length =", "cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing ) else: layer_outputs", "= ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states =", "(`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape", "_3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len]", "```\"\"\" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs =", "attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, 
cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs", "= hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\"", "config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer =", "new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head", "positive relative positions are invalid. We use smaller buckets for small absolute relative_position", "self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k =", "hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always", "- center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: \"\"\"Mask local", "set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads,", "past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if", "last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return", "Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if", "_pad_to_multiple(x: torch.Tensor, 
block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Pad", "the same bucket. All relative positions <=-max_distance map to the same bucket. This", "using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how", "None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias", "decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:]", "not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and", "is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`).", "transformer outputting encoder's raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, )", "int32 values in the range [0, num_buckets) \"\"\" relative_buckets = 0 if bidirectional:", "position weights do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: #", "of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states:", "hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys =", "position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs", "cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time\" ) elif input_ids", "self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update 
hyper params", "Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm =", "/ math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large,", "are not allowed to attend tokens farther than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask", "find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q,", "for layer_past_state in layer_past_states: # need to set correct `past` for each of", "both the right and the left. Indices can be obtained using [`T5Tokenizer`]. See", "def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention", "past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is", "global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[...,", "loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss =", "position_bias = position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads,", "owning a dog ```\"\"\" use_cache = use_cache if use_cache is not None else", "# (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size,", "decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final", "show that\", return_tensors=\"pt\").input_ids # Batch size 1 >>> # forward pass >>> outputs", "self.shared 
def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head", "config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm", "the position biases between the layers - the first layer store them #", "attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # Combine", "outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" use_cache = use_cache if", "* block_len) if position_bias is None: # position_bias shape: # (1, 1, n_heads,", "return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def", "relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): \"\"\"Compute binned relative position bias\"\"\"", "{ len(past_key_value)} past states\" real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length", "key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores", ">>> last_hidden_states = outputs.last_hidden_state ```\"\"\" 
use_cache = use_cache if use_cache is not None", "representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be", "= self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,)", "= (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends =", "file except in compliance with the License. # You may obtain a copy", "(query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets,", "_init_weights(self, module): \"\"\"Initialize the weights\"\"\" factor = self.config.initializer_factor # Used for testing weights", "self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i", "# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if", "isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, \"bias\") and module.wi_0.bias", ">>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1 >>> #", "a 2D or 3D attention mask is provided for the cross-attention # we", "is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)", "# Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif", "-x.shape[dim] % block_len # Handle cases when an empty input sequence is given", "`head_mask` and `decoder_head_mask`. 
Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature", "(reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The bare LONGT5 Model transformer outputting encoder's raw hidden-states", "self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the", "mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs", "the same bucket. This should allow for more graceful generalization to longer sequences", "center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1)", "dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute", "prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads =", "global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks", "two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but", "\"\"\"Compute binned relative position bias\"\"\" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device", "config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate)", "be removed in future versions. 
If you do not want to use any", "FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask", "tokens which do not make for the whole fixed block, are assigned to", "= None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example:", "positions with -10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0,", "- context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length,", "output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used in encoder\"\"\" def", "Local attention, or (2) Transient-Global attention. This model inherits from [`PreTrainedModel`]. Check the", "None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds,", "encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not", "self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim", "1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks +", "outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool", "outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return", "past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, 
encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with a `language", "to initialize the model with valid token embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length", "attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size", "import math import warnings from typing import Any, List, Optional, Tuple, Union import", "shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states", "need to use both local attention mask and standard extended mask for transient-global", "are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If", "compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask", "self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys", "# Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__()", "https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,)", "return_tensors=\"pt\" ... 
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer(\"Studies show that\",", "bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract", "input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds", "dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len +", "False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return", "layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None,", "all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states)", "3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias", "num_heads)`. \"\"\" @add_start_docstrings( \"The bare LONGT5 Model transformer outputting raw hidden-states without any", "Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with", "..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the", "= config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache", "hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if", "for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is", "hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return", "decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used", "else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1, n_heads,", "= layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share", "len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from", "block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks,", "output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] #", "self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32,", "config ([`LongT5Config`]): Model configuration class with all the parameters of the model. 
Initializing", "nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads", "config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout", "add attentions if we output them return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention", "past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep", "1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask,", "self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied", "- len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied", "bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)", "global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias =", "(layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states:", "head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The input argument `head_mask` was split into two", "top.\", LONGT5_START_DOCSTRING, ) class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ] def __init__(self, config:", "bias), 
(cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None:", "values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0)", "# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__()", "to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output =", "(`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding", "if key and values are already calculated # we want only the last", "extended mask for transient-global attention extended_attention_mask = attention_mask # If a 2D or", "given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of", "token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids`", "inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids`", "block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None,", "a sequence of hidden states at the output of the last layer of", "= _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device)", "decoder_start_token_id is not None, ( \"self.model.config.decoder_start_token_id has to be defined. 
In LongT5 it", "block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) #", "model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" return_dict = return_dict if return_dict is not", "to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights =", "for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare", "enables using one of the two different efficient attention mechanisms - (1) Local", "lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,)", "batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state", "if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def", "decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1 >>> # forward", "_global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads, dim_per_head)", "\"\"\"Local self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention =", "the user can optionally input only the last `decoder_input_ids` (those that don't have", "= tokenizer( ... 
\"summarize: \" + 100 * \"studies have shown that owning", "custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask,", "of the buckets are for exact increments in positions max_exact = num_buckets //", "position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len, 3 *", "shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder),", "+ (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool =", ":-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module):", "distance in tokens from the attending position to the attended-to position. 
If bidirectional=False,", "\"\"\"Split an input tensor into blocks of a given `block_len` along the given", "the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]", "self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes", "Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor]", "eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return", "output # speedy decoding is disabled and no need to reorder if past", "= attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output", "use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return", "return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) #", "with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff,", "user can optionally input only the last `decoder_input_ids` (those that don't have their", "2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects hidden states correctly to", "FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG =", "labels.view(-1)) # TODO(thom): Add z_loss 
https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) +", "```\"\"\" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict =", "with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm", "copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config,", "(batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states,", "self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads)", "self.compute_bias(self.block_len) if mask is not None: # Replace masked positions with -1e10 (according", "self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32,", "\"\"\"projects hidden states correctly to key/query states\"\"\" if key_value_states is None: # self-attn", "attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache", "global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len:", "bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() /", "None) or attention over 
source sentence (provided by key_value_states). \"\"\" # Input is", "to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask", "if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype)", "logger.warning(\"`past_key_values` is passed to the encoder. Please make sure this is intended.\") expected_num_past_key_values", "[What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token", "(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad =", "torch.Tensor, block_len: int, dim: int) -> torch.Tensor: \"\"\"Split an input tensor into blocks", "to key/query states\"\"\" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length,", "self.config.pad_token_id assert decoder_start_token_id is not None, ( \"self.model.config.decoder_start_token_id has to be defined. In", "return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config:", "states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare", "key_states) if mask is not None: # We need to adjust position bias", "cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if", "feature is deprecated and will be removed in future versions. 
If you do", "q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value", "= decoder_start_token_id assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\" #", "not to return a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING =", "shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask", "`(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of", "output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config:", "nullify selected heads of the self-attention modules in the decoder. Mask values selected", "Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in", "-10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10)", "input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, (", "= input_ids[:, -1:] return { \"decoder_input_ids\": input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask,", "import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy,", "(`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and", "= config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = 
config.relative_attention_max_distance self.d_model =", "project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects hidden states correctly to key/query states\"\"\" if key_value_states", "query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks", "modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates", "with a `language modeling` head on top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [", "query_states, key_states) if mask is not None: # We need to adjust position", "self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None,", "self-attention, otherwise standard self & cross attentions are used if self.is_decoder: extended_attention_mask =", "states: keys and values. Got { len(past_key_value)} past states\" real_seq_length += past_key_value[0].shape[2] if", "*optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be", "shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads,", "= self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1, n_heads, block_len, 3", "output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :] # If", "else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position", "return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0)", "input sequence tokens in the vocabulary. Indices can be obtained using [`T5Tokenizer`]. 
See", "the correct batch idx from layer past batch dim # batch dim of", "output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads,", "dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads,", "the specific language governing permissions and # limitations under the License. \"\"\" PyTorch", "Version 2.0 (the \"License\"); # you may not use this file except in", "* block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor,", "\"\"\"Compute individual block aggregates by summing over individual blocks.\"\"\" # (batch..., seq_len, global_seq_len))", "def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not", "[batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad,", "``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None,", "-> torch.Tensor: \"\"\"Concatenate three consecutive blocks for each input block for local attentiont.", "(side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) #", "True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1,", "# (batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias is None: #", "no subtraction of mean. \"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def", "... ) >>> # Let's try a very long input. 
>>> input_ids =", "None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask,", "local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the", "self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim,", "decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states:", "[batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor,", "hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config):", "self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self,", "-hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias + mask", "# to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output =", "dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len)", "else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states", "the last query position bias if past_key_value is not None: position_bias = position_bias[:,", "(present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states,", "blocks for keys and values -> (batch_size, num_blocks, 
3 * block_len, n_heads, dim_per_head)", "model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try a very long encoder input. >>>", "-> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed block\" global id corresponding to each input", "x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad,", "new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder", "_CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO: Update before", "prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, )", "of T5 model, and it enables using one of the two different efficient", "= torch.arange(query_length, dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position =", "= nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act", "\" + 100 * \"studies have shown that owning a dog is good", "set correct `past` for each of the four key / value states reordered_layer_past_states", "use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) +", "shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states =", "module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): 
module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor", "sequences than the model has been trained on Args: relative_position: an int32 Tensor", "torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has only positive values\" return shifted_input_ids class", "def _reorder_cache(self, past, beam_idx): # if decoder past is not included in output", "= nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder", "AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids =", "\"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, } def prepare_decoder_input_ids_from_labels(self,", "# equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with onnx op>9 if position_bias is", "def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size -", "input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is", "Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained", "= (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask =", "all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add", "the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):", "r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config)", "... 100 * \"Studies have been shown that owning a dog is good", "num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states)", "side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias +", "torch.Tensor, global_seq_len: int ) -> torch.Tensor: \"\"\"Compute individual block aggregates by summing over", "<NAME>. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5", "else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask,", "outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False):", "layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing ) else:", "0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune", "None, :])[:, None, ...] 
attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size,", "1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int),", "False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder =", "if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f\"There", "None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices])", "-1. \"\"\" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends =", "(cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class to handle", "# Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def", "shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens", "and larger buckets for larger absolute relative_positions. 
All relative positions >=max_distance map to", "shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states =", "not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not", "add attentions if we output them return outputs class LongT5Block(nn.Module): def __init__(self, config,", "(cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]", "for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) #", "f\"There should be {expected_num_past_key_values} past states. \" f\"{'2 (past / key) for cross", "is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention", "raise ValueError( \"For encoder attention mechanism, either `local` or `transient-global` attention type is", "== len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The bare LONGT5", "+ local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape: #", "= None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] =", "nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states", "\"\"\"Initialize the weights\"\"\" factor = self.config.initializer_factor # Used for testing weights initialization if", "is a model with relative position embeddings so you should be able to", "= attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, 
layer_head_mask=None,", "integer max_distance: an integer Returns: a Tensor with the same shape as relative_position,", "(present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs #", "isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0,", "attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states +", "LONGT5 Model transformer outputting raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING,", "normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0])", "* layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output = attn_output[:,", "num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values =", "LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False,", "False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] #", "block_len, dim=1) # [batch_size, num_block, 3 * 
block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2)", "not None else self.config.use_cache return_dict = return_dict if return_dict is not None else", "0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids =", "if hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0,", "> 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket", "we output them return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder", "be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder", "Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs =", "= sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len]", "encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final", "= _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len,", "split into two arguments `head_mask` and `decoder_head_mask`. 
Currently, `decoder_head_mask` is set to copy", "for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model,", "Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>>", "**masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing", "using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids)", "else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i,", "@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None,", "output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if", "setting `use_cache=True` to speed up decoding\") return past reordered_decoder_past = () for layer_past_states", "num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is", "the self-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the", "dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket", "no need to reorder if past is None: logger.warning(\"You might want to consider", "relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position =", "None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,", "to reorder if past is None: logger.warning(\"You might want to consider setting `use_cache=True`", "is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not", "in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), \"Verify", "None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value", "self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad", "encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask", "outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs", "= model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" return_dict = return_dict if return_dict is", "else: local_attention_mask = None if position_bias is None: # position_bias shape: # (1,", "cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): 
Mask to nullify selected", "= torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids", "make it broadcastable to all heads. # We use local attention in encoder", "_concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores =", "mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias", "len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]", "we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and", "attention type is expected, \" f\"but got {config.encoder_attention_type}.\" ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config,", "i in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices =", "config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn]", "if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class", "forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states =", "shown that owning a dog is good for you\", return_tensors=\"pt\" ... 
).input_ids #", "query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1)", "block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if", "compute_bias(self, block_length: int): \"\"\"Compute binned relative position bias\"\"\" memory_position = torch.arange( 3 *", "output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state,", "if past is None: logger.warning(\"You might want to consider setting `use_cache=True` to speed", "- block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int", "now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. \"\"\" @add_start_docstrings( \"The bare LONGT5 Model", "at [LONGT5 Training](./longt5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate", "otherwise standard self & cross attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask(", "query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask,", "`decoder_input_ids` (those that don't have their past key value states given to this", "False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config =", "= None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions,", "PyTorch LongT5 model.\"\"\" import copy import math import warnings from typing import Any,", "than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None,", "self.pruned_heads = set() self.gradient_checkpointing = False # Relativen attention bias & Layer norm", "= shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, global_seq_len,", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "Construct a layernorm module in the LongT5 style. 
No bias and no subtraction", "copy `head_mask`, but this feature is deprecated and will be removed in future", "layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions:", "dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states =", "broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None:", "== self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first", "want more control over how to convert `decoder_input_ids` indices into associated vectors than", "self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states", "actual query length is unknown for cross attention # if using past key", "torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids <", "present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask,", "self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1)", "the attentions tensors of all attention layers. 
See `attentions` under returned tensors for", "[] for i in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs", "class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias =", "( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict", "accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states,", "a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid", "past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is", "return outputs class LongT5LayerLocalSelfAttention(nn.Module): \"\"\"Local self attention used in encoder\"\"\" def __init__(self, config,", "# New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states", "same time\" ) elif input_ids is not None: input_shape = input_ids.size() input_ids =", "\"For encoder attention mechanism, either `local` or `transient-global` attention type is expected, \"", "hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing =", "masked**, - 0 indicates the head is **masked**. 
decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)`", "def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\": input_ids,", "past_key_value): \"\"\"projects hidden states correctly to key/query states\"\"\" if key_value_states is None: #", "decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( \"self.model.config.decoder_start_token_id", "= nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states =", "prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o,", "License, Version 2.0 (the \"License\"); # you may not use this file except", "states of all layers. See `hidden_states` under returned tensors for more detail. return_dict", "\"local\" and \"side\"/\"global\" key/value states to allow each token to attend global aggregated", "\"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self,", "encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is", "or attention over source sentence (provided by key_value_states). 
\"\"\" # Input is (batch_size,", "LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): \"\"\" Construct a layernorm module in the LongT5", "[batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1)", "outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to", "# [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size,", "the head is **masked**. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):", "= torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) /", "method to load the model weights. \"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor`", "hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states", "self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if", "dummy_inputs def _init_weights(self, module): \"\"\"Initialize the weights\"\"\" factor = self.config.initializer_factor # Used for", "embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use", "self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output", "heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See", "and no need to reorder if past is None: logger.warning(\"You might want to", "block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int )", "the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies.", "are for exact increments in positions max_exact = num_buckets // 2 is_small =", "(None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases", "the attention blocks. Can be used to speed up decoding. If `past_key_values` are", "the vocabulary. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for", "See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi,", "def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. heads_to_prune: dict of {layer_num:", "class to handle weights initialization and a simple interface for downloading and loading", "def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes 3-blocked relative position ids for local attention.\"\"\"", "positions are invalid. 
We use smaller buckets for small absolute relative_position and larger", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): \"\"\"Compute binned relative", "Replace masked positions with -10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask", "self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied", "consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`", "= None if position_bias is None: # position_bias shape: # (1, 1, n_heads,", "max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact)", "weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\" An abstract class to", "warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass)", "Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module,", "torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids =", "@add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, 
attention_mask: Optional[torch.FloatTensor] =", "a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer", ">>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state", "included in output # speedy decoding is disabled and no need to reorder", "= _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs", "self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff,", "return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) >", "query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states,", "the model weights. \"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size,", "# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is", "the `pad_token_id` as the starting token for `decoder_input_ids` generation. 
If `past_key_values` is used,", "return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return", "encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict,", "key/value states to allow each token to attend global aggregated ones # New", "= torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if", "reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert", "input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.", "look at [LONGT5 Training](./longt5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior:", "global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states ->", "LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer =", "* block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate", "assert self.is_decoder, f\"`use_cache` can only be set to `True` if {self} is used", "@add_start_docstrings(\"\"\"LONGT5 Model with a `language modeling` head on top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing", "is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC =", "else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals),", "= self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states #", "= r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence", "and <NAME>. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting.", "detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of", "+ (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: use_cache = False def create_custom_forward(module):", "past_key_values = [None] * len(self.block) # We can provide a self-attention mask of", "not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions,", "the last layer of the encoder. Used in the cross-attention of the decoder.", "- head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The input argument `head_mask` was split into", "a bucket number for relative attention. The relative position is defined as memory_position", "correctly to key/query states\"\"\" if key_value_states is None: # self-attn # (batch_size, n_heads,", "and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is not", "3D attention mask is provided for the cross-attention # we need to make", "local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according to", "outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs", "position to the attended-to position. If bidirectional=False, then positive relative positions are invalid.", "`block_len` along the given `dim`. 
If the dimension length is not a multiple", "seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length,", "= torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) #", ") hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add", "model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>,", "shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs =", "= return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids,", "(according to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else:", "100 * \"studies have shown that owning a dog is good for you", "hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights)", "= encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds", "# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if", "present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights", "encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with a `language modeling` head on top.\"\"\", LONGT5_START_DOCSTRING)", "- head_mask, 
decoder_head_mask if head_mask is not None and decoder_head_mask is None: if", "(attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len //", "input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ):", "= True else: position_bias = self.compute_bias(self.block_len) if mask is not None: # Replace", "accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)", "# We use local attention in encoder self-attention, otherwise standard self & cross", "self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad =", "- shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask =", "last layer of the encoder. Used in the cross-attention of the decoder. past_key_values", "if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs", "is not included in output # speedy decoding is disabled and no need", "under the License. \"\"\" PyTorch LongT5 model.\"\"\" import copy import math import warnings", "= max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets", "# limitations under the License. \"\"\" PyTorch LongT5 model.\"\"\" import copy import math", "position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not", "Inc. team. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "`(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention", "LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0:", "input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, \"self.model.config.pad_token_id has", "def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes", "for cross attention. ' if expected_num_past_key_values == 4 else ''}\" f\"Got {len(past_key_value)} past", "for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states", "max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other", "r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim", "blocks_list: List[torch.Tensor] = [] for i in range(3): # We use indexing approach", "are already calculated # we want only the last query position bias if", "be defined.\" # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids ==", "not None: # Replace masked positions with -1e10 (according to the original implementation)", "(batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states", "Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len,", "// self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids(", "None, attention_mask: 
Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None,", "calculated # we want only the last query position bias if past_key_value is", "self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor]", "of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates", "* len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length,", "1 for tokens that are **not masked**, - 0 for tokens that are", "position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False:", "downloading and loading pretrained models. \"\"\" config_class = LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing", "input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation.", "relative positions are invalid. 
We use smaller buckets for small absolute relative_position and", "max_distance=128): \"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket", "* block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids =", "torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1", "side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias,", "0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers,", "= inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError(f\"You have", "3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self,", ") # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to", "Whether or not to return the hidden states of all layers. 
See `hidden_states`", "None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None:", "self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut", "encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing", "layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the", "with a config file does not load the weights associated with the model,", "def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor]", "num_block, n_heads, block_len, 3 * block_len) if position_bias is None: # position_bias shape:", "\"The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific head", "batch_size, seq_length = hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def", "3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask:", "self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states", "local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size,", "query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias,", "\"\"\" for layer, heads in heads_to_prune.items(): 
self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self,", "the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),", "LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License,", "global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads", "block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2)", "= nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model,", "mask is not None: # We need to adjust position bias shape to", "[`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. 
[What are input IDs?](../glossary#input-ids) To know", "x.ndim indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size,", "= False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder", "True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize", "bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias:", "= present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder:", "if input_ids is not None and inputs_embeds is not None: err_msg_prefix = \"decoder_\"", "for a decoder, orphan tokens, i.e. those tokens which do not make for", "return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config:", "module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor *", "import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from .configuration_longt5 import", "(1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None,", "article is to summarize the studies have shown that owning a dog ```\"\"\"", "attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, } def 
prepare_decoder_input_ids_from_labels(self, labels:", "past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache,", "self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states =", "with the License. # You may obtain a copy of the License at", "std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model)", "forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] =", "n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states,", "= LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None,", "attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config,", "is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if", "seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads,", "in `[0, 1]`: - 1 for tokens that are **not masked**, - 0", "head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor`", "device=None): \"\"\"Compute binned relative position bias\"\"\" if device is None: device = self.relative_attention_bias.weight.device", "if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor,", "is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next", "with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder", "def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff,", "is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to", "law or agreed to in writing, software # distributed under the License is", "pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds`", "self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with", "* (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None:", "seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert (", "`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
output_attentions", "int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an", "* (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)", "= input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id", "[`~utils.ModelOutput`] instead of a plain tuple. \"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor`", "of {layer_num: list of heads to prune in this layer} See base class", "of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads,", "def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self,", "input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1]", "self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied from", "if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder", "class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder", "to_seq_length] # ourselves in which case we just need to make it broadcastable", "_shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None,", "torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing", 
"layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # Combine self attn", "encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient", "before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x:", "output them return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder =", "Mask to nullify selected heads of the self-attention modules in the encoder. Mask", "None else seq_length if use_cache is True: assert self.is_decoder, f\"`use_cache` can only be", "# compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of", "relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None): \"\"\"Compute", "if return_dict is not None else self.config.use_return_dict if input_ids is not None and", "Labels for computing the sequence classification/regression loss. 
Indices should be in `[-100, 0,", "*optional*): Mask to nullify selected heads of the self-attention modules in the decoder.", "global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: \"\"\"Create the relative position", "labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states:", "global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64)", "refer to the PyTorch documentation for all matter related to general usage and", "num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3,", "position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention(", "defined. In LongT5 it is usually set to the\" \" pad_token_id. 
See LongT5", "in the range [0, num_buckets) \"\"\" relative_buckets = 0 if bidirectional: num_buckets //=", "if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if", "keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if", "num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a", "# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder", "from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations import ACT2FN from", ") # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len)", "dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len)", "use it instead of LongT5LayerNorm\") except ImportError: # using the normal LongT5LayerNorm pass", "masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify", "= None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] =", "All relative positions >=max_distance map to the same bucket. 
All relative positions <=-max_distance", "- 1 for tokens that are **not masked**, - 0 for tokens that", "batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) %", ") # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent", "* self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted", "them return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder", "None, \"You have to initialize the model with valid token embeddings\" inputs_embeds =", "False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings", "= self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5", "_reorder_cache(self, past, beam_idx): # if decoder past is not included in output #", "with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias)", "num_buckets // 2 is_small = relative_position < max_exact # The other half of", "hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states", "num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the", "side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, 
global_seq_len) side_bias = side_bias.permute([0, 3, 1,", "https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks)", "forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```\"\"\" use_cache", "# [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask =", "/ max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large =", "PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to", "Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The", "return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes 3-blocked relative position ids", "= \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST =", "* ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) **", "if you want more control over how to convert `decoder_input_ids` indices into associated", "# noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm\") except ImportError:", "the cross-attention of the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple", "position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len,", "= self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask,", "(1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and", "disabled and no need to reorder if past is None: logger.warning(\"You might want", "if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self,", "= use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if", "input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take", "shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. 
LongT5 is", ">>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to summarize the studies", "# Replace masked positions with -10_000 (according to the original implementation) local_attention_mask =", "relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance,", "[0, num_buckets) \"\"\" relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets +=", "find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings,", "-> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing", "num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len,", "cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states,", "class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1", "be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):", "this file except in compliance with the License. 
# You may obtain a", "hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) #", "an input tensor into blocks of a given `block_len` along the given `dim`.", "= config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow", "copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config,", "not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not", "and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh", "mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask -", "LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(", "cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs +", "full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return", "query_position, i.e. 
the distance in tokens from the attending position to the attended-to", "Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor]", "this feature is deprecated and will be removed in future versions. If you", "reordered_decoder_past = () for layer_past_states in past: # get the correct batch idx", "(torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends,", "input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, } return dummy_inputs def _init_weights(self, module): \"\"\"Initialize the", "LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.local_radius", "local attention.\"\"\" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size,", "indices into associated vectors than the model's internal embedding lookup matrix. 
If `decoder_input_ids`", "= torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not", "local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return", "* ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) **", "- 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks =", "mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length", "present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position", "in tokens from the attending position to the attended-to position. 
If bidirectional=False, then", "seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states,", "mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,)", "dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int", "from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593", "_blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask =", "Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask", "inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output =", "else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict", "num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward( self,", "= torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask =", "mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return", "for more information\" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): #", "return relative_buckets def compute_bias(self, block_length: int): \"\"\"Compute binned relative position bias\"\"\" memory_position =", "[`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5", "a decoder\" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and", "if hasattr(module.wi_0, \"bias\") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model)", "shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1,", "use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention", "if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == \"local\": attention_layer = LongT5LayerLocalSelfAttention elif", "= config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate", "if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state", "is calculated # w/o mean and there is no bias. Additionally we want", "key_length, device=None): \"\"\"Compute binned relative position bias\"\"\" if device is None: device =", "masked**, - 0 indicates the head is **masked**. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size,", "torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states:", "inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively", "0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0,", "_concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len,", "= nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim,", "= _make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) #", "hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length,", "None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None,", "1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] attention_side_bias", "# initialize past_key_values with `None` if past does not exist if past_key_values is", "the inputs on both the right and the left. Indices can be obtained", "that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also be used by", "# Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs))", "decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder", "forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): \"\"\"", "`decoder_input_ids` have to be input (see `past_key_values`). To know more on how to", "`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see", "= config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1", "blocks for each input block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf.", "if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5", "hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor", "past_key_values with `None` if past does not exist if past_key_values is None: past_key_values", "dim: int) -> torch.Tensor: \"\"\"Split an input tensor into blocks of a given", "required by applicable law or agreed to in writing, software # distributed under", "-> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None,", "encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) 
self.layer_norm = LongT5LayerNorm(config.d_model,", "you \", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids)", "outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config,", "dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if", "self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states", "self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q", "outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias),", "== 2 ), f\"past_key_value should have 2 past states: keys and values. Got", "block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local", "query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states", "torch.Tensor, global_block_size: int) -> torch.Tensor: \"\"\"Create the relative position tensor for local ->", "hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states", "** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor *", "if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None,", "is None else key_value_states.shape[1] 
def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)", "is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is", "block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3", "self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout =", "0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ):", "pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] = (0,", "in output # speedy decoding is disabled and no need to reorder if", "positions <=-max_distance map to the same bucket. This should allow for more graceful", "cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache,", "from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention =", "and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not", "useful if you want more control over how to convert `decoder_input_ids` indices into", "block_len: int, dim: int) -> torch.Tensor: \"\"\"Split an input tensor into blocks of", "torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16,", "= input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert 
pad_token_id is not None, \"self.model.config.pad_token_id", "do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,", "lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder(", "torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm", "torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 *", "to adjust position bias shape to be sum with mask position_bias = position_bias", "global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0,", "encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an", "* block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1,", "Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2]", "class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__()", "super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout =", "block_len: int) -> torch.Tensor: \"\"\"Mask local attention mask to enforce that tokens are", "the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors", "std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim)", "relative_buckets def compute_bias(self, block_length: int): \"\"\"Compute binned relative position bias\"\"\" memory_position = torch.arange(", "Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len,", "mean and there is no bias. Additionally we want to make sure that", "= config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads", "self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any,", "the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length", "head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing", "hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied", "to the original implementation) mask = torch.where(mask > 0, 0.0, -1e10) # We", "is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (`torch.FloatTensor` of", "= torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] attention_side_bias = torch.where(side_attention_mask > 0,", "details. 
[What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting", "self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied", "tokens from the attending position to the attended-to position. If bidirectional=False, then positive", "/ global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0,", "very long encoder input. >>> input_ids = tokenizer( ... 100 * \"Studies have", "# Replace masked positions with -1e10 (according to the original implementation) mask =", "# (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len)", "# (batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length)", "of the attention blocks. Can be used to speed up decoding. 
If `past_key_values`", "forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False,", "use local attention in encoder self-attention, otherwise standard self & cross attentions are", "return hidden_states # Copied from transformers.models.t5.modeling_t5.T5DenseGatedActDense with T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config:", "forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value", "self.gradient_checkpointing = False # Relativen attention bias & Layer norm for global attention", "more graceful generalization to longer sequences than the model has been trained on", "3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length,", "not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3,", "max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing", "of a plain tuple. 
\"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape", "self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(", "was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The", "transformer outputting raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING, ) class", "@add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] =", "in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0,", "Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]]", "is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is", "None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions", "= hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into", "+ self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output", ") hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None", "of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value", "torch.Tensor]: \"\"\"Obtain the \"fixed block\" global id corresponding to each input token. 
This", "= all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: use_cache = False", "TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k", "indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):", "See base class PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput,", "outputs.last_hidden_state ```\"\"\" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs", ") LONGT5_START_DOCSTRING = r\"\"\" The LongT5 model was proposed in [LongT5: Efficient Text-To-Text", "def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu *", "def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len)", "2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and", "\"Verify that `shifted_input_ids` has only positive values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self,", "0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1,", "have shown that owning a dog ```\"\"\" use_cache = use_cache if use_cache is", "= self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow:", "import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint 
import", "self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # LongT5 uses a", "False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs =", "long input. >>> input_ids = tokenizer( ... \"summarize: \" + 100 * \"studies", "T5->LongT5 class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)", "-1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects hidden states correctly to key/query", "num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len)", "outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed", "very long input. >>> input_ids = tokenizer( ... \"summarize: \" + 100 *", "weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1]", "self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self,", "bias\"\"\" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[:,", "... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))", "**masked**. 
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state`", "PreTrainedModel \"\"\" for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward(", "the right and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`]", "attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller", "nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights,", "Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__()", "side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)", "head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None:", "is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder(", "= False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config", "inputs_embeds.device ) elif self.config.encoder_attention_type == \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: #", "shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not", "decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, 
encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values:", "= LongT5LayerSelfAttention elif config.encoder_attention_type == \"local\": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\":", ").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state", "self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None,", "encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers`", "= attention_mask # If a 2D or 3D attention mask is provided for", "all matter related to general usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids:", "return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state,", "+ (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied", "= None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] =", 
"past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # Combine self attn and", "def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False,", "import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations import ACT2FN from ...modeling_outputs import", "Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads", "dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device)", "encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple", "= hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value)", "# shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if", "input sequence tokens in the vocabulary. LongT5 is a model with relative position", "side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len", "to the encoder. Please make sure this is intended.\") expected_num_past_key_values = 2 if", "in the vocabulary. LongT5 is a model with relative position embeddings so you", "Authors and HuggingFace Inc. team. 
# # Licensed under the Apache License, Version", "# Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim *", "summarize the studies have shown that owning a dog ```\"\"\" use_cache = use_cache", "layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias),", "\"\" raise ValueError(f\"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds is", "relative position to a bucket number for relative attention. The relative position is", "been shown that owning a dog is good for you \", return_tensors=\"pt\" ...", "attending position to the attended-to position. If bidirectional=False, then positive relative positions are", "head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states", "in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm =", "a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position", "not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids =", "& cross attentions are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device", "unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention #", "documentation for all matter related to general usage and behavior. 
Parameters: config ([`LongT5Config`]):", ") from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK,", "use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past", "class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__()", "= config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow", "position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :,", "nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states)", "# (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder", "position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, )", "2.0 (the \"License\"); # you may not use this file except in compliance", "class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): \"\"\" Construct a layernorm module in the", "= self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( \"self.model.config.decoder_start_token_id has", "1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim #", "supports_gradient_checkpointing = True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS)", "Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,) if use_cache: outputs", 
"self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states )", "1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states =", "0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`,", "`pad_value`. \"\"\" # pad tensor to multiple of block_len if x.shape[dim] % block_len", "= torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the", "def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states,", "output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states", "position ids for local attention.\"\"\" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids =", "express or implied. # See the License for the specific language governing permissions", "values are already calculated # we want only the last query position bias", "[`PreTrainedTokenizer.__call__`] for detail. 
[What are input IDs?](../glossary#input-ids) To know more on how to", "= unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state", "forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear", "add attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention", "self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False", "input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError(f\"You", "nullify selected heads of the cross-attention modules in the decoder. Mask values selected", "decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting", "None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) #", "for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids:", "def compute_bias(self, block_length: int): \"\"\"Compute binned relative position bias\"\"\" memory_position = torch.arange( 3", "layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index)", "= nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # 
Copied", "num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device)", "nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states", "key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output", "List, Optional, Tuple, Union import torch from torch import nn from torch.nn import", "you \", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>> outputs = model.generate(input_ids)", "first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds,", "def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: \"\"\"Mask local attention mask to enforce", "if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is", "want only the last query position bias if past_key_value is not None: position_bias", "output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for", "of input sequence tokens in the vocabulary. 
LongT5 is a model with relative", "self.training: if use_cache: use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache,", "- 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length:", "output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache", "= self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs", "# (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device)", ") context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :]", "3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len,", "\"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int", "self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None", "LongT5LayerNorm\") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config:", "self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1, n_heads, block_len, 3 *", "if past does not exist if past_key_values is None: past_key_values = [None] *", "return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ]", "len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if 
labels is not", "configuration class with all the parameters of the model. Initializing with a config", "# Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs", ") values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0,", "scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)", "map to the same bucket. This should allow for more graceful generalization to", "specific language governing permissions and # limitations under the License. \"\"\" PyTorch LongT5", "done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance +", "hidden_states.device) # Replace masked positions with -10_000 (according to the original implementation) local_attention_mask", "here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i", "Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "used, the user can optionally input only the last `decoder_input_ids` (those that don't", "compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\",", "\"\" raise ValueError( f\"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same", "with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value", "head on top.\", LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ 
r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ]", "= hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm =", "batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does", "= None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] =", "import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer", "= torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask is not None: # We need to", "how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding", "and will be removed in future versions. If you do not want to", "empty input sequence is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] +=", "output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position", "self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim", "= copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder =", "key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value,", "position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states,", "past key value states. 
Need to inject it here if present_key_value_state is not", "LongT5Attention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias", ">>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... ) >>>", "+ (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position", "of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should", "if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers:", "= hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states #", "self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None,", "ValueError( \"For encoder attention mechanism, either `local` or `transient-global` attention type is expected,", "larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. 
All", "query_length=None, use_cache=False, output_attentions=False, ): \"\"\" Self-attention (if key_value_states is None) or attention over", "torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device )", "(batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...]", "self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): \"\"\"", "return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: \"\"\"Create the relative", "graceful generalization to longer sequences than the model has been trained on Args:", "nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights,", ">>> model = LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... ) >>> # Let's try a", "bias and no subtraction of mean. \"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon =", "and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1", "with the same shape as relative_position, containing int32 values in the range [0,", "orphan tokens, i.e. 
those tokens which do not make for the whole fixed", "!= 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len", "self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads)", "= None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor],", "* block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position,", "= self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask,", "except in compliance with the License. # You may obtain a copy of", "is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance", "the value of `inputs_embeds`. 
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key", "= torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3", "# [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def", "decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers ==", "output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values,", "[batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length,", "attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output =", "[`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining", "to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor`", "input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else:", "encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm", "not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else", "given `dim`. 
If the dimension length is not a multiple of `block_len`, it", "whole fixed block, are assigned to the preceding block. Padding tokens from the", "for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i]", "torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length)", "subclass. Use it as a regular PyTorch Module and refer to the PyTorch", "attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output =", "\"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): #", "block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position", "Training](./longt5#training). 
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention", "shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else:", "if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )", "= _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use both local attention", "heads if we want to if layer_head_mask is not None: attn_weights = attn_weights", "relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long)", "self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing", "tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the", "None, # past_key_value is always None with gradient checkpointing ) else: layer_outputs =", "(2) Transient-Global attention. This model inherits from [`PreTrainedModel`]. 
Check the superclass documentation for", "block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block,", "self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs", "Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor]", "idx from layer past batch dim # batch dim of `past` is at", "if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder(", "New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim +", "block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads,", ">= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :,", "position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) scores += position_bias", ") # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module,", "= torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids =", "documentation for the generic methods the library implements for all its model (such", "_create_global_aggregates( hidden_states: torch.Tensor, block_ids: 
torch.Tensor, global_seq_len: int ) -> torch.Tensor: \"\"\"Compute individual block", "output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, )", "it as a regular PyTorch Module and refer to the PyTorch documentation for", "we want only the last query position bias if past_key_value is not None:", "\"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0)", "= None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] =", "and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions:", "initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))", "# get the correct batch idx from layer past batch dim # batch", "Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from", "index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads", "cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not", "= torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3", "obtained using [`T5Tokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input", "are assigned to the preceding block. Padding tokens from the original sequence are", "= () for layer_past_states in past: # get the correct batch idx from", "is good for you \", return_tensors=\"pt\" ... ).input_ids # Batch size 1 >>>", "are **not masked**, - 0 for tokens that are **masked**. [What are attention", "# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position", "config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu =", "[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the", "have to initialize the model with valid token embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size,", "if we output them return outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__()", "(batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) #", "nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:", "class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else:", "hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0]", "* self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True,", "encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if", "nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act =", "is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if", "bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance:", "else: raise ValueError( \"For encoder attention mechanism, either `local` or `transient-global` attention type", "global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None]", "* num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, 
torch.zeros_like(relative_position)) # now relative_position", "# Compute scores scores = torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states ) # (batch_size, num_block,", "num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim", "past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask", "regular PyTorch Module and refer to the PyTorch documentation for all matter related", "attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\" The LongT5 model was proposed in [LongT5:", "_split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size,", "decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model,", "attention_output[1:] # add attentions if we output them return outputs # Copied from", "LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers =", "if past is not None: input_ids = input_ids[:, -1:] return { \"decoder_input_ids\": input_ids,", "warnings from typing import Any, List, Optional, Tuple, Union import torch from torch", "apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self):", "weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias =", "None else key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def", "is used if past is not None: input_ids = input_ids[:, -1:] return {", "num_blocks, 
num_heads, block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1)", "layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states))", "have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\") if inputs_embeds is None: assert self.embed_tokens", "return self.decoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. heads_to_prune: dict", "shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn #", "Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert", "input token. This implementation is a simlified version of the original Flaxformr implementation", "layer_norm which only scales and doesn't shift, which is also known as Root", "False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init()", "can only be set to `True` if {self} is used as a decoder\"", "norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model,", "[batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0,", "= _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local key/value blocks #", "self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "range [0, num_buckets) \"\"\" relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets", "//= 2 relative_buckets += 
(relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else:", "# (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn", "is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom):", "= new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def", "self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask", "module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo,", "attention_output[1:] # add attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global", "for all matter related to general usage and behavior. 
Parameters: config ([`LongT5Config`]): Model", "ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu", "Optionally, instead of passing `input_ids` you can choose to directly pass an embedded", "list of heads to prune in this layer} See base class PreTrainedModel \"\"\"", "position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape:", "size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states", "+ side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ):", "Prepare components for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len :=", "Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow:", "return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a", "3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket(", "for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`]", "* block_len + global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask is not", "**not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple", "logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO: Update", "of mean. 
\"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states):", "past is not included in output # speedy decoding is disabled and no", "attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask:", "exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can", "+ attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states,", "the same time\" ) elif input_ids is not None: input_shape = input_ids.size() input_ids", "LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by", "attentions tensors of all attention layers. See `attentions` under returned tensors for more", "{self} is used as a decoder\" if attention_mask is None: attention_mask = torch.ones(batch_size,", "(batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size,", "`(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. 
Indices should be in", "config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self,", "length is not a multiple of `block_len`, it will be padded first with", "before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v", "= encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask,", "()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x =", ".configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC", "block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask,", "is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get", "states at the output of the last layer of the encoder. 
Used in", "decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def", "_keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def", "T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config)", "self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states =", "input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the", "tensor into blocks of a given `block_len` along the given `dim`. If the", "dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return", "r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config =", "hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5", "past is not None: input_ids = input_ids[:, -1:] return { \"decoder_input_ids\": input_ids, \"past_key_values\":", "side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len) position_bias =", "= prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o =", "input_mask, } return dummy_inputs def _init_weights(self, module): \"\"\"Initialize the weights\"\"\" factor = 
self.config.initializer_factor", "attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to", "= past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask,", "else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias =", "torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Concatenate three", "= self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states =", "position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1, n_heads, block_len,", "= self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be", "torch import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from ...activations", "be a multiple of `block_len`\"\"\" pad_len = -x.shape[dim] % block_len # Handle cases", "Replace masked positions with -1e10 (according to the original implementation) mask = torch.where(mask", "is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions,", "inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states *", "is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states],", "if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len, 3", "\"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) 
self.variance_epsilon = eps def forward(self, hidden_states): # LongT5", "LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout", "= 0) -> torch.Tensor: \"\"\"Pad a tensor so that a sequence length will", "encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights", "past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None,", "self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): \"\"\" Self-attention", "or saving, resizing the input embeddings, pruning heads etc.) This model is also", "nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from", "is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of", "torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3", "only the last `decoder_input_ids` (those that don't have their past key value states", "natively for proxies. 
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[...,", "= torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) -", "pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`,", "(batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is", "Indices of input sequence tokens in the vocabulary. LongT5 is a model with", "(`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary.", "isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89", "in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization", "if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states)", "that are **not masked**, - 0 for tokens that are **masked**. 
[What are", "of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is", "if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is", "See LongT5 docs for more information\" ) # shift inputs to the right", "input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs,", "+ ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)", "position bias\"\"\" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long,", "# (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias =", "encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f\"There should", "for tokens that are **not masked**, - 0 for tokens that are **masked**.", "versions. If you do not want to use any `decoder_head_mask` now, please set", "now relative_position is in the range [0, inf) # half of the buckets", "module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads *", "because of incompatibility with ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype,", "* block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2)", "using past key value states. 
Need to inject it here if present_key_value_state is", "implied. # See the License for the specific language governing permissions and #", "{ \"decoder_input_ids\": input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask,", "(self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is", "shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose", "# (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0)", "FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if", "None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights,", "half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight", "not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states,", "is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None", "(`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads", "values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False,", "[1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states =", "self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): 
self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self):", "= nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model,", "global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >=", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. It's an encoder-decoder transformer pre-trained", "correct batch idx from layer past batch dim # batch dim of `past`", "1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization", "if do_cross_attention: # the actual query length is unknown for cross attention #", "used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not", "decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) #", "block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64),", "longer sequences than the model has been trained on Args: relative_position: an int32", "to adjust position bias shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask,", "# Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] +", "that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`,", "output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ): normed_hidden_states =", "hidden_size, eps=1e-6): \"\"\" Construct a layernorm module in the LongT5 style. 
No bias", "[None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size,", "# See the License for the specific language governing permissions and # limitations", "), f\"past_key_value should have 2 past states: keys and values. Got { len(past_key_value)}", "Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention =", "= use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if", "torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is not", "dict of {layer_num: list of heads to prune in this layer} See base", "has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type ==", "mask and standard extended mask for transient-global attention extended_attention_mask = attention_mask # If", "output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if", "expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values:", "self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states +", "methods the library implements for all its model (such as downloading or saving,", "hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) #", "(1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros(", "\"\"\"Makes 3-blocked relative position ids for local attention.\"\"\" position_ids = 
torch.arange(3 * block_len,", "global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size]", "LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For encoder", "for detail. To know more on how to prepare `input_ids` for pretraining take", "config: LongT5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets", "buckets are for exact increments in positions max_exact = num_buckets // 2 is_small", "elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and", "modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates", "if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) +", "and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias,", "= [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared", "self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads):", "for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. 
It's", "shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape:", "self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout =", "if self.is_decoder else \"\" raise ValueError(f\"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\")", ":seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,)", "shown that owning a dog ```\"\"\" use_cache = use_cache if use_cache is not", "a decoder, orphan tokens, i.e. those tokens which do not make for the", "of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can", "all the parameters of the model. Initializing with a config file does not", "4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f\"There should be {expected_num_past_key_values} past states.", "__init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder", "* x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x,", "sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model", "of the self-attention modules in the encoder. Mask values selected in `[0, 1]`:", "= slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3", "index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper", "eps def forward(self, hidden_states): # LongT5 uses a layer_norm which only scales and", "attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None", "# Let's try a very long encoder input. 
>>> input_ids = tokenizer( ...", "might want to consider setting `use_cache=True` to speed up decoding\") return past reordered_decoder_past", "and cross attn key value states if present_key_value_state is not None: present_key_value_state =", "Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache,", "-> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\",", "directly pass an embedded representation. This is useful if you want more control", "= self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states", "2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError(", "def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache", "`decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
\"\"\" @add_start_docstrings( \"The bare LONGT5", "hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class", "+ 1) :] # If 0 is in output_shape, we cannot apply reshape", "self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder", "= _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores", "self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) #", "from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging, replace_return_docstrings, ) from", "): if past_key_value is not None: if not self.is_decoder: logger.warning(\"`past_key_values` is passed to", "the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds", "= () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder)", "normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs =", "the \"fixed block\" global id corresponding to each input token. This implementation is", "from the original sequence are represented by -1. 
\"\"\" batch_size, seq_len = attention_mask.shape[:2]", "Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor *", "need to reorder if past is None: logger.warning(\"You might want to consider setting", "[block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask:", "/ key) for cross attention. ' if expected_num_past_key_values == 4 else ''}\" f\"Got", "attention on padding token indices. Mask values selected in `[0, 1]`: - 1", "hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states =", "with -10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0,", "= shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len,", "from torch import nn from torch.nn import CrossEntropyLoss from torch.utils.checkpoint import checkpoint from", "Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor]", "interface for downloading and loading pretrained models. \"\"\" config_class = LongT5Config base_model_prefix =", "2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids,", "loading pretrained models. 
\"\"\" config_class = LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing = True", "[ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared =", "position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) +", "= torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates(", "local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len,", "dtype=torch.long, device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position -", "0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket =", "(attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return", "3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3", ") -> torch.Tensor: \"\"\"Compute individual block aggregates by summing over individual blocks.\"\"\" #", "for small absolute relative_position and larger buckets for larger absolute relative_positions. 
All relative", "import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids", "decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask #", "None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None,", "position biases between the layers - the first layer store them # layer_outputs", "in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None", "transient-global attention extended_attention_mask = attention_mask # If a 2D or 3D attention mask", "= hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class", "= False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder", "side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads,", "layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if", "math import warnings from typing import Any, List, Optional, Tuple, Union import torch", "is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))", "None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, 
past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\"", "if decoder past is not included in output # speedy decoding is disabled", "(self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias =", "and the left. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]", "not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative", "hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) #", "avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim,", "See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or", "proxies. 
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)", "outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool", "torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: \"\"\"Prepare attention", "if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states", "= config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads =", "position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value)", "If 0 is in output_shape, we cannot apply reshape because of incompatibility with", "encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):", "attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs =", "= True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size,", "in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None,", "no bias. 
Additionally we want to make sure that the accumulation for #", "self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) #", "usually set to the\" \" pad_token_id. See LongT5 docs for more information\" )", "\"\"\" LONGT5_INPUTS_DOCSTRING = r\"\"\" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of", "# Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention,", "Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)", "seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len)", "not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if", "if we want to if layer_head_mask is not None: attn_weights = attn_weights *", "# Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif", "# https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i +", "config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling", "the preceding block. Padding tokens from the original sequence are represented by -1.", "if using past key value states. Need to inject it here if present_key_value_state", "... 
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids", "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def _pad_to_multiple(x: torch.Tensor, block_len: int,", "sequence_dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Concatenate three consecutive blocks for", "computed for labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers", "// 2 is_small = relative_position < max_exact # The other half of the", "return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning(\"`past_key_values` is passed", "initialization and a simple interface for downloading and loading pretrained models. \"\"\" config_class", "= self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states =", "we cannot apply reshape because of incompatibility with ONNX conversion if 0 in", "hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ):", "the PyTorch documentation for all matter related to general usage and behavior. Parameters:", "return_tensors=\"pt\" ... 
).input_ids # Batch size 1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0],", "Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]", "sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly", "= set() self.gradient_checkpointing = False # Relativen attention bias & Layer norm for", "self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them", "block. Padding tokens from the original sequence are represented by -1. \"\"\" batch_size,", "the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head", "block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype)", "modeling` head on top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", r\"lm_head.weight\",", "pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`,", "`(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention", "std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, \"bias\") and module.wi_1.bias is not None:", "`past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those", "LONGT5_START_DOCSTRING, ) class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [", "attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. 
\"\"\" num_blocks = x.shape[block_dim] pad = [(0,", "config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius", "if len(past_key_value) != expected_num_past_key_values: raise ValueError( f\"There should be {expected_num_past_key_values} past states. \"", "= -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) #", "`[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored", "information\" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment", "i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize", "encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare", "heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as", "else: position_bias = self.compute_bias(self.block_len) if mask is not None: # Replace masked positions", "x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: \"\"\"Split an input", "LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo =", "num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1,", "assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)", "import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC =", "do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual", "model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean", "Whether or not to return the attentions tensors of all attention layers. See", "# Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask,", "Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores", "Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length,", "Let's try a very long encoder input. >>> input_ids = tokenizer( ... 
100", "given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype)", "= None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] =", "Returns: Example: ```python >>> from transformers import T5Tokenizer, LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\")", "= self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output", "= None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] =", "ourselves in which case we just need to make it broadcastable to all", "= handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size] if", "(cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias", "if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))", "None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict,", "handle weights initialization and a simple interface for downloading and loading pretrained models.", "self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = 
self.DenseReluDense(forwarded_states) hidden_states", "torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int)", "`language modeling` head on top.\"\"\", LONGT5_START_DOCSTRING) class LongT5ForConditionalGeneration(LongT5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\",", "attention_mask # If a 2D or 3D attention mask is provided for the", "= prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) #", "None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value", "new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): \"\"\"", "the actual query length is unknown for cross attention # if using past", "encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,", "if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs", "to nullify selected heads of the self-attention modules in the encoder. 
Mask values", "decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected", "seq_length = hidden_states.shape[:2] def shape(states): \"\"\"projection\"\"\" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states):", "torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of", "and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is", "self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k", "the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if", "in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact)", "# Combine self attn and cross attn key value states if present_key_value_state is", "pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed", "max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2,", "with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): \"\"\" Construct a layernorm module", "to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). 
decoder_attention_mask (`torch.BoolTensor`", "= side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len) position_bias", "seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len //", "the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns: Examples:", "assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\" # replace possible", "block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values", "else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already", "= True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) #", "encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache =", "ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default.", "relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket)", "ValueError( f\"There should be {expected_num_past_key_values} past states. 
\" f\"{'2 (past / key) for", "inputs across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head)", "hidden_size)` is a sequence of hidden states at the output of the last", "# If 0 is in output_shape, we cannot apply reshape because of incompatibility", "past states: keys and values. Got { len(past_key_value)} past states\" real_seq_length += past_key_value[0].shape[2]", "value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local key/value blocks", "matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of", "sure this is intended.\") expected_num_past_key_values = 2 if encoder_hidden_states is None else 4", "`inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are", "to a bucket number for relative attention. The relative position is defined as", "https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, \"bias\")", "+ attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention", "true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids =", "is provided for the cross-attention # we need to make broadcastable to [batch_size,", "you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask =", "def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = 
LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)", "position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # Combine self", "inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if", "# Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len)", "`(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of", "fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype)", "and encoder_hidden_states is not None if do_cross_attention: # the actual query length is", "should be {expected_num_past_key_values} past states. \" f\"{'2 (past / key) for cross attention.", "Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To", "more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5", "set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self):", "you should be able to pad the inputs on both the right and", "global id corresponding to each input token. 
This implementation is a simlified version", "3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True", "states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # get", "3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads,", "(such as downloading or saving, resizing the input embeddings, pruning heads etc.) This", "dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads,", ") # set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) +", "`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a", "- query_position, i.e. the distance in tokens from the attending position to the", "= None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example:", "Model transformer outputting encoder's raw hidden-states without any specific head on top.\", LONGT5_START_DOCSTRING,", "# hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)", "from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with T5->LongT5 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing", "a dog is good for you\", return_tensors=\"pt\" ... 
).input_ids # Batch size 1", "we want to make sure that the accumulation for # half-precision inputs is", "self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None,", "\"\"\" An abstract class to handle weights initialization and a simple interface for", "subtraction of mean. \"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self,", "be applied for a local attention.\"\"\" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask,", "2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias", "generative setting. LongT5 model is an extension of T5 model, and it enables", "n_heads, block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights =", "self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if", "len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs =", "/ value states\" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value", "0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of", "LongT5LayerNorm pass except Exception: logger.warning(\"discovered apex but it failed to load, falling back", "** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) )", "use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict:", "relative positions <=-max_distance map to the same bucket. This should allow for more", "None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2)", "def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm =", "to be defined.\" # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids", "-> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends =", "argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. 
Currently, `decoder_head_mask` is", "nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing", "self.block_len, inputs_embeds.device) else: # we need to use both local attention mask and", "LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared =", "n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) #", "applicable law or agreed to in writing, software # distributed under the License", "authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size,", "x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] = [] for i in", "num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return", "and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if", "input block for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. \"\"\" num_blocks =", "input (see `past_key_values`). 
To know more on how to prepare `decoder_input_ids` for pretraining", "of the buckets are for logarithmically bigger bins in positions up to max_distance", "None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions", "# Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs", "= output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states", "between the layers - the first layer store them # layer_outputs = hidden-states,", "= \"\"\" The input argument `head_mask` was split into two arguments `head_mask` and", "processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.get_input_embeddings def get_input_embeddings(self): return self.embed_tokens", "return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)", "block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states,", "should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to", "four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), )", "the original implementation) mask = torch.where(mask > 0, 0.0, -1e10) # We need", "`True` if {self} is used as a decoder\" if attention_mask is None: attention_mask", "+ (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs", "# the actual query length is unknown for cross attention # if using", "hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,)", "3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states", "or not to return the attentions tensors of all attention layers. See `attentions`", "outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5", "relative position bias\"\"\" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position", "computing the sequence classification/regression loss. 
Indices should be in `[-100, 0, ..., config.vocab_size", "length will be a multiple of `block_len`\"\"\" pad_len = -x.shape[dim] % block_len #", "isinstance(module, (LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def", "= self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are already calculated #", "err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError( f\"You cannot specify both", "layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention", "hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers", "return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head)", "self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for", "super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model,", "return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is", "num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] =", "layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the", "(batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len", "batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states =", "= num_buckets // 2 is_small = relative_position < max_exact # The other half", "return_dict: Optional[bool] = None, ) -> 
Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of shape", "\"summarize: \" + 100 * \"studies have shown that owning a dog is", "unknown for cross attention # if using past key value states. Need to", "present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,)", "tokens from the original sequence are represented by -1. \"\"\" batch_size, seq_len =", "block, are assigned to the preceding block. Padding tokens from the original sequence", "and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if", "*optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an", "None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] #", "= self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor *", "n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks ->", "# If a 2D or 3D attention mask is provided for the cross-attention", "import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... 
\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\"", "reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and", "= past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache", "(batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn", "= None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] =", "(the \"License\"); # you may not use this file except in compliance with", "torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device)", "[torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import", "smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions.", ":, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int,", "output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0],", "embed_tokens self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block", "nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Relativen attention bias &", "relative position embeddings so you should be able to pad the inputs on", "directly pass an embedded representation. 
If `past_key_values` is used, optionally only the last", "new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return", "transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained( ...", "block_len locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask)", "# shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not", "(num_blocks, block_len) + x.shape[(dim + 1) :] # If 0 is in output_shape,", "compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias =", "sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing", "bias shape to be sum with mask position_bias = position_bias + mask.transpose(1, 2)", "a layernorm module in the LongT5 style. No bias and no subtraction of", "query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:]", "Batch size 1 >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size", "transformer pre-trained in a text-to-text denoising generative setting. 
LongT5 model is an extension", "not None else seq_length if use_cache is True: assert self.is_decoder, f\"`use_cache` can only", "# if decoder past is not included in output # speedy decoding is", "present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions(", "to return a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\" # Warning message", "use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask,", "Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\", ] def", "decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The input argument `head_mask` was split into two arguments", "to the same bucket. All relative positions <=-max_distance map to the same bucket.", "reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past`", "= None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor`", "# TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = [ \"google/long-t5-local-base\", \"google/long-t5-local-large\", \"google/long-t5-tglobal-base\", \"google/long-t5-tglobal-large\",", "text-to-text denoising generative setting. 
LongT5 model is an extension of T5 model, and", "attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input", "None and inputs_embeds is not None: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\"", "Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>", "input (see `past_key_values`). This is useful if you want more control over how", "= self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i", "indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] =", "1]`. All labels set to `-100` are ignored (masked), the loss is only", "def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] =", "100 * \"Studies have been shown that owning a dog is good for", "return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config,", "None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None:", "= _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks", "for local attention.\"\"\" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] #", "take a look a [LONGT5 Training](./longt5#training). 
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):", "Mask values selected in `[0, 1]`: - 1 indicates the head is **not", "- context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not", "states. Need to inject it here if present_key_value_state is not None: query_length =", "past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states", "(batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len,", "_global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get", "output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict", "i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len,", "= shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states =", "): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions =", "= LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states", "layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,)", "necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states", "3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) 
local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size,", "boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer", "if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v", "if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions =", "self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__(self,", "values in the range [0, num_buckets) \"\"\" relative_buckets = 0 if bidirectional: num_buckets", ") from .configuration_longt5 import LongT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC =", "please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. \"\"\" @add_start_docstrings( \"The bare LONGT5 Model transformer", "encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None`", "`past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of", "T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): \"\"\" Construct a layernorm module in", "return torch.einsum(\"...nd,...ng->...gd\", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def", "memory_position - query_position, i.e. the distance in tokens from the attending position to", "if {self} is used as a decoder\" if attention_mask is None: attention_mask =", "be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100`", "def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder", "is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None", "aim of this article is to summarize the studies have shown that owning", "self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad", "proj_layer, key_value_states, past_key_value): \"\"\"projects hidden states correctly to key/query states\"\"\" if key_value_states is", "the LongT5 style. No bias and no subtraction of mean. \"\"\" super().__init__() self.weight", "embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length", "last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5 Model with a", "= _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask)", "else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids", "import copy import math import warnings from typing import Any, List, Optional, Tuple,", "the model's internal embedding lookup matrix. 
If `decoder_input_ids` and `decoder_inputs_embeds` are both unset,", "r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size,", "class LongT5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model,", "for the specific language governing permissions and # limitations under the License. \"\"\"", "we just need to make it broadcastable to all heads. # We use", "self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init()", "def __init__(self, config): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout", "dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\": input_ids, \"input_ids\":", "((self.config.d_model) ** -0.5)) if hasattr(module.wi, \"bias\") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0,", "pad = [(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1],", "import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils", "attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with", "_prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list", "if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else", "implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only", "= x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim +", "dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to", "input_ids = input_ids[:, -1:] return { \"decoder_input_ids\": input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\":", "self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == \"local\": extended_attention_mask", "kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions,", "AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... ) >>> # Let's try", "use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): #", "of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with onnx op>9 if position_bias is None: if", "IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. 
If", "def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states +", "attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output)", "n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn #", "need to make it broadcastable to all heads. # We use local attention", "decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions,", "encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config)", "* attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals", "self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k,", "# Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)", "= all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states =", "= reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( \"The bare LONGT5 Model transformer outputting", "False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) 
decoder_config.is_decoder = True decoder_config.is_encoder_decoder =", "block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) -", "self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) ==", "# Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output", "present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs =", "to copy `head_mask`, but this feature is deprecated and will be removed in", "block_dim=1, sequence_dim=2) # Tile side inputs across local key/value blocks # New shape:", "skip_special_tokens=True)) abstractthe aim of this article is to summarize the studies have shown", "self attn and cross attn key value states if present_key_value_state is not None:", "encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a", "# get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states", ">>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer( ...", "we use this strategy only for a decoder, orphan tokens, i.e. 
those tokens", "*optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size,", "not self.is_decoder: logger.warning(\"`past_key_values` is passed to the encoder. Please make sure this is", "from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements", "Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict:", "states\"\"\" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states", "config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout =", "return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings):", "= False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets =", "AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... 
\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ...", "them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used in encoder\"\"\" def __init__(self,", "return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None", "shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape", "not None, \"self.model.config.pad_token_id has to be defined.\" # replace possible -100 values in", "if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)", "across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps", "long encoder input. >>> input_ids = tokenizer( ... 100 * \"Studies have been", "inputs_embeds is None: assert self.embed_tokens is not None, \"You have to initialize the", "None if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len,", "= nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] = [] for i in range(3):", "replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert", "states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states", "else: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError(f\"You have to specify", "a tensor that ignores pad tokens in `decoder_input_ids`. 
Causal mask will also be", "= \"transformer\" supports_gradient_checkpointing = True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids", "() if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions", "# coding=utf-8 # Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team.", "not make for the whole fixed block, are assigned to the preceding block.", "for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len //", "= _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len,", "attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None", "so you should be able to pad the inputs on both the right", "self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type == \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len,", "self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks,", "self.config.encoder_attention_type == \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to", "= False encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and", "= self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\"", "LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = 
nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None,", "= shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states", "multiple of block_len if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len,", "encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask", "the dimension length is not a multiple of `block_len`, it will be padded", "value states. Need to inject it here if present_key_value_state is not None: query_length", "encoder self-attention, otherwise standard self & cross attentions are used if self.is_decoder: extended_attention_mask", "module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1,", "module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is not", "encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask,", "from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will", "nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is", "project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None )", "def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def", 
"side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias =", "= self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states =", "self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def", "else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )", "None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids,", "noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm\") except ImportError: #", "def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model.", "config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False", ") if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states,", "Used in the cross-attention of the decoder. 
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with", "max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1,", "None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:", "forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states)", "should have 2 past states: keys and values. Got { len(past_key_value)} past states\"", "nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None,", ").input_ids # Batch size 1 >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids #", ") # initialize past_key_values with `None` if past does not exist if past_key_values", "None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example: ```python", "max_exact # The other half of the buckets are for logarithmically bigger bins", "self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size =", "past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask,", "has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout", "if output_hidden_states is not None 
else self.config.output_hidden_states ) return_dict = return_dict if return_dict", "selected `pad_value`. \"\"\" # pad tensor to multiple of block_len if x.shape[dim] %", "got {config.encoder_attention_type}.\" ) self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def", "`past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states", "1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length,", ":])[:, None, ...] attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len,", "assert decoder_start_token_id is not None, ( \"self.model.config.decoder_start_token_id has to be defined. In LongT5", "a [`~utils.ModelOutput`] instead of a plain tuple. 
\"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\" Args: input_ids", "Concatenate \"local\" and \"side\"/\"global\" key/value states to allow each token to attend global", "None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:", "value=pad_value) blocks_list: List[torch.Tensor] = [] for i in range(3): # We use indexing", "global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens", "bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)", "Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds,", "a model with relative position embeddings so you should be able to pad", "is not None else seq_length if use_cache is True: assert self.is_decoder, f\"`use_cache` can", "= _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states", "bias. 
Additionally we want to make sure that the accumulation for # half-precision", "torch.tensor(DUMMY_MASK) dummy_inputs = { \"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, } return dummy_inputs", "1 >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article", "* ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_()", "provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads,", "key_length) scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads,", "labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is", "+ (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def", "r\"\"\" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression", "hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING = r\"\"\" The LongT5 model was proposed in", "layernorm module in the LongT5 style. No bias and no subtraction of mean.", "Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for", "if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is", "+ (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. 
It's an encoder-decoder transformer pre-trained in", "AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer( ... 100 * \"Studies", "# (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states", "self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states", "global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias", "IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a", "= AutoTokenizer.from_pretrained(\"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... 
) >>> # Let's", "*optional*): Mask to nullify selected heads of the cross-attention modules in the decoder.", "\"The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on", "_keys_to_ignore_on_load_missing = [ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self,", "-> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states =", "not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) LONGT5_START_DOCSTRING =", "layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask,", "on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training).", "\"\"\"Obtain the \"fixed block\" global id corresponding to each input token. 
This implementation", "back to LongT5LayerNorm\") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def", "used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type ==", "value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions =", "+ x.shape[(dim + 1) :] # If 0 is in output_shape, we cannot", "\"\"\"Prepare attention mask to be applied for a local attention.\"\"\" # [batch_size, num_blocks,", "= LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing", "memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape", "mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] = [] for i in range(3): # We use", "None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict", "T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is", "bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared", "True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values are", "block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, #", "all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v", "shifted_input_ids[..., 0] = decoder_start_token_id assert 
pad_token_id is not None, \"self.model.config.pad_token_id has to be", "in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states,", "return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on", "useful if you want more control over how to convert `input_ids` indices into", "\"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects hidden", "head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs +", "= False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and", "nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def", "max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets -", "a given `block_len` along the given `dim`. 
If the dimension length is not", "can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if", "= torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device", "self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states #", "module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, \"bias\") and module.wi_1.bias is", "use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions", "from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): \"\"\" Construct a", "get_input_embeddings(self): return self.embed_tokens # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings", "block_len # Handle cases when an empty input sequence is given if not", "n_heads, block_len, 3 * block_len) if position_bias is None: # position_bias shape: #", "sum with mask position_bias = position_bias + mask.transpose(1, 2) scores += position_bias #", "the relative position tensor for local -> global attention.\"\"\" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask,", "extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use both local", "the starting token for `decoder_input_ids` generation. 
If `past_key_values` is used, optionally only the", "= torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device)", "\"\"\" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len)", "outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False):", "nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets,", "config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config,", "that the accumulation for # half-precision inputs is done in fp32 variance =", "pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] ->", "custom_forward layer_outputs = checkpoint( create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask,", "how to convert `input_ids` indices into associated vectors than the model's internal embedding", "key value states. Need to inject it here if present_key_value_state is not None:", "decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is", "query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value", "token. 
This implementation is a simlified version of the original Flaxformr implementation adopted", "pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return x def", "bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False", "loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output =", "-1e10 (according to the original implementation) mask = torch.where(mask > 0, 0.0, -1e10)", "query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): \"\"\"projection\"\"\"", "encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None,", "(layer_output,) + attention_output[1:] # add attentions if we output them return outputs class", "self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states =", "global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states =", "states are returned and can be used to speed up decoding (see `past_key_values`).", "3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length] # (block_length, 3 *", "int, pad_value: int = 0) -> torch.Tensor: \"\"\"Pad a tensor so that a", "weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed 
Forward layer hidden_states =", "(training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in", "block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids(", "> 0, 0.0, -1e10) else: local_attention_mask = None if position_bias is None: #", "implementation is a simlified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py.", "# Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136", "int, dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Pad a tensor so", "None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if", "into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1)", "x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) ->", "slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 *", "= torch.where(mask > 0, 0.0, -1e10) # We need to adjust position bias", "config file does not load the weights associated with the model, only the", "config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout", "used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm", "ones # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, 
dim_per_head)", "model = LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer( ... 100 * \"Studies have been", "self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads =", "i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if", "seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size)", "1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype,", "Google LLC., LongT5 Authors and HuggingFace Inc. team. # # Licensed under the", "= False def prune_heads(self, heads): if len(heads) == 0: return heads, index =", "that are **masked**. 
[What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or", "Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns:", "= attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output", "= LongT5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self,", "encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None,", "locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device)", "if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape", "https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if", "(attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False)", "= nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states):", "global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len] 
global_block_ids", "into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of", "input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else:", ">>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try a", "original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None", "from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only for a", "right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids", "attention mask to enforce that tokens are not allowed to attend tokens farther", "bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float()", "layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether", "token embeddings\" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq", "for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)", "{expected_num_past_key_values} past states. \" f\"{'2 (past / key) for cross attention. ' if", "shown that owning a dog is good for you \", return_tensors=\"pt\" ... ).input_ids", "(see `past_key_values`). 
This is useful if you want more control over how to", "not None: # (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias = position_bias", "= self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights", "== layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past", "decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that", "permissions and # limitations under the License. \"\"\" PyTorch LongT5 model.\"\"\" import copy", "prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask,", "= attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum(\"...hqk,...khd->...qhd\", attn_weights, value_states)) attn_output", "states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)", "abstract class to handle weights initialization and a simple interface for downloading and", "attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states =", "states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): \"\"\"projects hidden states correctly", "* ((d_model) ** -0.5) ) # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._set_gradient_checkpointing with 
T5->LongT5 def _set_gradient_checkpointing(self,", "self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh", "only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more", "global_seq_len: int ) -> torch.Tensor: \"\"\"Compute individual block aggregates by summing over individual", "only scales and doesn't shift, which is also known as Root Mean #", "states\" real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length", "self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) #", "\"bias\") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))", "the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global", "into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`,", "= True @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask", "use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
\"\"\" @add_start_docstrings( \"The", ":, -hidden_states.size(1) :, :] if mask is not None: position_bias = position_bias +", "return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length", "max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads,", "device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist", "encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is", "* x.ndim indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) #", "num_heads, seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size,", "use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask,", "num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size,", "cross attention # if using past key value states. Need to inject it", "embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the", "attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used,", "scores scores = torch.einsum( \"...qhd,...khd->...hqk\", query_states, key_states ) # (batch_size, num_block, n_heads, block_len,", "= LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply", "r\"\"\" The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long", "them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention", "= memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3", "pad=pad, mode=\"constant\", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) ->", "position bias shape to be sum with mask position_bias = position_bias + mask.transpose(1,", "= False def create_custom_forward(module): def custom_forward(*inputs): return tuple(module(*inputs, use_cache, output_attentions)) return custom_forward layer_outputs", "> 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) #", "self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we", "inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can", "License. 
# You may obtain a copy of the License at # #", "dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set", "-> torch.Tensor: \"\"\"Split an input tensor into blocks of a given `block_len` along", "bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask", "\"\"\"Transient-Global self attention used in encoder\"\"\" def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.TransientGlobalSelfAttention =", "initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim", "should be able to pad the inputs on both the right and the", "is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, \"bias\")", "super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache =", "(hidden_states,) if self.gradient_checkpointing and self.training: if use_cache: use_cache = False def create_custom_forward(module): def", "padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1)", "# (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3", "# append next layer key value states if use_cache: present_key_value_states = present_key_value_states +", "detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of", "input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape", "decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions:", "= config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize", "torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad", "from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel,", "attend global aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len +", "to accept past_key_value and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention(", "= None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] =", "cross_attention_outputs[0] # Combine self attn and cross attn key value states if present_key_value_state", "decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states,", "length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None", "not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head)", "hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = 
self_attention_outputs[2:] # Keep self-attention outputs and relative", "indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor`", "`-100` are ignored (masked), the loss is only computed for labels in `[0,", "None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value", "self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before", "config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False", "attention.\"\"\" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block,", "if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states,", "# Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied", "position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache kwargs ):", "torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) #", "forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] =", "prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). attention_mask (`torch.FloatTensor` of", "decoding (see `past_key_values`). 
output_attentions (`bool`, *optional*): Whether or not to return the attentions", "num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros(", "_blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len]", "class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__()", "@property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK)", "\" pad_token_id. See LongT5 docs for more information\" ) # shift inputs to", "tensors of all attention layers. See `attentions` under returned tensors for more detail.", "has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance =", "None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None,", "1]`: - 1 for tokens that are **not masked**, - 0 for tokens", "config.encoder_attention_type == \"local\": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == \"transient-global\": attention_layer = LongT5LayerTransientGlobalSelfAttention", "global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads,", "from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, 
config): super().__init__() self.EncDecAttention = LongT5Attention(config,", "key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias", "the layers - the first layer store them # layer_outputs = hidden-states, key-value-states", "for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the", "eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs:", "attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if", "represented by -1. \"\"\" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor:", "= ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states =", "not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else", "and HuggingFace Inc. team. 
# # Licensed under the Apache License, Version 2.0", "* block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states =", "torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): \"\"\"Compute binned relative position", "self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape", "to longer sequences than the model has been trained on Args: relative_position: an", "# Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim,", "# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:]", "): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value,", "indicates the head is **not masked**, - 0 indicates the head is **masked**.", "else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask,", "1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None,", "self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None,", "factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor", "shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size,", "if 
key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states =", "Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor]", "model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to summarize the", "labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids,", "don't have their past key value states given to this model) of shape", "# Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states,", "self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if", "is unknown for cross attention # if using past key value states. Need", "self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def", "= torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask,", "= eps def forward(self, hidden_states): # LongT5 uses a layer_norm which only scales", "device: torch.device) -> torch.Tensor: \"\"\"Prepare attention mask to be applied for a local", "[batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask,", "length is unknown for cross attention # if using past key value states.", "config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim #", "+= torch.where(is_small, relative_position, 
relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): \"\"\"Compute binned relative", "cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, )", "+ fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids", "Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs", "\"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO: Update before the merge", "except ImportError: # using the normal LongT5LayerNorm pass except Exception: logger.warning(\"discovered apex but", "...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from", "num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self,", "= nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we", "(provided by key_value_states). \"\"\" # Input is (batch_size, seq_length, dim) # Mask is", "we need to use both local attention mask and standard extended mask for", "same bucket. 
This should allow for more graceful generalization to longer sequences than", "device=device)[:, None] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position", "sequence tokens in the vocabulary. Indices can be obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`]", "use both local attention mask and standard extended mask for transient-global attention extended_attention_mask", "selected heads of the self-attention modules. Mask values selected in `[0, 1]`: -", "key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len -", "_blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask =", "LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states,", "on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training).", "all its model (such as downloading or saving, resizing the input embeddings, pruning", "\"head_mask\": head_mask, \"decoder_head_mask\": decoder_head_mask, \"cross_attn_head_mask\": cross_attn_head_mask, \"use_cache\": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):", "pass an embedded representation. This is useful if you want more control over", "to return the attentions tensors of all attention layers. See `attentions` under returned", "\"decoder_\" if self.is_decoder else \"\" raise ValueError(f\"You have to specify either {err_msg_prefix}input_ids or", "present_key_value_state = layer_outputs[:2] # We share the position biases between the layers -", "batch idx from layer past batch dim # batch dim of `past` is", "in the encoder. 
Mask values selected in `[0, 1]`: - 1 indicates the", "elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif", "*optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal", "layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states,", "if local_attention_mask is not None: # (batch_size, 1, n_heads, block_len, 3 * block_len)", "use this file except in compliance with the License. # You may obtain", "return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length,", "= None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] =", "# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss =", "[ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None )", "enforce that tokens are not allowed to attend tokens farther than ``local_radius.\"\"\" relative_position_ids", "# Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes:", "block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1)", "already calculated # we want only the last query position bias if past_key_value", "is not None else None ) # compute scores scores = torch.matmul( query_states,", "if past_key_value is not None: assert ( len(past_key_value) == 2 ), f\"past_key_value should", "= 
_concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block,", "separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The input", "self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh", "\"\"\"Mask local attention mask to enforce that tokens are not allowed to attend", "None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None,", "torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not", "= FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm\")", "((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5)", "hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) ==", "= [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks) indices =", "= (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) +", "of a plain tuple. \"\"\" # Warning message for FutureWarning: head_mask was separated", "\"fixed block\" global id corresponding to each input token. This implementation is a", "* block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias #", "all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`,", "not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1]", "with relative position embeddings so you should be able to pad the inputs", "else: # we need to use both local attention mask and standard extended", "None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None,", "_global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound", "...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes 3-blocked relative position", "* (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) **", "dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len +", "hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with", "f\"`use_cache` can only be set to `True` if {self} is used as a", "= [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states", "selected heads of the self-attention modules in the decoder. 
Mask values selected in", "self.layer = nn.ModuleList() self.layer.append(attention_layer(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config)) self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states,", "dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(real_seq_length,", "len(past_key_value) != expected_num_past_key_values: raise ValueError( f\"There should be {expected_num_past_key_values} past states. \" f\"{'2", "self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states))", "# Let's try a very long input. >>> input_ids = tokenizer( ... \"summarize:", "past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) +", "0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`,", "# Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not", "`decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape", "self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) #", "query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length,", "None: assert ( len(past_key_value) == 2 ), f\"past_key_value should have 2 past states:", "Forward layer hidden_states = self.layer[-1](hidden_states) outputs = (hidden_states,) if use_cache: outputs = outputs", "= self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states)", "config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads", "can optionally input only the last `decoder_input_ids` (those that don't have their past", "key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores", "attention mechanisms - (1) Local attention, or (2) Transient-Global attention. This model inherits", ">>> input_ids = tokenizer( ... 
100 * \"Studies have been shown that owning", "for layer_past_states in past: # get the correct batch idx from layer past", "= side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and \"side\"/\"global\" key/value states to allow each token", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device,", "create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is", "= torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype)", "attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode", "a Tensor with the same shape as relative_position, containing int32 values in the", "torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask", "key_value_states is None) or attention over source sentence (provided by key_value_states). \"\"\" #", "uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. 
If `past_key_values` is", "if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max =", "1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size,", "n_heads, seq_length, key_length) scores += position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) #", "Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence", "+= position_bias attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length)", "(`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads", "{ \"decoder_input_ids\": input_ids, \"input_ids\": input_ids, \"decoder_attention_mask\": input_mask, } return dummy_inputs def _init_weights(self, module):", "Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module,", "value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if", "params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads =", "varience is calculated # w/o mean and there is no bias. 
Additionally we", "transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(", "True: assert self.is_decoder, f\"`use_cache` can only be set to `True` if {self} is", "Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor]", "mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len", "None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: r\"\"\" labels (`torch.LongTensor` of", "nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads):", "\"bias\") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))", "for local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. \"\"\" num_blocks = x.shape[block_dim] pad", "... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states =", "3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) #", "= BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs)", "# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): \"\"\"", "small absolute relative_position and larger buckets for larger absolute relative_positions. 
All relative positions", ") class LongT5EncoderModel(LongT5PreTrainedModel): authorized_missing_keys = [ r\"encoder.embed_tokens.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config)", "torch.Tensor: \"\"\"Mask local attention mask to enforce that tokens are not allowed to", "num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights", "% block_len # Handle cases when an empty input sequence is given if", "# cross-attn hidden_states = past_key_value return hidden_states # get query states query_states =", "hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_value and use_cache", "return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: \"\"\"Prepare", "pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] = [] for i in range(3): # We", "_make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: \"\"\"Create the relative position tensor for local", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "We need to adjust position bias shape to be sum with mask local_attention_mask", "v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, )", "+ layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between", "relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, 
max_distance=self.relative_attention_max_distance, )", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions:", "** -0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module,", "are used if self.is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, inputs_embeds.device ) elif self.config.encoder_attention_type", "unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states)", "at the same time\" ) elif input_ids is not None: input_shape = input_ids.size()", "= self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, )", "x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks =", "output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # Combine self attn and cross attn key", "original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this", "obtained using [`T5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on", "want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights", "in the vocabulary. Indices can be obtained using [`T5Tokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]", "if hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention,", "three consecutive blocks for each input block for local attentiont. For more information,", "self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global", "not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long )", "bias\"\"\" memory_position = torch.arange( 3 * block_length, dtype=torch.long, device=self.relative_attention_bias.weight.device ) context_position = memory_position[block_length:-block_length]", "(LongT5Attention, LongT5Stack)): module.gradient_checkpointing = value # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self,", "Let's try a very long input. >>> input_ids = tokenizer( ... \"summarize: \"", "is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of", "studies have shown that owning a dog ```\"\"\" use_cache = use_cache if use_cache", "sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return x def _split_into_blocks(x: torch.Tensor,", "to speed up decoding\") return past reordered_decoder_past = () for layer_past_states in past:", "attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) present_key_value_state = None outputs =", "use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state =", "**not masked**, - 0 indicates the head is **masked**. 
decoder_head_mask (`torch.FloatTensor` of shape", "*optional*): If set to `True`, `past_key_values` key value states are returned and can", "- fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask +", "-1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): \"\"\"reshape\"\"\" return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def", "-> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list:", "index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index,", "with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros(", "= values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) ->", "module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor *", "% global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids", "and there is no bias. 
Additionally we want to make sure that the", "config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0))", "(batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into", "module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, \"bias\")", "all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`,", "`past_key_values`). This is useful if you want more control over how to convert", "= self.layer[-1](hidden_states) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) +", "dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate", "super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == \"local\":", "first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention", "_global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask)", "nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int)", "layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights),", "local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps =", "output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used in encoder\"\"\" def", 
"using the normal LongT5LayerNorm pass except Exception: logger.warning(\"discovered apex but it failed to", "local attention.\"\"\" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len,", "block\" global id corresponding to each input token. This implementation is a simlified", "= config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout =", "self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions,", "self.pruned_heads = set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads):", "> 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder(", "+ (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs #", "nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to", "not None: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError( f\"You cannot", "relative position weights do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention:", "= False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size,", "forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return 
hidden_states", "`past_key_values` key value states are returned and can be used to speed up", "attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if", "not to return a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\" # Warning", "num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) #", "output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with", "An abstract class to handle weights initialization and a simple interface for downloading", "= self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def", "None, ( \"self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set", "with selected `pad_value`. \"\"\" # pad tensor to multiple of block_len if x.shape[dim]", "position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value", "global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1]", "past states. \" f\"{'2 (past / key) for cross attention. 
' if expected_num_past_key_values", "Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no", "if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions,", "LongT5EncoderModel.from_pretrained(\"google/long-t5-local-base\") >>> input_ids = tokenizer( ... 100 * \"Studies have been shown that", "3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout,", "past_key_value[1] if past_key_value is not None else None ) # compute scores scores", "num_buckets) \"\"\" relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position", "block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights,", "input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, )", "import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use it", "bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states)", "_make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, 
None, :, :] locality_mask", ") elif self.config.encoder_attention_type == \"local\": extended_attention_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we", "= nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights =", "decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values:", "in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each", "= x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad", "\"\"\" # Warning message for FutureWarning: head_mask was separated into two input args", "= _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim]", "None], global_segment_ids[:, None, :])[:, None, ...] attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10)", "to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
\"\"\" @add_start_docstrings(", "attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if", "self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None,", "pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions,", "= locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask:", "logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm\") except ImportError: # using", "having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed", "position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length),", "bucket. All relative positions <=-max_distance map to the same bucket. This should allow", "Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>>", "key value states are returned and can be used to speed up decoding", "`pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has", "performing attention on padding token indices. Mask values selected in `[0, 1]`: -", "from the attending position to the attended-to position. If bidirectional=False, then positive relative", "models. 
\"\"\" config_class = LongT5Config base_model_prefix = \"transformer\" supports_gradient_checkpointing = True @property #", "values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len,", "of this article is to summarize the studies have shown that owning a", "LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense", "self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def", "The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "= self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if", "inputs_embeds is not None: err_msg_prefix = \"decoder_\" if self.is_decoder else \"\" raise ValueError(", "None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0,", "= r\"\"\" The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for", "0.0, -1e10) # We need to adjust position bias shape to be sum", "to allow each token to attend global aggregated ones # New shape: (batch_size,", "-1e10) # We need to adjust position bias shape to be sum with", "via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not", "if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,", "the hidden states of all layers. 
See `hidden_states` under returned tensors for more", "seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not", "able to pad the inputs on both the right and the left. Indices", "''}\" f\"Got {len(past_key_value)} past key / value states\" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value", "(position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def", "self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model", "= hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if", "decoder\" if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask", "outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config,", "falling back to LongT5LayerNorm\") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module):", "has to be defined.\" # replace possible -100 values in labels by `pad_token_id`", "& (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None", "LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return", "else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0]", "to be applied for a local attention.\"\"\" # [batch_size, num_blocks, block_len] _blocked_attention_mask =", "embeddings so you should be 
able to pad the inputs on both the", "-0.5)) if hasattr(module.wo, \"bias\") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention,", "dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to", "or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules", "outputs class LongT5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder:", "if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self,", "block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side", ") relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None):", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is", "mechanisms - (1) Local attention, or (2) Transient-Global attention. This model inherits from", "transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): \"\"\" Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate", "decoder, orphan tokens, i.e. 
those tokens which do not make for the whole", "): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, )", "attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights", "attn_output = self.o(attn_output) present_key_value_state = None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)", "_split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3", "def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model,", "self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return", "relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: \"\"\"Mask local attention mask to", "config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == \"local\": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type", "to speed up decoding. 
If `past_key_values` are used, the user can optionally input", "self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states =", "is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len =", "side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: \"\"\"Compute", "initialize past_key_values with `None` if past does not exist if past_key_values is None:", "self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training,", "exact increments in positions max_exact = num_buckets // 2 is_small = relative_position <", "= nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Relativen attention bias", ">>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model =", ") -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed block\" global id corresponding to each", "input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix =", "self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return", "attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels", "loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels", "cross attention. 
' if expected_num_past_key_values == 4 else ''}\" f\"Got {len(past_key_value)} past key", "apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info(\"Discovered apex.normalization.FusedRMSNorm - will use", "# cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is", "block_len) + x.shape[(dim + 1) :] # If 0 is in output_shape, we", "a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\" # Warning message for FutureWarning:", "return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example: ```python >>>", "to general usage and behavior. Parameters: config ([`LongT5Config`]): Model configuration class with all", "# Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask", "scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len)", "= shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states", "= logging.get_logger(__name__) _CONFIG_FOR_DOC = \"LongT5Config\" _TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO:", "torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get", "# you may not use this file except in compliance with the License.", "0) -> torch.Tensor: \"\"\"Pad a tensor so that a sequence length will be", "def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder", "= _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according to the", "# Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def 
set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self,", "shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose", "attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention", "[`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for", "past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads,", "of decoder input sequence tokens in the vocabulary. Indices can be obtained using", "in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)", "aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads,", "block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask", "global_segment_ids[:, None, :])[:, None, ...] attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) #", "= nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set()", "of the cross-attention modules in the decoder. 
Mask values selected in `[0, 1]`:", "return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated", "to attend global aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len", "elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs)", "= all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in", "1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\"", "= outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class", "* block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes 3-blocked", "for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden", "config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim", "and value hidden states of the attention blocks. 
Can be used to speed", "= True else: position_bias = self.compute_bias(real_seq_length, key_length, device=scores.device) # if key and values", "self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states", "and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared", "Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool]", "If the dimension length is not a multiple of `block_len`, it will be", "convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix.", "scenario, as we use this strategy only for a decoder, orphan tokens, i.e.", "local attentiont. For more information, see: https://arxiv.org/pdf/2112.07916.pdf. \"\"\" num_blocks = x.shape[block_dim] pad =", ") # equivalent of torch.einsum(\"bnqd,bnkd->bnqk\", query_states, key_states), compatible with onnx op>9 if position_bias", "# (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias", "3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states", ">= 0).item(), \"Verify that `shifted_input_ids` has only positive values\" return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel):", "-1:] return { \"decoder_input_ids\": input_ids, \"past_key_values\": past, \"encoder_outputs\": encoder_outputs, \"attention_mask\": attention_mask, \"head_mask\": head_mask,", "License for the specific language governing permissions and # limitations under the License.", "Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss", "encoder_outputs[0] # Decode decoder_outputs = 
self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask,", "attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( \"For encoder attention mechanism, either `local` or", "position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is", "global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max", "not allowed to attend tokens farther than ``local_radius.\"\"\" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask =", "decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens", "self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate)", "_keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size,", "self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is", "for pretraining take a look a [LONGT5 Training](./longt5#training). 
attention_mask (`torch.FloatTensor` of shape `(batch_size,", "if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs", "pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return x", "indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list,", "self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod #", "values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values", "past states\" real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length =", "layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs =", ":= seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids", "speed up decoding\") return past reordered_decoder_past = () for layer_past_states in past: #", "_make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Obtain the \"fixed block\"", "nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads)", "and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if", "of LongT5LayerNorm\") except ImportError: # using the normal LongT5LayerNorm pass except Exception: logger.warning(\"discovered", "relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / 
math.log(max_distance / max_exact) *", "_make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size,", "\" f\"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4", "not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We", "1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return", "is passed to the encoder. Please make sure this is intended.\") expected_num_past_key_values =", "x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int,", "summing over individual blocks.\"\"\" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >=", "all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states,", "(if key_value_states is None) or attention over source sentence (provided by key_value_states). \"\"\"", "and doesn't shift, which is also known as Root Mean # Square Layer", "n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states", "module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid", "& Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm", "mean. 
\"\"\" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): #", "head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): use_cache = use_cache if use_cache", "# Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask,", "encoder_config.is_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True", "more information\" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item", "2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights", "= torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <=", "consecutive blocks for each input block for local attentiont. For more information, see:", "_TOKENIZER_FOR_DOC = \"T5Tokenizer\" _CHECKPOINT_FOR_DOC = \"google/long-t5-local-base\" # TODO: Update before the merge LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST", "by <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. 
It's an encoder-decoder transformer", "LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final", "encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device,", "module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if", "key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None,", "to be defined. In LongT5 it is usually set to the\" \" pad_token_id.", "+ 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow", "tensor to multiple of block_len if x.shape[dim] % block_len != 0: x =", "attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)", "= shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks ->", "lookup matrix. 
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of", "-1, self.n_heads, self.key_value_proj_dim) def unshape(states): \"\"\"reshape\"\"\" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components", "= reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states)", "= LongT5Attention(config, has_relative_attention_bias=False) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self,", "layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(LONGT5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor]", "key_value_states, past_key_value): \"\"\"projects hidden states correctly to key/query states\"\"\" if key_value_states is None:", "different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention. This", "just need to make it broadcastable to all heads. # We use local", "global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int)", "self.block = nn.ModuleList( [LongT5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm", "# [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals,", "# need to set correct `past` for each of the four key /", "along the given `dim`. 
If the dimension length is not a multiple of", "an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is", "index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim =", "in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int,", "bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket)", "to return a [`~utils.ModelOutput`] instead of a plain tuple. \"\"\" LONGT5_ENCODER_INPUTS_DOCSTRING = r\"\"\"", "value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape ==", "known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated", "See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None", "gradient checkpointing ) else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias,", "position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states", "torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:,", "with -1e10 (according to the original 
implementation) mask = torch.where(mask > 0, 0.0,", "value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head)", "reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) ==", "( \"self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to", "self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True,", "-> torch.Tensor: \"\"\"Compute individual block aggregates by summing over individual blocks.\"\"\" # (batch...,", "= torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if", "value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] #", "is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2]", "is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f\"There should be", "right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values,", "= None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers", "present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else", "If a 2D or 3D attention mask is provided for the cross-attention #", "states of the attention blocks. 
Can be used to speed up decoding. If", "from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act:", "blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] *", "to `True` if {self} is used as a decoder\" if attention_mask is None:", "Causal mask will also be used by default. head_mask (`torch.FloatTensor` of shape `(num_heads,)`", "int) -> torch.Tensor: \"\"\"Split an input tensor into blocks of a given `block_len`", "False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)", "prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads,", "saving, resizing the input embeddings, pruning heads etc.) This model is also a", "n_heads, block_len, 3 * block_len + global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if", "-100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), \"Verify that `shifted_input_ids` has only positive values\"", "Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size,", "else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions,", "4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key", "torch.where(mask > 0, 0.0, -1e10) # We need to adjust position bias shape", 
"associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether", "return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor:", "a multiple of `block_len`\"\"\" pad_len = -x.shape[dim] % block_len # Handle cases when", "max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets -", "3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) ->", "self.config.initializer_factor # Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0)", "head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`:", "... \"Stancld/longt5-tglobal-large-16384-pubmed-3k_steps\" ... ) >>> # Let's try a very long input. 
>>>", "blocks.\"\"\" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype,", "else: layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value,", "if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): \"\"\"Transient-Global self attention used in", "-> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance", "more on how to prepare `input_ids` for pretraining take a look a [LONGT5", "pad_token_id. See LongT5 docs for more information\" ) # shift inputs to the", "(`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you", "self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads *", "= [ r\"encoder.embed_tokens.weight\", ] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model)", "softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v =", "3 * block_len) if position_bias is None: # position_bias shape: # (1, 1,", "Item assignment is not supported natively for proxies. 
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,),", "= true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids", "(self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class LongT5PreTrainedModel(PreTrainedModel): \"\"\"", "of the model. Initializing with a config file does not load the weights", "is defined as memory_position - query_position, i.e. the distance in tokens from the", "relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length,", "= seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0:", "if hasattr(module.wi, \"bias\") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff)", "block_len] x = nn.functional.pad(x, pad=pad, mode=\"constant\", value=pad_value) blocks_list: List[torch.Tensor] = [] for i", "self.layer.append(LongT5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None,", "(`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain", "scales and doesn't shift, which is also known as Root Mean # Square", "LongT5 model is an extension of T5 model, and it enables using one", "config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout", "config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh 
TensorFlow initialization", "bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs", "See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more", ") self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None,", "\"bias\") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))", "None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype", "(batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states", "kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions,", "self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa", "( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long)", "2 past states: keys and values. Got { len(past_key_value)} past states\" real_seq_length +=", "LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`", "inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode", "heads to prune in this layer} See base class PreTrainedModel \"\"\" for layer,", "returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return", "= nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, #", "= config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate", "() for layer_past_state in layer_past_states: # need to set correct `past` for each", "+ self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]:", "past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict:", "sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values", "3 * block_len + global_seq_len) scores = torch.einsum(\"...qhd,...khd->...hqk\", query_states, key_states) if mask is", "block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: \"\"\"Makes 3-blocked relative", "model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model", "to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model,", "to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size,", "(training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask,", "= relative_position < max_exact # The other half of the buckets are for", "= None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers", "locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :] locality_mask =", "LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache", "self.decoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads of the model. heads_to_prune: dict of", "by -1. \"\"\" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends", "self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing =", "side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate \"local\" and \"side\"/\"global\" key/value states to allow", "two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = \"\"\" The input argument `head_mask`", ">>> # Let's try a very long input. 
>>> input_ids = tokenizer( ...", "= encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values", "up decoding\") return past reordered_decoder_past = () for layer_past_states in past: # get", "return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) ->", "(according to the original implementation) mask = torch.where(mask > 0, 0.0, -1e10) #", "encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings(\"\"\"LONGT5", "key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance,", "self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states", "output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, )", "is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is", "hasattr(module.wi_0, \"bias\") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) **", "global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] 
attention_side_bias = torch.where(side_attention_mask", "self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads -", "block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor: \"\"\"Concatenate three consecutive", "[ r\"encoder.embed_tokens.weight\", r\"decoder.embed_tokens.weight\", ] _keys_to_ignore_on_load_unexpected = [ r\"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight\", ] def __init__(self, config: LongT5Config):", "0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket(", "std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, \"bias\") and module.wi_0.bias is not None:", "and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`,", "past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states,", "shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split", "BaseModelOutput]: r\"\"\" Returns: Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer", "1) :] # If 0 is in output_shape, we cannot apply reshape because", "nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self):", "@add_start_docstrings( \"The bare LONGT5 Model transformer outputting encoder's raw hidden-states without any specific", "invalid. 
We use smaller buckets for small absolute relative_position and larger buckets for", "LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def", "a simple interface for downloading and loading pretrained models. \"\"\" config_class = LongT5Config", "handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1", "Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool]", "None if do_cross_attention: # the actual query length is unknown for cross attention", "preceding block. Padding tokens from the original sequence are represented by -1. \"\"\"", "get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): \"\"\" Prunes heads", "block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] //", "reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return", "% block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim]", "padded first with selected `pad_value`. 
\"\"\" # pad tensor to multiple of block_len", "_get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: \"\"\"Prepare attention mask to be", "not None and inputs_embeds is not None: err_msg_prefix = \"decoder_\" if self.is_decoder else", "= outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return", "+ num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...]", "is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape,", "(masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Returns:", "= layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if", "_make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create", "for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.FloatTensor` of shape", "input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask:", "parameters of the model. Initializing with a config file does not load the", "set to `True`, `past_key_values` key value states are returned and can be used", "block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores +=", "on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for", "implements for all its model (such as downloading or saving, resizing the input", "self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim =", "LongT5Model >>> tokenizer = T5Tokenizer.from_pretrained(\"google/long-t5-local-base\") >>> model = LongT5Model.from_pretrained(\"google/long-t5-local-base\") >>> # Let's try", "is None) or attention over source sentence (provided by key_value_states). \"\"\" # Input", "= config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == \"local\": attention_layer =", "elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624", "<NAME>, <NAME>, <NAME>, <NAME> and <NAME>. It's an encoder-decoder transformer pre-trained in a", "if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads,", "or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules.", "num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape", "sequence are represented by -1. \"\"\" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor)", "from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs" ]
[ "'_1720_' + em + '.pickle', 'r') as f: rms_1720 = pickle.load(f) except IOError:", "'_1720_' + em + '.pickle', 'r') as f: rms_1612 = pickle.load(f) print '1612", "vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' + em + '.pickle',", "spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1720", "75), 'ch002': (-52, 10)} for source_name in ['4C+25.14', 'ch002']: for em in ['absorption',", "+ '_1720_' + em + '.pickle', 'r') as f: rms_1612 = pickle.load(f) print", "dv = vel_axis[1] - vel_axis[0] v_at_0 = vel_axis[0] min_v_index = int(min((min_vel - v_at_0)", "f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f:", "rms_1720 = pickle.load(f) print '1720 replaced by 1612 for ' + save_as_name #", "for ' + save_as_name # Trim spectra so all 4 cover the same", "['4C+25.14', 'ch002']: for em in ['absorption', 'emission']: save_as_name = source_name + '_' +", "source_name + '_1612_' + em + '.pickle', 'r') as f: spectrum_1612 = pickle.load(f)", "em + '.pickle', 'r') as f: rms_1720 = pickle.load(f) except IOError: with open('pickles/'", "(max_vel - v_at_0) / dv)) if min_v_index < 0: min_v_index = 0 return", "as f: rms_1665 = pickle.load(f) with open('pickles/' + source_name + '_1667_' + em", "'r') as f: rms_1667 = pickle.load(f) # Loading satellite lines. 
Current version of", "as f: spectrum_1665 = pickle.load(f) with open('pickles/' + source_name + '_1665_vel.pickle', 'r') as", "f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f:", "+ em + '.pickle', 'r') as f: rms_1720 = pickle.load(f) print '1720 replaced", "FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665],", "'r') as f: rms_1665 = pickle.load(f) with open('pickles/' + source_name + '_1667_' +", "f: vel_axis_1667 = pickle.load(f) with open('pickles/rms_' + source_name + '_1667_' + em +", "as f: rms_1612 = pickle.load(f) except IOError: with open('pickles/' + source_name + '_1720_'", "+ em + '.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/' +", "velocity range. (min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1],", "with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with", "= 1. 
# Run BGD final_parameters = BGD.Main(source_name, vel_axes, spectra, rms, expected_min_fwhm,save_as_name) #", "'r') as f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r')", "'1720 replaced by 1612 for ' + save_as_name # Trim spectra so all", "+ '.pickle', 'r') as f: rms_1612 = pickle.load(f) except IOError: with open('pickles/' +", "spectrum_1667 = pickle.load(f) with open('pickles/' + source_name + '_1667_vel.pickle', 'r') as f: vel_axis_1667", "'r') as f: rms_1720 = pickle.load(f) except IOError: with open('pickles/' + source_name +", "pickle.load(f) except IOError: with open('pickles/' + source_name + '_1720_' + em + '.pickle',", "vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1],", "vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' + em + '.pickle',", "f: rms_1720 = pickle.load(f) print '1720 replaced by 1612 for ' + save_as_name", "FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0],", "em # Loading main line data with open('pickles/' + source_name + '_1665_' +", "= pickle.load(f) print '1612 replaced by 1720 for ' + save_as_name try: with", "(min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720,", "/ dv)) max_v_index = int(max((min_vel - v_at_0) / dv, (max_vel - v_at_0) /", "print '1720 replaced by 1612 for ' + save_as_name # Trim spectra so", "with open('pickles/' + 
source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with", "spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm = 1. # Run BGD", "'.pickle', 'r') as f: rms_1612 = pickle.load(f) except IOError: with open('pickles/' + source_name", "save_as_name = source_name + '_' + em # Loading main line data with", "+ '_1720_' + em + '.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with", "+ em + '.pickle', 'r') as f: rms_1612 = pickle.load(f) print '1612 replaced", "'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' +", "pickle.load(f) with open('pickles/' + source_name + '_1665_vel.pickle', 'r') as f: vel_axis_1665 = pickle.load(f)", "return (min_v_index, max_v_index) vel_range = {'4C+25.14': (-85, 75), 'ch002': (-52, 10)} for source_name", "pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f)", "other one. try: with open('pickles/' + source_name + '_1612_' + em + '.pickle',", "'.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle',", "pickle.load(f) with open('pickles/rms_' + source_name + '_1667_' + em + '.pickle', 'r') as", "so if only # one is present just duplicate it for the other", "'r') as f: vel_axis_1665 = pickle.load(f) with open('pickles/rms_' + source_name + '_1665_' +", "as f: vel_axis_1665 = pickle.load(f) with open('pickles/rms_' + source_name + '_1665_' + em", "with open('pickles/' + source_name + '_1665_vel.pickle', 'r') as f: vel_axis_1665 = pickle.load(f) with", "spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1720", "= pickle.load(f) with open('pickles/rms_' + source_name + '_1665_' + em + '.pickle', 'r')", "with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with", "< 0: min_v_index = 0 return (min_v_index, 
max_v_index) vel_range = {'4C+25.14': (-85, 75),", "source_name + '_1665_' + em + '.pickle', 'r') as f: spectrum_1665 = pickle.load(f)", "v_at_0) / dv)) if min_v_index < 0: min_v_index = 0 return (min_v_index, max_v_index)", "IOError: with open('pickles/' + source_name + '_1720_' + em + '.pickle', 'r') as", "max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra =", "'_' + em # Loading main line data with open('pickles/' + source_name +", "source_name + '_1667_' + em + '.pickle', 'r') as f: rms_1667 = pickle.load(f)", "pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' + em + '.pickle', 'r') as", "with open('pickles/rms_' + source_name + '_1667_' + em + '.pickle', 'r') as f:", "vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665, rms_1667, rms_1720]", "= int(min((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv)) max_v_index =", "max indices corresponding to the min and max velocities given. ''' dv =", "em + '.pickle', 'r') as f: spectrum_1665 = pickle.load(f) with open('pickles/' + source_name", "+ '.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name +", "given. 
''' dv = vel_axis[1] - vel_axis[0] v_at_0 = vel_axis[0] min_v_index = int(min((min_vel", "pickle.load(f) except IOError: with open('pickles/' + source_name + '_1612_' + em + '.pickle',", "+ '_1612_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name +", "= source_name + '_' + em # Loading main line data with open('pickles/'", "- v_at_0) / dv)) if min_v_index < 0: min_v_index = 0 return (min_v_index,", "'r') as f: spectrum_1665 = pickle.load(f) with open('pickles/' + source_name + '_1665_vel.pickle', 'r')", "(-52, 10)} for source_name in ['4C+25.14', 'ch002']: for em in ['absorption', 'emission']: save_as_name", "'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r')", "+ '.pickle', 'r') as f: rms_1667 = pickle.load(f) # Loading satellite lines. Current", "= pickle.load(f) print '1720 replaced by 1612 for ' + save_as_name # Trim", "+ source_name + '_1612_' + em + '.pickle', 'r') as f: spectrum_1720 =", "source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name", "+ source_name + '_1665_vel.pickle', 'r') as f: vel_axis_1665 = pickle.load(f) with open('pickles/rms_' +", "open('pickles/' + source_name + '_1667_vel.pickle', 'r') as f: vel_axis_1667 = pickle.load(f) with open('pickles/rms_'", "duplicate it for the other one. 
try: with open('pickles/' + source_name + '_1612_'", "+ '.pickle', 'r') as f: rms_1665 = pickle.load(f) with open('pickles/' + source_name +", "pickle.load(f) print '1720 replaced by 1612 for ' + save_as_name # Trim spectra", "= [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms =", "open('pickles/' + source_name + '_1612_' + em + '.pickle', 'r') as f: spectrum_1720", "= pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' + em + '.pickle', 'r')", "pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f)", "source_name + '_' + em # Loading main line data with open('pickles/' +", "pickle.load(f) with open('pickles/' + source_name + '_1667_' + em + '.pickle', 'r') as", "'r') as f: spectrum_1667 = pickle.load(f) with open('pickles/' + source_name + '_1667_vel.pickle', 'r')", "= FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612],", "replaced by 1720 for ' + save_as_name try: with open('pickles/' + source_name +", "source_name + '_1665_' + em + '.pickle', 'r') as f: rms_1665 = pickle.load(f)", "range. (min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665)", "+ '.pickle', 'r') as f: spectrum_1667 = pickle.load(f) with open('pickles/' + source_name +", "min and max velocities given. 
''' dv = vel_axis[1] - vel_axis[0] v_at_0 =", "+ source_name + '_1667_vel.pickle', 'r') as f: vel_axis_1667 = pickle.load(f) with open('pickles/rms_' +", "vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' + em + '.pickle',", "'_1612_' + em + '.pickle', 'r') as f: rms_1612 = pickle.load(f) except IOError:", "open('pickles/rms_' + source_name + '_1720_' + em + '.pickle', 'r') as f: rms_1720", "10)} for source_name in ['4C+25.14', 'ch002']: for em in ['absorption', 'emission']: save_as_name =", "vel_axis[0] v_at_0 = vel_axis[0] min_v_index = int(min((min_vel - v_at_0) / dv, (max_vel -", "/ dv)) if min_v_index < 0: min_v_index = 0 return (min_v_index, max_v_index) vel_range", "em in ['absorption', 'emission']: save_as_name = source_name + '_' + em # Loading", "vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]]", "- v_at_0) / dv, (max_vel - v_at_0) / dv)) if min_v_index < 0:", "f: spectrum_1667 = pickle.load(f) with open('pickles/' + source_name + '_1667_vel.pickle', 'r') as f:", "+ source_name + '_1665_' + em + '.pickle', 'r') as f: rms_1665 =", "+ em # Loading main line data with open('pickles/' + source_name + '_1665_'", "open('pickles/' + source_name + '_1665_' + em + '.pickle', 'r') as f: spectrum_1665", "vel_axis_1665 = pickle.load(f) with open('pickles/rms_' + source_name + '_1665_' + em + '.pickle',", "'.pickle', 'r') as f: rms_1665 = pickle.load(f) with open('pickles/' + source_name + '_1667_'", "vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], 
spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665, rms_1667,", "corresponding to the min and max velocities given. ''' dv = vel_axis[1] -", "(-85, 75), 'ch002': (-52, 10)} for source_name in ['4C+25.14', 'ch002']: for em in", "as f: rms_1612 = pickle.load(f) print '1612 replaced by 1720 for ' +", "source_name + '_1667_' + em + '.pickle', 'r') as f: spectrum_1667 = pickle.load(f)", "only # one is present just duplicate it for the other one. try:", "pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f)", "# one is present just duplicate it for the other one. try: with", "+ em + '.pickle', 'r') as f: spectrum_1665 = pickle.load(f) with open('pickles/' +", "pickle.load(f) with open('pickles/' + source_name + '_1667_vel.pickle', 'r') as f: vel_axis_1667 = pickle.load(f)", "print '1612 replaced by 1720 for ' + save_as_name try: with open('pickles/' +", "+ source_name + '_1720_' + em + '.pickle', 'r') as f: spectrum_1612 =", "+ save_as_name # Trim spectra so all 4 cover the same velocity range.", "pickle.load(f) with open('pickles/rms_' + source_name + '_1665_' + em + '.pickle', 'r') as", "= pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1720 =", "1. 
# Run BGD final_parameters = BGD.Main(source_name, vel_axes, spectra, rms, expected_min_fwhm,save_as_name) # Print", "in ['4C+25.14', 'ch002']: for em in ['absorption', 'emission']: save_as_name = source_name + '_'", "f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f:", "if min_v_index < 0: min_v_index = 0 return (min_v_index, max_v_index) vel_range = {'4C+25.14':", "'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r')", "'_1720_' + em + '.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/'", "'.pickle', 'r') as f: rms_1720 = pickle.load(f) print '1720 replaced by 1612 for", "indices corresponding to the min and max velocities given. ''' dv = vel_axis[1]", "+ '_1612_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name +", "rms_1665, rms_1667, rms_1720] expected_min_fwhm = 1. # Run BGD final_parameters = BGD.Main(source_name, vel_axes,", "satellite lines, so if only # one is present just duplicate it for", "line data with open('pickles/' + source_name + '_1665_' + em + '.pickle', 'r')", "(min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667,", "+ '_1720_' + em + '.pickle', 'r') as f: rms_1720 = pickle.load(f) except", "f: vel_axis_1665 = pickle.load(f) with open('pickles/rms_' + source_name + '_1665_' + em +", "max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667)", "v_at_0) / dv, (max_vel - v_at_0) / dv)) max_v_index = int(max((min_vel - v_at_0)", "[spectrum_1612[min_index_1612:max_index_1612], 
spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm = 1.", "max_vel, vel_axis): ''' Finds the min and max indices corresponding to the min", "em + '.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name", "(min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes", "(https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds the min and max indices", "em + '.pickle', 'r') as f: rms_1665 = pickle.load(f) with open('pickles/' + source_name", "'_1667_' + em + '.pickle', 'r') as f: spectrum_1667 = pickle.load(f) with open('pickles/'", "= 0 return (min_v_index, max_v_index) vel_range = {'4C+25.14': (-85, 75), 'ch002': (-52, 10)}", "= pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1612 =", "vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667],", "spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1612", "+ source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' +", "open('pickles/' + source_name + '_1612_' + em + '.pickle', 'r') as f: spectrum_1612", "open('pickles/' + source_name + '_1665_vel.pickle', 'r') as f: vel_axis_1665 = pickle.load(f) with open('pickles/rms_'", "try: with open('pickles/' + source_name + '_1720_' + em + '.pickle', 'r') as", "(max_vel - v_at_0) / 
dv)) max_v_index = int(max((min_vel - v_at_0) / dv, (max_vel", "+ source_name + '_1612_' + em + '.pickle', 'r') as f: rms_1720 =", "+ '_1667_' + em + '.pickle', 'r') as f: spectrum_1667 = pickle.load(f) with", "max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes =", "so all 4 cover the same velocity range. (min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1],", "Sample code showing implementation of the Bayesian Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) '''", "+ source_name + '_1612_' + em + '.pickle', 'r') as f: spectrum_1612 =", "dv)) if min_v_index < 0: min_v_index = 0 return (min_v_index, max_v_index) vel_range =", "by 1612 for ' + save_as_name # Trim spectra so all 4 cover", "with open('pickles/' + source_name + '_1720_' + em + '.pickle', 'r') as f:", "just duplicate it for the other one. try: with open('pickles/' + source_name +", "'r') as f: rms_1720 = pickle.load(f) print '1720 replaced by 1612 for '", "'_1665_' + em + '.pickle', 'r') as f: rms_1665 = pickle.load(f) with open('pickles/'", "= pickle.load(f) except IOError: with open('pickles/' + source_name + '_1612_' + em +", "vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' + em + '.pickle',", "['absorption', 'emission']: save_as_name = source_name + '_' + em # Loading main line", "'emission']: save_as_name = source_name + '_' + em # Loading main line data", "vel_axis[1] - vel_axis[0] v_at_0 = vel_axis[0] min_v_index = int(min((min_vel - v_at_0) / dv,", "with open('pickles/' + source_name + '_1667_vel.pickle', 'r') as f: vel_axis_1667 = pickle.load(f) with", "Current version of BGD requires both satellite lines, so if only # one", "both satellite lines, so if only # one is present just duplicate it", "one. 
try: with open('pickles/' + source_name + '_1612_' + em + '.pickle', 'r')", "f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f:", "'r') as f: rms_1612 = pickle.load(f) except IOError: with open('pickles/' + source_name +", "(min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra", "if only # one is present just duplicate it for the other one.", "in ['absorption', 'emission']: save_as_name = source_name + '_' + em # Loading main", "+ '_1665_vel.pickle', 'r') as f: vel_axis_1665 = pickle.load(f) with open('pickles/rms_' + source_name +", "= [rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm = 1. # Run BGD final_parameters =", "source_name + '_1665_vel.pickle', 'r') as f: vel_axis_1665 = pickle.load(f) with open('pickles/rms_' + source_name", "4 cover the same velocity range. (min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665,", "open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_'", "+ '_1665_' + em + '.pickle', 'r') as f: rms_1665 = pickle.load(f) with", "+ '.pickle', 'r') as f: spectrum_1665 = pickle.load(f) with open('pickles/' + source_name +", "with open('pickles/rms_' + source_name + '_1665_' + em + '.pickle', 'r') as f:", "pickle ''' Sample code showing implementation of the Bayesian Gaussian Decomposition algorithm BGD", "'.pickle', 'r') as f: rms_1720 = pickle.load(f) except IOError: with open('pickles/' + source_name", "+ '_1665_' + em + '.pickle', 'r') as f: spectrum_1665 = pickle.load(f) with", "f: rms_1667 = pickle.load(f) # Loading satellite lines. 
Current version of BGD requires", "rms_1612 = pickle.load(f) except IOError: with open('pickles/' + source_name + '_1720_' + em", "'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' +", "'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' +", "as f: rms_1720 = pickle.load(f) print '1720 replaced by 1612 for ' +", "'1612 replaced by 1720 for ' + save_as_name try: with open('pickles/' + source_name", "(min_v_index, max_v_index) vel_range = {'4C+25.14': (-85, 75), 'ch002': (-52, 10)} for source_name in", "vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]]", "int(max((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv)) if min_v_index <", "min_v_index = 0 return (min_v_index, max_v_index) vel_range = {'4C+25.14': (-85, 75), 'ch002': (-52,", "'.pickle', 'r') as f: spectrum_1667 = pickle.load(f) with open('pickles/' + source_name + '_1667_vel.pickle',", "+ em + '.pickle', 'r') as f: rms_1667 = pickle.load(f) # Loading satellite", "'_1612_' + em + '.pickle', 'r') as f: rms_1720 = pickle.load(f) print '1720", "BGD import pickle ''' Sample code showing implementation of the Bayesian Gaussian Decomposition", "'_1612_' + em + '.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with open('pickles/'", "'_1665_vel.pickle', 'r') as f: vel_axis_1665 = pickle.load(f) with open('pickles/rms_' + source_name + '_1665_'", "'r') as f: vel_axis_1667 = pickle.load(f) with open('pickles/rms_' + source_name + '_1667_' +", "with open('pickles/' + source_name + '_1612_' + em + '.pickle', 'r') as f:", "''' Sample code showing implementation of the Bayesian Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/)", "of 
BGD requires both satellite lines, so if only # one is present", "1720 for ' + save_as_name try: with open('pickles/' + source_name + '_1720_' +", "for ' + save_as_name try: with open('pickles/' + source_name + '_1720_' + em", "+ source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' +", "showing implementation of the Bayesian Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel,", "max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720)", "em + '.pickle', 'r') as f: spectrum_1667 = pickle.load(f) with open('pickles/' + source_name", "velocities given. ''' dv = vel_axis[1] - vel_axis[0] v_at_0 = vel_axis[0] min_v_index =", "rms_1667, rms_1720] expected_min_fwhm = 1. # Run BGD final_parameters = BGD.Main(source_name, vel_axes, spectra,", "''' dv = vel_axis[1] - vel_axis[0] v_at_0 = vel_axis[0] min_v_index = int(min((min_vel -", "pickle.load(f) print '1612 replaced by 1720 for ' + save_as_name try: with open('pickles/'", "em + '.pickle', 'r') as f: rms_1720 = pickle.load(f) print '1720 replaced by", "+ source_name + '_1612_' + em + '.pickle', 'r') as f: rms_1612 =", "= BGD.Main(source_name, vel_axes, spectra, rms, expected_min_fwhm,save_as_name) # Print results to terminal BGD.ResultsReport(final_parameters, save_as_name)", "' + save_as_name # Trim spectra so all 4 cover the same velocity", "source_name + '_1612_' + em + '.pickle', 'r') as f: rms_1612 = pickle.load(f)", "the Bayesian Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel, vel_axis): '''", "max_v_index) vel_range = {'4C+25.14': (-85, 75), 'ch002': (-52, 10)} for source_name in ['4C+25.14',", "BGD final_parameters = BGD.Main(source_name, 
vel_axes, spectra, rms, expected_min_fwhm,save_as_name) # Print results to terminal", "+ '_1612_' + em + '.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with", "= int(max((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv)) if min_v_index", "'r') as f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r')", "- v_at_0) / dv)) max_v_index = int(max((min_vel - v_at_0) / dv, (max_vel -", "+ '.pickle', 'r') as f: rms_1720 = pickle.load(f) print '1720 replaced by 1612", "= pickle.load(f) with open('pickles/' + source_name + '_1667_vel.pickle', 'r') as f: vel_axis_1667 =", "f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' + em +", "+ '_1667_' + em + '.pickle', 'r') as f: rms_1667 = pickle.load(f) #", "Loading main line data with open('pickles/' + source_name + '_1665_' + em +", "'_1612_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_'", "FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0],", "open('pickles/rms_' + source_name + '_1612_' + em + '.pickle', 'r') as f: rms_1720", "for the other one. 
try: with open('pickles/' + source_name + '_1612_' + em", "as f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as", "as f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as", "open('pickles/rms_' + source_name + '_1667_' + em + '.pickle', 'r') as f: rms_1667", "= pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' + em + '.pickle', 'r')", "+ em + '.pickle', 'r') as f: rms_1720 = pickle.load(f) except IOError: with", "em + '.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name", "v_at_0) / dv)) max_v_index = int(max((min_vel - v_at_0) / dv, (max_vel - v_at_0)", "open('pickles/rms_' + source_name + '_1720_' + em + '.pickle', 'r') as f: rms_1612", "vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720)", "as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' + em", "open('pickles/' + source_name + '_1720_' + em + '.pickle', 'r') as f: spectrum_1720", "f: rms_1665 = pickle.load(f) with open('pickles/' + source_name + '_1667_' + em +", "same velocity range. (min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0],", "requires both satellite lines, so if only # one is present just duplicate", "Trim spectra so all 4 cover the same velocity range. 
(min_index_1612, max_index_1612) =", "dv, (max_vel - v_at_0) / dv)) if min_v_index < 0: min_v_index = 0", "as f: spectrum_1667 = pickle.load(f) with open('pickles/' + source_name + '_1667_vel.pickle', 'r') as", "main line data with open('pickles/' + source_name + '_1665_' + em + '.pickle',", "algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds the min and", "= pickle.load(f) with open('pickles/' + source_name + '_1667_' + em + '.pickle', 'r')", "source_name + '_1667_vel.pickle', 'r') as f: vel_axis_1667 = pickle.load(f) with open('pickles/rms_' + source_name", "satellite lines. Current version of BGD requires both satellite lines, so if only", "of the Bayesian Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel, vel_axis):", "# Trim spectra so all 4 cover the same velocity range. (min_index_1612, max_index_1612)", "+ '_1720_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name +", "+ em + '.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with open('pickles/' +", "= {'4C+25.14': (-85, 75), 'ch002': (-52, 10)} for source_name in ['4C+25.14', 'ch002']: for", "as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' + em", "vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms", "+ '_' + em # Loading main line data with open('pickles/' + source_name", "as f: vel_axis_1667 = pickle.load(f) with open('pickles/rms_' + source_name + '_1667_' + em", "'_1720_' + em + '.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with 
open('pickles/'", "the same velocity range. (min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) =", "+ source_name + '_1720_' + em + '.pickle', 'r') as f: rms_1720 =", "source_name + '_1612_' + em + '.pickle', 'r') as f: rms_1720 = pickle.load(f)", "spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1612", "open('pickles/' + source_name + '_1667_' + em + '.pickle', 'r') as f: spectrum_1667", "v_at_0 = vel_axis[0] min_v_index = int(min((min_vel - v_at_0) / dv, (max_vel - v_at_0)", "vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667],", "min and max indices corresponding to the min and max velocities given. 
'''", "f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' + em +", "= pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1720 =", "spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm", "'r') as f: rms_1612 = pickle.load(f) print '1612 replaced by 1720 for '", "vel_axis_1667 = pickle.load(f) with open('pickles/rms_' + source_name + '_1667_' + em + '.pickle',", "dv, (max_vel - v_at_0) / dv)) max_v_index = int(max((min_vel - v_at_0) / dv,", "+ source_name + '_1665_' + em + '.pickle', 'r') as f: spectrum_1665 =", "'.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle',", "as f: rms_1720 = pickle.load(f) except IOError: with open('pickles/' + source_name + '_1612_'", "for em in ['absorption', 'emission']: save_as_name = source_name + '_' + em #", "source_name + '_1720_' + em + '.pickle', 'r') as f: rms_1720 = pickle.load(f)", "1612 for ' + save_as_name # Trim spectra so all 4 cover the", "= vel_axis[1] - vel_axis[0] v_at_0 = vel_axis[0] min_v_index = int(min((min_vel - v_at_0) /", "+ source_name + '_1720_' + em + '.pickle', 'r') as f: spectrum_1720 =", "v_at_0) / dv, (max_vel - v_at_0) / dv)) if min_v_index < 0: min_v_index", "+ em + '.pickle', 'r') as f: spectrum_1667 = pickle.load(f) with open('pickles/' +", "'ch002']: for em in ['absorption', 'emission']: save_as_name = source_name + '_' + em", "cover the same velocity range. 
(min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665)", "vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1],", "FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665],", "/ dv, (max_vel - v_at_0) / dv)) max_v_index = int(max((min_vel - v_at_0) /", "vel_range = {'4C+25.14': (-85, 75), 'ch002': (-52, 10)} for source_name in ['4C+25.14', 'ch002']:", "+ '.pickle', 'r') as f: rms_1720 = pickle.load(f) except IOError: with open('pickles/' +", "save_as_name try: with open('pickles/' + source_name + '_1720_' + em + '.pickle', 'r')", "source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name", "pickle.load(f) # Loading satellite lines. Current version of BGD requires both satellite lines,", "def FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds the min and max indices corresponding to", "for source_name in ['4C+25.14', 'ch002']: for em in ['absorption', 'emission']: save_as_name = source_name", "version of BGD requires both satellite lines, so if only # one is", "'.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle',", "except IOError: with open('pickles/' + source_name + '_1720_' + em + '.pickle', 'r')", "spectra so all 4 cover the same velocity range. 
(min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0],", "= pickle.load(f) # Loading satellite lines. Current version of BGD requires both satellite", "pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f)", "with open('pickles/rms_' + source_name + '_1720_' + em + '.pickle', 'r') as f:", "'.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle',", "# Loading satellite lines. Current version of BGD requires both satellite lines, so", "+ source_name + '_1667_' + em + '.pickle', 'r') as f: spectrum_1667 =", "BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds the min and max", "is present just duplicate it for the other one. try: with open('pickles/' +", "pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' + em + '.pickle', 'r') as", "= pickle.load(f) except IOError: with open('pickles/' + source_name + '_1720_' + em +", "0 return (min_v_index, max_v_index) vel_range = {'4C+25.14': (-85, 75), 'ch002': (-52, 10)} for", "+ '_1720_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name +", "[rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm = 1. # Run BGD final_parameters = BGD.Main(source_name,", "as f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as", "the other one. try: with open('pickles/' + source_name + '_1612_' + em +", "vel_axis): ''' Finds the min and max indices corresponding to the min and", "BGD requires both satellite lines, so if only # one is present just", "rms = [rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm = 1. 
# Run BGD final_parameters", "as f: spectrum_1612 = pickle.load(f) with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as", "= FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1720) vel_axes = [vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612],", "int(min((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv)) max_v_index = int(max((min_vel", "+ '.pickle', 'r') as f: rms_1612 = pickle.load(f) print '1612 replaced by 1720", "em + '.pickle', 'r') as f: rms_1612 = pickle.load(f) print '1612 replaced by", "max velocities given. ''' dv = vel_axis[1] - vel_axis[0] v_at_0 = vel_axis[0] min_v_index", "# Loading main line data with open('pickles/' + source_name + '_1665_' + em", "= [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm =", "f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' + em +", "= FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) =", "source_name + '_1720_' + em + '.pickle', 'r') as f: spectrum_1720 = pickle.load(f)", "source_name in ['4C+25.14', 'ch002']: for em in ['absorption', 'emission']: save_as_name = source_name +", "'_1720_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_'", "Finds the min and max indices corresponding to the min and max velocities", "'.pickle', 'r') as f: rms_1667 = pickle.load(f) # Loading satellite lines. 
Current version", "f: rms_1720 = pickle.load(f) except IOError: with open('pickles/' + source_name + '_1612_' +", "'_1612_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_'", "except IOError: with open('pickles/' + source_name + '_1612_' + em + '.pickle', 'r')", "import pickle ''' Sample code showing implementation of the Bayesian Gaussian Decomposition algorithm", "+ save_as_name try: with open('pickles/' + source_name + '_1720_' + em + '.pickle',", "min_v_index = int(min((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv)) max_v_index", "+ '_1667_vel.pickle', 'r') as f: vel_axis_1667 = pickle.load(f) with open('pickles/rms_' + source_name +", "lines. Current version of BGD requires both satellite lines, so if only #", "Run BGD final_parameters = BGD.Main(source_name, vel_axes, spectra, rms, expected_min_fwhm,save_as_name) # Print results to", "+ source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' +", "+ '.pickle', 'r') as f: spectrum_1720 = pickle.load(f) with open('pickles/' + source_name +", "rms_1612 = pickle.load(f) print '1612 replaced by 1720 for ' + save_as_name try:", "to the min and max velocities given. ''' dv = vel_axis[1] - vel_axis[0]", "'_1612_' + em + '.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with open('pickles/'", "f: rms_1612 = pickle.load(f) print '1612 replaced by 1720 for ' + save_as_name", "'ch002': (-52, 10)} for source_name in ['4C+25.14', 'ch002']: for em in ['absorption', 'emission']:", "spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm = 1. 
#", "/ dv, (max_vel - v_at_0) / dv)) if min_v_index < 0: min_v_index =", "'_1667_vel.pickle', 'r') as f: vel_axis_1667 = pickle.load(f) with open('pickles/rms_' + source_name + '_1667_'", "spectrum_1665 = pickle.load(f) with open('pickles/' + source_name + '_1665_vel.pickle', 'r') as f: vel_axis_1665", "present just duplicate it for the other one. try: with open('pickles/' + source_name", "{'4C+25.14': (-85, 75), 'ch002': (-52, 10)} for source_name in ['4C+25.14', 'ch002']: for em", "final_parameters = BGD.Main(source_name, vel_axes, spectra, rms, expected_min_fwhm,save_as_name) # Print results to terminal BGD.ResultsReport(final_parameters,", "source_name + '_1720_' + em + '.pickle', 'r') as f: spectrum_1612 = pickle.load(f)", "source_name + '_1612_' + em + '.pickle', 'r') as f: spectrum_1720 = pickle.load(f)", "as f: rms_1667 = pickle.load(f) # Loading satellite lines. Current version of BGD", "+ '_1720_' + em + '.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with", "''' Finds the min and max indices corresponding to the min and max", "vel_axis_1612) (min_index_1665, max_index_1665) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667)", "'.pickle', 'r') as f: spectrum_1665 = pickle.load(f) with open('pickles/' + source_name + '_1665_vel.pickle',", "rms_1720] expected_min_fwhm = 1. # Run BGD final_parameters = BGD.Main(source_name, vel_axes, spectra, rms,", "# Run BGD final_parameters = BGD.Main(source_name, vel_axes, spectra, rms, expected_min_fwhm,save_as_name) # Print results", "and max velocities given. ''' dv = vel_axis[1] - vel_axis[0] v_at_0 = vel_axis[0]", "Loading satellite lines. Current version of BGD requires both satellite lines, so if", "'_1667_' + em + '.pickle', 'r') as f: rms_1667 = pickle.load(f) # Loading", "save_as_name # Trim spectra so all 4 cover the same velocity range. 
(min_index_1612,", "+ source_name + '_1667_' + em + '.pickle', 'r') as f: rms_1667 =", "lines, so if only # one is present just duplicate it for the", "rms_1667 = pickle.load(f) # Loading satellite lines. Current version of BGD requires both", "Bayesian Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds", "import BGD import pickle ''' Sample code showing implementation of the Bayesian Gaussian", "' + save_as_name try: with open('pickles/' + source_name + '_1720_' + em +", "= vel_axis[0] min_v_index = int(min((min_vel - v_at_0) / dv, (max_vel - v_at_0) /", "- vel_axis[0] v_at_0 = vel_axis[0] min_v_index = int(min((min_vel - v_at_0) / dv, (max_vel", "with open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with", "as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' + em", "it for the other one. try: with open('pickles/' + source_name + '_1612_' +", "one is present just duplicate it for the other one. try: with open('pickles/'", "max_v_index = int(max((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv)) if", "with open('pickles/rms_' + source_name + '_1612_' + em + '.pickle', 'r') as f:", "open('pickles/rms_' + source_name + '_1612_' + em + '.pickle', 'r') as f: rms_1612", "as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' + em", "+ em + '.pickle', 'r') as f: rms_1612 = pickle.load(f) except IOError: with", "''' def FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds the min and max indices corresponding", "'_1720_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_'", "with open('pickles/' + source_name + '_1665_' + em + '.pickle', 'r') as f:", "FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds the min and max indices corresponding to the", "all 4 cover the same velocity range. 
(min_index_1612, max_index_1612) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1612)", "+ source_name + '_1720_' + em + '.pickle', 'r') as f: rms_1612 =", "+ '_1612_' + em + '.pickle', 'r') as f: rms_1720 = pickle.load(f) print", "f: spectrum_1665 = pickle.load(f) with open('pickles/' + source_name + '_1665_vel.pickle', 'r') as f:", "source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name", "source_name + '_1720_' + em + '.pickle', 'r') as f: rms_1612 = pickle.load(f)", "em + '.pickle', 'r') as f: rms_1612 = pickle.load(f) except IOError: with open('pickles/'", "replaced by 1612 for ' + save_as_name # Trim spectra so all 4", "f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name + '_1612_' + em +", "spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665, rms_1667, rms_1720] expected_min_fwhm = 1. # Run", "[vel_axis_1612[min_index_1612:max_index_1612], vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612,", "0: min_v_index = 0 return (min_v_index, max_v_index) vel_range = {'4C+25.14': (-85, 75), 'ch002':", "and max indices corresponding to the min and max velocities given. 
''' dv", "with open('pickles/' + source_name + '_1667_' + em + '.pickle', 'r') as f:", "data with open('pickles/' + source_name + '_1665_' + em + '.pickle', 'r') as", "IOError: with open('pickles/' + source_name + '_1612_' + em + '.pickle', 'r') as", "Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds the", "Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel, vel_axis): ''' Finds the min", "dv)) max_v_index = int(max((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv))", "+ em + '.pickle', 'r') as f: rms_1665 = pickle.load(f) with open('pickles/' +", "rms_1720 = pickle.load(f) except IOError: with open('pickles/' + source_name + '_1612_' + em", "+ source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' +", "= pickle.load(f) with open('pickles/rms_' + source_name + '_1667_' + em + '.pickle', 'r')", "+ '_1612_' + em + '.pickle', 'r') as f: spectrum_1612 = pickle.load(f) with", "- v_at_0) / dv, (max_vel - v_at_0) / dv)) max_v_index = int(max((min_vel -", "try: with open('pickles/' + source_name + '_1612_' + em + '.pickle', 'r') as", "by 1720 for ' + save_as_name try: with open('pickles/' + source_name + '_1720_'", "open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_'", "'.pickle', 'r') as f: rms_1612 = pickle.load(f) print '1612 replaced by 1720 for", "implementation of the Bayesian Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def FindVelIndex(min_vel, max_vel,", "open('pickles/' + source_name + '_1720_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_'", "the min and max indices corresponding to the min and max velocities given.", "= pickle.load(f) with open('pickles/' + source_name + '_1665_vel.pickle', 'r') as f: vel_axis_1665 =", 
"open('pickles/' + source_name + '_1720_' + em + '.pickle', 'r') as f: spectrum_1612", "min_v_index < 0: min_v_index = 0 return (min_v_index, max_v_index) vel_range = {'4C+25.14': (-85,", "rms_1665 = pickle.load(f) with open('pickles/' + source_name + '_1667_' + em + '.pickle',", "vel_axis_1665[min_index_1665:max_index_1665], vel_axis_1667[min_index_1667:max_index_1667], vel_axis_1720[min_index_1720:max_index_1720]] spectra = [spectrum_1612[min_index_1612:max_index_1612], spectrum_1665[min_index_1665:max_index_1665], spectrum_1667[min_index_1667:max_index_1667], spectrum_1720[min_index_1720:max_index_1720]] rms = [rms_1612, rms_1665,", "+ '_1612_' + em + '.pickle', 'r') as f: rms_1612 = pickle.load(f) except", "vel_axis[0] min_v_index = int(min((min_vel - v_at_0) / dv, (max_vel - v_at_0) / dv))", "expected_min_fwhm = 1. # Run BGD final_parameters = BGD.Main(source_name, vel_axes, spectra, rms, expected_min_fwhm,save_as_name)", "= FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1665) (min_index_1667, max_index_1667) = FindVelIndex(vel_range[source_name][0], vel_range[source_name][1], vel_axis_1667) (min_index_1720, max_index_1720) =", "'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_' + source_name + '_1720_' +", "= pickle.load(f) with open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1612 =", "source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1720 = pickle.load(f) with open('pickles/rms_' + source_name", "code showing implementation of the Bayesian Gaussian Decomposition algorithm BGD (https://github.com/AnitaPetzler/BayesGauss/) ''' def", "the min and max velocities given. 
''' dv = vel_axis[1] - vel_axis[0] v_at_0", "open('pickles/' + source_name + '_1612_vel.pickle', 'r') as f: vel_axis_1612 = pickle.load(f) with open('pickles/rms_'", "em + '.pickle', 'r') as f: rms_1667 = pickle.load(f) # Loading satellite lines.", "f: rms_1612 = pickle.load(f) except IOError: with open('pickles/' + source_name + '_1720_' +", "open('pickles/rms_' + source_name + '_1665_' + em + '.pickle', 'r') as f: rms_1665", "'_1665_' + em + '.pickle', 'r') as f: spectrum_1665 = pickle.load(f) with open('pickles/'" ]
[ "+ 1, n): p1 = self.student_list[i] p2 = self.student_list[j] root_folder = self.folder_name +", "'/' + str(p1) + '_' + str(p2) head_to_head = RunMatches(p1, p2, self.num_matches, root_folder,", "== 0: first_player = self.player1 second_player = self.player2 else: first_player = self.player2 second_player", "__init__(self, student_list, num_matches, board_size, komi): self.student_list = student_list self.num_matches = num_matches self.board_size =", "second_player = self.player1 match_folder = self.root_folder + '/match' + str(match_num + 1) with", "None if match_num % 2 == 0: first_player = self.player1 second_player = self.player2", "first_player = self.player2 second_player = self.player1 match_folder = self.root_folder + '/match' + str(match_num", "run_matches(self): for match_num in range(self.num_matches): first_player = None second_player = None if match_num", "str(self.player1) + '_' + str(self.player2) + '_' + str(match_num) + '.py', 'w') as", "komi): self.student_list = student_list self.num_matches = num_matches self.board_size = board_size self.komi = komi", "match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score = match.run_match() print(winner, final_score) t =", "self.module_folder = 'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self):", "class RunMatches(): def __init__(self, p1, p2, num_matches, root_folder, board_size, komi): self.player1 = p1", "komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for match_num in range(self.num_matches): first_player =", "in lines: fw.write(line) fw.close() time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' +", "self.komi, match_folder) winner, final_score = match.run_match() print(winner, final_score) t = Tournament([1,5], 1, 13,", "as Player_1\\n') 
fw.write('import AlphaGoPlayer_' + str(second_player) + ' as Player_2\\n') lines = None", "self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches() class RunMatches(): def __init__(self, p1, p2, num_matches, root_folder,", "num_matches self.board_size = board_size self.komi = komi self.folder_name = 'Tournament' self.module_folder = 'modules'", "with open('single_match.py', 'r') as fr: lines = fr.readlines() fr.close() for line in lines:", "str(self.player1) + '_' + str(self.player2) + '_' + str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi,", "= self.student_list[i] p2 = self.student_list[j] root_folder = self.folder_name + '/' + str(p1) +", "run_tournament(self): n = len(self.student_list) for i in range(n): for j in range(i +", "+ '_' + str(self.player2) + '_' + str(match_num) + '.py', 'w') as fw:", "match_folder) winner, final_score = match.run_match() print(winner, final_score) t = Tournament([1,5], 1, 13, 7.5)", "import importlib import sys import time class Tournament(): def __init__(self, student_list, num_matches, board_size,", "in range(n): for j in range(i + 1, n): p1 = self.student_list[i] p2", "match_folder = self.root_folder + '/match' + str(match_num + 1) with open('modules/tmp_match_' + str(self.player1)", "self.player1 match_folder = self.root_folder + '/match' + str(match_num + 1) with open('modules/tmp_match_' +", "+ str(first_player) + ' as Player_1\\n') fw.write('import AlphaGoPlayer_' + str(second_player) + ' as", "head_to_head.run_matches() class RunMatches(): def __init__(self, p1, p2, num_matches, root_folder, board_size, komi): self.player1 =", "komi): self.player1 = p1 self.player2 = p2 self.num_matches = num_matches self.root_folder = root_folder", "match_num in range(self.num_matches): first_player = None second_player = None if match_num % 2", "with open('modules/tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' + str(match_num) +", "self.folder_name = 'Tournament' 
self.module_folder = 'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder):", "= num_matches self.board_size = board_size self.komi = komi self.folder_name = 'Tournament' self.module_folder =", "p1, p2, num_matches, root_folder, board_size, komi): self.player1 = p1 self.player2 = p2 self.num_matches", "str(match_num + 1) with open('modules/tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_'", "1) with open('modules/tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' + str(match_num)", "root_folder self.board_size = board_size self.komi = komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self):", "RunMatches(): def __init__(self, p1, p2, num_matches, root_folder, board_size, komi): self.player1 = p1 self.player2", "str(self.player2) + '_' + str(match_num) + '.py', 'w') as fw: fw.write('import AlphaGoPlayer_' +", "= len(self.student_list) for i in range(n): for j in range(i + 1, n):", "'/match' + str(match_num + 1) with open('modules/tmp_match_' + str(self.player1) + '_' + str(self.player2)", "str(match_num) + '.py', 'w') as fw: fw.write('import AlphaGoPlayer_' + str(first_player) + ' as", "as fr: lines = fr.readlines() fr.close() for line in lines: fw.write(line) fw.close() time.sleep(1)", "RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches() class RunMatches(): def __init__(self, p1, p2,", "student_list, num_matches, board_size, komi): self.student_list = student_list self.num_matches = num_matches self.board_size = board_size", "n): p1 = self.student_list[i] p2 = self.student_list[j] root_folder = self.folder_name + '/' +", "'_' + str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score = match.run_match() print(winner,", "AlphaGoPlayer_' + str(first_player) + ' as Player_1\\n') fw.write('import AlphaGoPlayer_' + 
str(second_player) + '", "+ str(self.player1) + '_' + str(self.player2) + '_' + str(match_num) + '.py', 'w')", "' as Player_1\\n') fw.write('import AlphaGoPlayer_' + str(second_player) + ' as Player_2\\n') lines =", "def __init__(self, p1, p2, num_matches, root_folder, board_size, komi): self.player1 = p1 self.player2 =", "num_matches self.root_folder = root_folder self.board_size = board_size self.komi = komi if not os.path.exists(self.root_folder):", "lines = fr.readlines() fr.close() for line in lines: fw.write(line) fw.close() time.sleep(1) tmp_match =", "fw.close() time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_'", "fw.write('import AlphaGoPlayer_' + str(first_player) + ' as Player_1\\n') fw.write('import AlphaGoPlayer_' + str(second_player) +", "root_folder = self.folder_name + '/' + str(p1) + '_' + str(p2) head_to_head =", "class Tournament(): def __init__(self, student_list, num_matches, board_size, komi): self.student_list = student_list self.num_matches =", "+ 1) with open('modules/tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' +", "if not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self): n = len(self.student_list) for i in range(n):", "self.folder_name + '/' + str(p1) + '_' + str(p2) head_to_head = RunMatches(p1, p2,", "str(first_player) + ' as Player_1\\n') fw.write('import AlphaGoPlayer_' + str(second_player) + ' as Player_2\\n')", "None with open('single_match.py', 'r') as fr: lines = fr.readlines() fr.close() for line in", "Player_2\\n') lines = None with open('single_match.py', 'r') as fr: lines = fr.readlines() fr.close()", "n = len(self.student_list) for i in range(n): for j in range(i + 1,", "+ str(self.player1) + '_' + str(self.player2) + '_' + str(match_num)) match = tmp_match.SingleMatch(self.board_size,", "range(n): for j in range(i + 1, n): p1 = self.student_list[i] p2 =", "self.player2 second_player = 
self.player1 match_folder = self.root_folder + '/match' + str(match_num + 1)", "= tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score = match.run_match() print(winner, final_score) t = Tournament([1,5],", "in range(i + 1, n): p1 = self.student_list[i] p2 = self.student_list[j] root_folder =", "2 == 0: first_player = self.player1 second_player = self.player2 else: first_player = self.player2", "self.root_folder + '/match' + str(match_num + 1) with open('modules/tmp_match_' + str(self.player1) + '_'", "+ '_' + str(match_num) + '.py', 'w') as fw: fw.write('import AlphaGoPlayer_' + str(first_player)", "importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' + str(match_num)) match =", "open('single_match.py', 'r') as fr: lines = fr.readlines() fr.close() for line in lines: fw.write(line)", "= importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' + str(match_num)) match", "self.player1 second_player = self.player2 else: first_player = self.player2 second_player = self.player1 match_folder =", "as fw: fw.write('import AlphaGoPlayer_' + str(first_player) + ' as Player_1\\n') fw.write('import AlphaGoPlayer_' +", "board_size, komi): self.player1 = p1 self.player2 = p2 self.num_matches = num_matches self.root_folder =", "None second_player = None if match_num % 2 == 0: first_player = self.player1", "for i in range(n): for j in range(i + 1, n): p1 =", "self.root_folder = root_folder self.board_size = board_size self.komi = komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder)", "+ str(match_num) + '.py', 'w') as fw: fw.write('import AlphaGoPlayer_' + str(first_player) + '", "if match_num % 2 == 0: first_player = self.player1 second_player = self.player2 else:", "+ '_' + str(p2) head_to_head = RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches()", "first_player = None second_player = None 
if match_num % 2 == 0: first_player", "p1 self.player2 = p2 self.num_matches = num_matches self.root_folder = root_folder self.board_size = board_size", "+ str(self.player2) + '_' + str(match_num) + '.py', 'w') as fw: fw.write('import AlphaGoPlayer_'", "os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self): n = len(self.student_list) for i", "self.player2 else: first_player = self.player2 second_player = self.player1 match_folder = self.root_folder + '/match'", "fw: fw.write('import AlphaGoPlayer_' + str(first_player) + ' as Player_1\\n') fw.write('import AlphaGoPlayer_' + str(second_player)", "time class Tournament(): def __init__(self, student_list, num_matches, board_size, komi): self.student_list = student_list self.num_matches", "tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' + str(match_num))", "p2, self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches() class RunMatches(): def __init__(self, p1, p2, num_matches,", "fr: lines = fr.readlines() fr.close() for line in lines: fw.write(line) fw.close() time.sleep(1) tmp_match", "num_matches, root_folder, board_size, komi): self.player1 = p1 self.player2 = p2 self.num_matches = num_matches", "1, n): p1 = self.student_list[i] p2 = self.student_list[j] root_folder = self.folder_name + '/'", "import numpy as np import importlib import sys import time class Tournament(): def", "os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self): n = len(self.student_list) for i in range(n): for j", "range(self.num_matches): first_player = None second_player = None if match_num % 2 == 0:", "str(self.player2) + '_' + str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score =", "= board_size self.komi = komi self.folder_name = 'Tournament' 
self.module_folder = 'modules' if not", "= 'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self): n", "if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for match_num in range(self.num_matches): first_player = None", "board_size, komi): self.student_list = student_list self.num_matches = num_matches self.board_size = board_size self.komi =", "fw.write('import AlphaGoPlayer_' + str(second_player) + ' as Player_2\\n') lines = None with open('single_match.py',", "self.student_list = student_list self.num_matches = num_matches self.board_size = board_size self.komi = komi self.folder_name", "i in range(n): for j in range(i + 1, n): p1 = self.student_list[i]", "self.board_size, self.komi) head_to_head.run_matches() class RunMatches(): def __init__(self, p1, p2, num_matches, root_folder, board_size, komi):", "match_num % 2 == 0: first_player = self.player1 second_player = self.player2 else: first_player", "import time class Tournament(): def __init__(self, student_list, num_matches, board_size, komi): self.student_list = student_list", "as np import importlib import sys import time class Tournament(): def __init__(self, student_list,", "def __init__(self, student_list, num_matches, board_size, komi): self.student_list = student_list self.num_matches = num_matches self.board_size", "+ ' as Player_2\\n') lines = None with open('single_match.py', 'r') as fr: lines", "+ str(self.player2) + '_' + str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score", "= komi self.folder_name = 'Tournament' self.module_folder = 'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if", "0: first_player = self.player1 second_player = self.player2 else: first_player = self.player2 second_player =", "'.py', 'w') as fw: fw.write('import 
AlphaGoPlayer_' + str(first_player) + ' as Player_1\\n') fw.write('import", "self.player2 = p2 self.num_matches = num_matches self.root_folder = root_folder self.board_size = board_size self.komi", "= self.folder_name + '/' + str(p1) + '_' + str(p2) head_to_head = RunMatches(p1,", "self.player1 = p1 self.player2 = p2 self.num_matches = num_matches self.root_folder = root_folder self.board_size", "range(i + 1, n): p1 = self.student_list[i] p2 = self.student_list[j] root_folder = self.folder_name", "+ str(p1) + '_' + str(p2) head_to_head = RunMatches(p1, p2, self.num_matches, root_folder, self.board_size,", "tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score = match.run_match() print(winner, final_score) t = Tournament([1,5], 1,", "= self.player1 match_folder = self.root_folder + '/match' + str(match_num + 1) with open('modules/tmp_match_'", "AlphaGoPlayer_' + str(second_player) + ' as Player_2\\n') lines = None with open('single_match.py', 'r')", "self.board_size = board_size self.komi = komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for", "self.komi) head_to_head.run_matches() class RunMatches(): def __init__(self, p1, p2, num_matches, root_folder, board_size, komi): self.player1", "self.num_matches = num_matches self.board_size = board_size self.komi = komi self.folder_name = 'Tournament' self.module_folder", "self.student_list[i] p2 = self.student_list[j] root_folder = self.folder_name + '/' + str(p1) + '_'", "self.student_list[j] root_folder = self.folder_name + '/' + str(p1) + '_' + str(p2) head_to_head", "not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self): n = len(self.student_list) for i in range(n): for", "komi self.folder_name = 'Tournament' self.module_folder = 'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not", "= self.student_list[j] root_folder = self.folder_name + '/' + str(p1) + '_' + 
str(p2)", "winner, final_score = match.run_match() print(winner, final_score) t = Tournament([1,5], 1, 13, 7.5) t.run_tournament()", "= p1 self.player2 = p2 self.num_matches = num_matches self.root_folder = root_folder self.board_size =", "= komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for match_num in range(self.num_matches): first_player", "student_list self.num_matches = num_matches self.board_size = board_size self.komi = komi self.folder_name = 'Tournament'", "not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for match_num in range(self.num_matches): first_player = None second_player", "self.num_matches = num_matches self.root_folder = root_folder self.board_size = board_size self.komi = komi if", "for line in lines: fw.write(line) fw.close() time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) +", "__init__(self, p1, p2, num_matches, root_folder, board_size, komi): self.player1 = p1 self.player2 = p2", "= self.player2 else: first_player = self.player2 second_player = self.player1 match_folder = self.root_folder +", "= root_folder self.board_size = board_size self.komi = komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def", "= 'Tournament' self.module_folder = 'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): os.makedirs(self.module_folder)", "= None with open('single_match.py', 'r') as fr: lines = fr.readlines() fr.close() for line", "+ '_' + str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score = match.run_match()", "fw.write(line) fw.close() time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' + str(self.player2) +", "'w') as fw: fw.write('import AlphaGoPlayer_' + str(first_player) + ' as Player_1\\n') fw.write('import 
AlphaGoPlayer_'", "def run_matches(self): for match_num in range(self.num_matches): first_player = None second_player = None if", "+ str(second_player) + ' as Player_2\\n') lines = None with open('single_match.py', 'r') as", "else: first_player = self.player2 second_player = self.player1 match_folder = self.root_folder + '/match' +", "lines = None with open('single_match.py', 'r') as fr: lines = fr.readlines() fr.close() for", "= student_list self.num_matches = num_matches self.board_size = board_size self.komi = komi self.folder_name =", "p2 self.num_matches = num_matches self.root_folder = root_folder self.board_size = board_size self.komi = komi", "+ str(match_num + 1) with open('modules/tmp_match_' + str(self.player1) + '_' + str(self.player2) +", "j in range(i + 1, n): p1 = self.student_list[i] p2 = self.student_list[j] root_folder", "self.komi = komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for match_num in range(self.num_matches):", "= self.root_folder + '/match' + str(match_num + 1) with open('modules/tmp_match_' + str(self.player1) +", "second_player = self.player2 else: first_player = self.player2 second_player = self.player1 match_folder = self.root_folder", "def run_tournament(self): n = len(self.student_list) for i in range(n): for j in range(i", "+ str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score = match.run_match() print(winner, final_score)", "os.makedirs(self.root_folder) def run_matches(self): for match_num in range(self.num_matches): first_player = None second_player = None", "+ str(p2) head_to_head = RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches() class RunMatches():", "<gh_stars>1-10 import os import numpy as np import importlib import sys import time", "time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' +", 
"% 2 == 0: first_player = self.player1 second_player = self.player2 else: first_player =", "head_to_head = RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches() class RunMatches(): def __init__(self,", "'r') as fr: lines = fr.readlines() fr.close() for line in lines: fw.write(line) fw.close()", "= self.player2 second_player = self.player1 match_folder = self.root_folder + '/match' + str(match_num +", "= self.player1 second_player = self.player2 else: first_player = self.player2 second_player = self.player1 match_folder", "os.makedirs(self.module_folder) def run_tournament(self): n = len(self.student_list) for i in range(n): for j in", "p2, num_matches, root_folder, board_size, komi): self.player1 = p1 self.player2 = p2 self.num_matches =", "= board_size self.komi = komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for match_num", "str(second_player) + ' as Player_2\\n') lines = None with open('single_match.py', 'r') as fr:", "str(p1) + '_' + str(p2) head_to_head = RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi)", "p1 = self.student_list[i] p2 = self.student_list[j] root_folder = self.folder_name + '/' + str(p1)", "np import importlib import sys import time class Tournament(): def __init__(self, student_list, num_matches,", "Player_1\\n') fw.write('import AlphaGoPlayer_' + str(second_player) + ' as Player_2\\n') lines = None with", "in range(self.num_matches): first_player = None second_player = None if match_num % 2 ==", "len(self.student_list) for i in range(n): for j in range(i + 1, n): p1", "as Player_2\\n') lines = None with open('single_match.py', 'r') as fr: lines = fr.readlines()", "= RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches() class RunMatches(): def __init__(self, p1,", "os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): 
os.makedirs(self.module_folder) def run_tournament(self): n = len(self.student_list) for i in", "= None second_player = None if match_num % 2 == 0: first_player =", "+ '_' + str(self.player2) + '_' + str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder)", "= p2 self.num_matches = num_matches self.root_folder = root_folder self.board_size = board_size self.komi =", "not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self): n = len(self.student_list) for", "import os import numpy as np import importlib import sys import time class", "numpy as np import importlib import sys import time class Tournament(): def __init__(self,", "board_size self.komi = komi self.folder_name = 'Tournament' self.module_folder = 'modules' if not os.path.exists(self.folder_name):", "+ ' as Player_1\\n') fw.write('import AlphaGoPlayer_' + str(second_player) + ' as Player_2\\n') lines", "self.board_size = board_size self.komi = komi self.folder_name = 'Tournament' self.module_folder = 'modules' if", "+ '/' + str(p1) + '_' + str(p2) head_to_head = RunMatches(p1, p2, self.num_matches,", "= num_matches self.root_folder = root_folder self.board_size = board_size self.komi = komi if not", "Tournament(): def __init__(self, student_list, num_matches, board_size, komi): self.student_list = student_list self.num_matches = num_matches", "second_player = None if match_num % 2 == 0: first_player = self.player1 second_player", "+ '/match' + str(match_num + 1) with open('modules/tmp_match_' + str(self.player1) + '_' +", "'_' + str(self.player2) + '_' + str(match_num) + '.py', 'w') as fw: fw.write('import", "import sys import time class Tournament(): def __init__(self, student_list, num_matches, board_size, komi): self.student_list", "str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner, final_score = match.run_match() 
print(winner, final_score) t", "= None if match_num % 2 == 0: first_player = self.player1 second_player =", "root_folder, board_size, komi): self.player1 = p1 self.player2 = p2 self.num_matches = num_matches self.root_folder", "root_folder, self.board_size, self.komi) head_to_head.run_matches() class RunMatches(): def __init__(self, p1, p2, num_matches, root_folder, board_size,", "os import numpy as np import importlib import sys import time class Tournament():", "line in lines: fw.write(line) fw.close() time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) + '_'", "'Tournament' self.module_folder = 'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def", "'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self): n =", "self.komi = komi self.folder_name = 'Tournament' self.module_folder = 'modules' if not os.path.exists(self.folder_name): os.makedirs(self.folder_name)", "'_' + str(self.player2) + '_' + str(match_num)) match = tmp_match.SingleMatch(self.board_size, self.komi, match_folder) winner,", "sys import time class Tournament(): def __init__(self, student_list, num_matches, board_size, komi): self.student_list =", "first_player = self.player1 second_player = self.player2 else: first_player = self.player2 second_player = self.player1", "' as Player_2\\n') lines = None with open('single_match.py', 'r') as fr: lines =", "'_' + str(p2) head_to_head = RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches() class", "fr.readlines() fr.close() for line in lines: fw.write(line) fw.close() time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' +", "= fr.readlines() fr.close() for line in lines: fw.write(line) fw.close() time.sleep(1) tmp_match 
= importlib.import_module('modules.tmp_match_'", "lines: fw.write(line) fw.close() time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1) + '_' + str(self.player2)", "+ '.py', 'w') as fw: fw.write('import AlphaGoPlayer_' + str(first_player) + ' as Player_1\\n')", "'_' + str(match_num) + '.py', 'w') as fw: fw.write('import AlphaGoPlayer_' + str(first_player) +", "if not os.path.exists(self.folder_name): os.makedirs(self.folder_name) if not os.path.exists(self.module_folder): os.makedirs(self.module_folder) def run_tournament(self): n = len(self.student_list)", "board_size self.komi = komi if not os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for match_num in", "p2 = self.student_list[j] root_folder = self.folder_name + '/' + str(p1) + '_' +", "open('modules/tmp_match_' + str(self.player1) + '_' + str(self.player2) + '_' + str(match_num) + '.py',", "str(p2) head_to_head = RunMatches(p1, p2, self.num_matches, root_folder, self.board_size, self.komi) head_to_head.run_matches() class RunMatches(): def", "fr.close() for line in lines: fw.write(line) fw.close() time.sleep(1) tmp_match = importlib.import_module('modules.tmp_match_' + str(self.player1)", "num_matches, board_size, komi): self.student_list = student_list self.num_matches = num_matches self.board_size = board_size self.komi", "for match_num in range(self.num_matches): first_player = None second_player = None if match_num %", "os.path.exists(self.root_folder): os.makedirs(self.root_folder) def run_matches(self): for match_num in range(self.num_matches): first_player = None second_player =", "importlib import sys import time class Tournament(): def __init__(self, student_list, num_matches, board_size, komi):", "for j in range(i + 1, n): p1 = self.student_list[i] p2 = self.student_list[j]" ]
[ "instance p = Producer(**conf) def delivery_callback(err, msg): if err: sys.stderr.write('%% Message failed delivery:", "reserved. # Copyright 2016 Confluent Inc. # Licensed under the MIT License. #", "Confluent Inc. # Licensed under the MIT License. # Licensed under the Apache", "else: sys.stderr.write('%% Message delivered to %s [%d] @ %o\\n' % (msg.topic(), msg.partition(), msg.offset()))", "# Wait until all messages have been delivered sys.stderr.write('%% Waiting for %d deliveries\\n'", "bootstrap_servers = os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name = os.environ['topic_name'] if __name__ == '__main__':", "msg): if err: sys.stderr.write('%% Message failed delivery: %s\\n' % err) else: sys.stderr.write('%% Message", "Write 1-10 to topic for i in range(0, 10): try: p.produce(topic_name, str(i), callback=delivery_callback)", "'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id': 'python-example-producer' }", "Confluent sample modified for use with Azure Event Hubs for Apache Kafka Ecosystems", "try again\\n' % len(p)) p.poll(0) # Wait until all messages have been delivered", "License. 
# Licensed under the Apache License, Version 2.0 # # Original Confluent", "topic for i in range(0, 10): try: p.produce(topic_name, str(i), callback=delivery_callback) except BufferError as", "%s [%d] @ %o\\n' % (msg.topic(), msg.partition(), msg.offset())) # Write 1-10 to topic", "@ %o\\n' % (msg.topic(), msg.partition(), msg.offset())) # Write 1-10 to topic for i", "BufferError as e: sys.stderr.write('%% Local producer queue is full (%d messages awaiting delivery):", "err) else: sys.stderr.write('%% Message delivered to %s [%d] @ %o\\n' % (msg.topic(), msg.partition(),", "'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>,", "delivery): try again\\n' % len(p)) p.poll(0) # Wait until all messages have been", "# Write 1-10 to topic for i in range(0, 10): try: p.produce(topic_name, str(i),", "str(i), callback=delivery_callback) except BufferError as e: sys.stderr.write('%% Local producer queue is full (%d", "Kafka Ecosystems import os import sys from confluent_kafka import Producer ssl_ca_location = os.environ['ssl_ca_location']", "import os import sys from confluent_kafka import Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers =", "== '__main__': # Producer configuration conf = { 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL',", "try: p.produce(topic_name, str(i), callback=delivery_callback) except BufferError as e: sys.stderr.write('%% Local producer queue is", "python # # Copyright (c) Microsoft Corporation. All rights reserved. 
# Copyright 2016", "10): try: p.produce(topic_name, str(i), callback=delivery_callback) except BufferError as e: sys.stderr.write('%% Local producer queue", "sys.stderr.write('%% Message failed delivery: %s\\n' % err) else: sys.stderr.write('%% Message delivered to %s", "Azure Event Hubs for Apache Kafka Ecosystems import os import sys from confluent_kafka", "%o\\n' % (msg.topic(), msg.partition(), msg.offset())) # Write 1-10 to topic for i in", "modified for use with Azure Event Hubs for Apache Kafka Ecosystems import os", "if err: sys.stderr.write('%% Message failed delivery: %s\\n' % err) else: sys.stderr.write('%% Message delivered", "Apache License, Version 2.0 # # Original Confluent sample modified for use with", "Corporation. All rights reserved. # Copyright 2016 Confluent Inc. # Licensed under the", "under the MIT License. # Licensed under the Apache License, Version 2.0 #", "os import sys from confluent_kafka import Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers']", "= os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name = os.environ['topic_name'] if __name__", "p = Producer(**conf) def delivery_callback(err, msg): if err: sys.stderr.write('%% Message failed delivery: %s\\n'", "conf = { 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username':", "sys from confluent_kafka import Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password =", "= os.environ['sasl_password'] topic_name = os.environ['topic_name'] if __name__ == '__main__': # Producer configuration conf", "range(0, 10): try: p.produce(topic_name, str(i), callback=delivery_callback) except BufferError as e: sys.stderr.write('%% Local producer", "except BufferError as e: sys.stderr.write('%% 
Local producer queue is full (%d messages awaiting", "is full (%d messages awaiting delivery): try again\\n' % len(p)) p.poll(0) # Wait", "for Apache Kafka Ecosystems import os import sys from confluent_kafka import Producer ssl_ca_location", "'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id': 'python-example-producer' } #", "to %s [%d] @ %o\\n' % (msg.topic(), msg.partition(), msg.offset())) # Write 1-10 to", "Producer instance p = Producer(**conf) def delivery_callback(err, msg): if err: sys.stderr.write('%% Message failed", "} # Create Producer instance p = Producer(**conf) def delivery_callback(err, msg): if err:", "= os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name = os.environ['topic_name'] if __name__ == '__main__': #", "confluent_kafka import Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name", "under the Apache License, Version 2.0 # # Original Confluent sample modified for", "msg.partition(), msg.offset())) # Write 1-10 to topic for i in range(0, 10): try:", "{ 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password':", "delivered to %s [%d] @ %o\\n' % (msg.topic(), msg.partition(), msg.offset())) # Write 1-10", "Create Producer instance p = Producer(**conf) def delivery_callback(err, msg): if err: sys.stderr.write('%% Message", "ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id': 'python-example-producer' } # Create", "os.environ['sasl_password'] topic_name = os.environ['topic_name'] if __name__ == '__main__': # Producer configuration conf =", "'sasl.password': <PASSWORD>, #replace 
'client.id': 'python-example-producer' } # Create Producer instance p = Producer(**conf)", "% err) else: sys.stderr.write('%% Message delivered to %s [%d] @ %o\\n' % (msg.topic(),", "len(p)) p.poll(0) # Wait until all messages have been delivered sys.stderr.write('%% Waiting for", "# Producer configuration conf = { 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location,", "sample modified for use with Azure Event Hubs for Apache Kafka Ecosystems import", "Copyright 2016 Confluent Inc. # Licensed under the MIT License. # Licensed under", "#!/usr/bin/env python # # Copyright (c) Microsoft Corporation. All rights reserved. # Copyright", "% len(p)) p.poll(0) # Wait until all messages have been delivered sys.stderr.write('%% Waiting", "(msg.topic(), msg.partition(), msg.offset())) # Write 1-10 to topic for i in range(0, 10):", "os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name = os.environ['topic_name'] if __name__ ==", "Copyright (c) Microsoft Corporation. All rights reserved. # Copyright 2016 Confluent Inc. #", "<reponame>CloudBreadPaPa/azure-eventhub-kafka-python #!/usr/bin/env python # # Copyright (c) Microsoft Corporation. All rights reserved. #", "sys.stderr.write('%% Local producer queue is full (%d messages awaiting delivery): try again\\n' %", "# Copyright 2016 Confluent Inc. # Licensed under the MIT License. # Licensed", "'client.id': 'python-example-producer' } # Create Producer instance p = Producer(**conf) def delivery_callback(err, msg):", "use with Azure Event Hubs for Apache Kafka Ecosystems import os import sys", "= { 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString',", "Inc. # Licensed under the MIT License. 
# Licensed under the Apache License,", "awaiting delivery): try again\\n' % len(p)) p.poll(0) # Wait until all messages have", "for use with Azure Event Hubs for Apache Kafka Ecosystems import os import", "p.poll(0) # Wait until all messages have been delivered sys.stderr.write('%% Waiting for %d", "messages awaiting delivery): try again\\n' % len(p)) p.poll(0) # Wait until all messages", "os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name = os.environ['topic_name'] if __name__ == '__main__': # Producer", "full (%d messages awaiting delivery): try again\\n' % len(p)) p.poll(0) # Wait until", "License, Version 2.0 # # Original Confluent sample modified for use with Azure", "sasl_password = os.environ['sasl_password'] topic_name = os.environ['topic_name'] if __name__ == '__main__': # Producer configuration", "'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id': 'python-example-producer' } # Create Producer instance", "All rights reserved. # Copyright 2016 Confluent Inc. # Licensed under the MIT", "(c) Microsoft Corporation. All rights reserved. # Copyright 2016 Confluent Inc. # Licensed", "Licensed under the Apache License, Version 2.0 # # Original Confluent sample modified", "producer queue is full (%d messages awaiting delivery): try again\\n' % len(p)) p.poll(0)", "MIT License. # Licensed under the Apache License, Version 2.0 # # Original", "2016 Confluent Inc. # Licensed under the MIT License. 
# Licensed under the", "import Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name =", "Producer configuration conf = { 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism':", "Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name = os.environ['topic_name']", "os.environ['topic_name'] if __name__ == '__main__': # Producer configuration conf = { 'bootstrap.servers': bootstrap_servers,", "Version 2.0 # # Original Confluent sample modified for use with Azure Event", "import sys from confluent_kafka import Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password", "Message failed delivery: %s\\n' % err) else: sys.stderr.write('%% Message delivered to %s [%d]", "queue is full (%d messages awaiting delivery): try again\\n' % len(p)) p.poll(0) #", "delivery_callback(err, msg): if err: sys.stderr.write('%% Message failed delivery: %s\\n' % err) else: sys.stderr.write('%%", "rights reserved. # Copyright 2016 Confluent Inc. 
# Licensed under the MIT License.", "#replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id':", "= os.environ['topic_name'] if __name__ == '__main__': # Producer configuration conf = { 'bootstrap.servers':", "= Producer(**conf) def delivery_callback(err, msg): if err: sys.stderr.write('%% Message failed delivery: %s\\n' %", "2.0 # # Original Confluent sample modified for use with Azure Event Hubs", "err: sys.stderr.write('%% Message failed delivery: %s\\n' % err) else: sys.stderr.write('%% Message delivered to", "Event Hubs for Apache Kafka Ecosystems import os import sys from confluent_kafka import", "%s\\n' % err) else: sys.stderr.write('%% Message delivered to %s [%d] @ %o\\n' %", "Wait until all messages have been delivered sys.stderr.write('%% Waiting for %d deliveries\\n' %", "Apache Kafka Ecosystems import os import sys from confluent_kafka import Producer ssl_ca_location =", "configuration conf = { 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN',", "all messages have been delivered sys.stderr.write('%% Waiting for %d deliveries\\n' % len(p)) p.flush()", "from confluent_kafka import Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password']", "topic_name = os.environ['topic_name'] if __name__ == '__main__': # Producer configuration conf = {", "#replace 'client.id': 'python-example-producer' } # Create Producer instance p = Producer(**conf) def delivery_callback(err,", "'__main__': # Producer configuration conf = { 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location':", "% (msg.topic(), msg.partition(), msg.offset())) # Write 1-10 to topic for i in range(0,", "'sasl.username': 
'$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id': 'python-example-producer' } # Create Producer instance p", "to topic for i in range(0, 10): try: p.produce(topic_name, str(i), callback=delivery_callback) except BufferError", "until all messages have been delivered sys.stderr.write('%% Waiting for %d deliveries\\n' % len(p))", "with Azure Event Hubs for Apache Kafka Ecosystems import os import sys from", "1-10 to topic for i in range(0, 10): try: p.produce(topic_name, str(i), callback=delivery_callback) except", "<PASSWORD>, #replace 'client.id': 'python-example-producer' } # Create Producer instance p = Producer(**conf) def", "Microsoft Corporation. All rights reserved. # Copyright 2016 Confluent Inc. # Licensed under", "Producer(**conf) def delivery_callback(err, msg): if err: sys.stderr.write('%% Message failed delivery: %s\\n' % err)", "again\\n' % len(p)) p.poll(0) # Wait until all messages have been delivered sys.stderr.write('%%", "__name__ == '__main__': # Producer configuration conf = { 'bootstrap.servers': bootstrap_servers, #replace 'security.protocol':", "i in range(0, 10): try: p.produce(topic_name, str(i), callback=delivery_callback) except BufferError as e: sys.stderr.write('%%", "def delivery_callback(err, msg): if err: sys.stderr.write('%% Message failed delivery: %s\\n' % err) else:", "msg.offset())) # Write 1-10 to topic for i in range(0, 10): try: p.produce(topic_name,", "# Licensed under the MIT License. 
# Licensed under the Apache License, Version", "delivery: %s\\n' % err) else: sys.stderr.write('%% Message delivered to %s [%d] @ %o\\n'", "p.produce(topic_name, str(i), callback=delivery_callback) except BufferError as e: sys.stderr.write('%% Local producer queue is full", "# Licensed under the Apache License, Version 2.0 # # Original Confluent sample", "'$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id': 'python-example-producer' } # Create Producer instance p =", "e: sys.stderr.write('%% Local producer queue is full (%d messages awaiting delivery): try again\\n'", "for i in range(0, 10): try: p.produce(topic_name, str(i), callback=delivery_callback) except BufferError as e:", "Licensed under the MIT License. # Licensed under the Apache License, Version 2.0", "callback=delivery_callback) except BufferError as e: sys.stderr.write('%% Local producer queue is full (%d messages", "(%d messages awaiting delivery): try again\\n' % len(p)) p.poll(0) # Wait until all", "Local producer queue is full (%d messages awaiting delivery): try again\\n' % len(p))", "Hubs for Apache Kafka Ecosystems import os import sys from confluent_kafka import Producer", "if __name__ == '__main__': # Producer configuration conf = { 'bootstrap.servers': bootstrap_servers, #replace", "'python-example-producer' } # Create Producer instance p = Producer(**conf) def delivery_callback(err, msg): if", "failed delivery: %s\\n' % err) else: sys.stderr.write('%% Message delivered to %s [%d] @", "sys.stderr.write('%% Message delivered to %s [%d] @ %o\\n' % (msg.topic(), msg.partition(), msg.offset())) #", "Original Confluent sample modified for use with Azure Event Hubs for Apache Kafka", "# # Copyright (c) Microsoft Corporation. All rights reserved. 
# Copyright 2016 Confluent", "# Original Confluent sample modified for use with Azure Event Hubs for Apache", "# # Original Confluent sample modified for use with Azure Event Hubs for", "[%d] @ %o\\n' % (msg.topic(), msg.partition(), msg.offset())) # Write 1-10 to topic for", "Ecosystems import os import sys from confluent_kafka import Producer ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers", "# Create Producer instance p = Producer(**conf) def delivery_callback(err, msg): if err: sys.stderr.write('%%", "the Apache License, Version 2.0 # # Original Confluent sample modified for use", "bootstrap_servers, #replace 'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>, #replace", "'security.protocol': 'SASL_SSL', 'ssl.ca.location': ssl_ca_location, 'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id': 'python-example-producer'", "Message delivered to %s [%d] @ %o\\n' % (msg.topic(), msg.partition(), msg.offset())) # Write", "# Copyright (c) Microsoft Corporation. All rights reserved. # Copyright 2016 Confluent Inc.", "as e: sys.stderr.write('%% Local producer queue is full (%d messages awaiting delivery): try", "the MIT License. # Licensed under the Apache License, Version 2.0 # #", "ssl_ca_location = os.environ['ssl_ca_location'] bootstrap_servers = os.environ['bootstrap_servers'] sasl_password = os.environ['sasl_password'] topic_name = os.environ['topic_name'] if", "'sasl.mechanism': 'PLAIN', 'sasl.username': '$ConnectionString', 'sasl.password': <PASSWORD>, #replace 'client.id': 'python-example-producer' } # Create Producer", "in range(0, 10): try: p.produce(topic_name, str(i), callback=delivery_callback) except BufferError as e: sys.stderr.write('%% Local" ]
[ "self.outer_variables[token] return unit def add_reference(self, token, node): assert token == node.token \"\"\" if", "in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else: raise ValueError(f\"Outer variable {token} is not found", "unit is a key-value pair: (token, {\"lr\": set of \"lr ref\", \"lw\": set", "SPECIAL_TOKENS: return 1 else: raise ValueError(f\"{token} not found\") if mode not in UPDATE_MODE:", "father scope \"\"\" self.father = father self.child = None self.local_variables = dict({}) self.outer_variables", ":param father: the direct father scope \"\"\" self.father = father self.child = None", "self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"]", "return self.local_variables[token] def find_and_update(self, token: str, node, mode: str): assert token == node.token", "scope when truly mentioned \"\"\" def __init__(self, father=None): \"\"\" One basic unit is", "SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token] = {\"lr\": {node}, \"lw\": {node}} return self.local_variables[token] def", "a single statement :param father: the direct father scope \"\"\" self.father = father", "not in UPDATE_MODE: raise ValueError(f\"{mode} is not an available update mode\") node.last_read |=", "else: unit[\"lr\"] = {node} unit[\"lw\"] = {node} return 0 \"\"\" def find_or_add_reference(self, token):", "= {\"lr\": set([]) | unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]} unit = self.outer_variables[token] return", "follow the lazy strategy, which means only inherent from the outer scope when", "token): if token in self.local_variables: return self.local_variables[token] elif token in self.outer_variables: return self.outer_variables[token]", "None else: unit = 
self.father.find_reference(token) if unit is not None: # be sure", "None self.local_variables = dict({}) self.outer_variables = dict({}) def find_reference(self, token): if token in", "in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token] elif token in self.outer_variables:", "elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer", "elif self.father is None: return None else: unit = self.father.find_reference(token) if unit is", "read-and-write class VariableTable: \"\"\" There's one thing to be mention that all of", "if unit is not None: # be sure to deep copy the elements", "for example, a compound statement, or a single statement :param father: the direct", "all of the variables references follow the lazy strategy, which means only inherent", "thing to be mention that all of the variables references follow the lazy", "unit def add_reference(self, token, node): assert token == node.token \"\"\" if token in", "statement :param father: the direct father scope \"\"\" self.father = father self.child =", "None: return None else: unit = self.father.find_reference(token) if unit is not None: #", "mode: str): assert token == node.token unit = self.find_reference(token) if unit is None:", "def find_and_update(self, token: str, node, mode: str): assert token == node.token unit =", "return self.outer_variables[token] elif self.father is None: return None else: unit = self.father.find_reference(token) if", "is a string, reference are cpp parser's node One variable table is for", "not found in outer tables\") def merge_and_pop_self(self): self.father.child = None for token in", "raise ValueError(f\"Outer variable {token} is not found in outer tables\") def merge_and_pop_self(self): self.father.child", 
"self.outer_variables[token] = {\"lr\": set([]) | unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]} unit = self.outer_variables[token]", "the elements self.outer_variables[token] = {\"lr\": set([]) | unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]} unit", "def add_variable_table(self): self.child = VariableTable(self) return self.child def pop_self(self): self.father.child = None for", "when truly mentioned \"\"\" def __init__(self, father=None): \"\"\" One basic unit is a", "the outer scope when truly mentioned \"\"\" def __init__(self, father=None): \"\"\" One basic", "\"\"\" self.local_variables[token] = {\"lr\": {node}, \"lw\": {node}} return self.local_variables[token] def find_and_update(self, token: str,", "for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token] elif token", "raise ValueError(f\"{token} not found\") if mode not in UPDATE_MODE: raise ValueError(f\"{mode} is not", "self.outer_variables[token] elif self.father is None: return None else: unit = self.father.find_reference(token) if unit", "unit[\"lw\"] = {node} return 0 \"\"\" def find_or_add_reference(self, token): unit = self.find_reference(token) if", "VariableTable(self) return self.child def pop_self(self): self.father.child = None for token in self.outer_variables: if", "1 \"\"\" self.local_variables[token] = {\"lr\": {node}, \"lw\": {node}} return self.local_variables[token] def find_and_update(self, token:", "references follow the lazy strategy, which means only inherent from the outer scope", "= self.outer_variables[token] return unit def add_reference(self, token, node): assert token == node.token \"\"\"", "elif token in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else: raise ValueError(f\"Outer variable {token} is", "node.token in SPECIAL_TOKENS: return 1 else: raise ValueError(f\"{token} not found\") if mode not", "return self.child def pop_self(self): 
self.father.child = None for token in self.outer_variables: if token", "if token in self.local_variables: return self.local_variables[token] elif token in self.outer_variables: return self.outer_variables[token] elif", "else: raise ValueError(f\"Outer variable {token} is not found in outer tables\") def merge_and_pop_self(self):", "outer scope when truly mentioned \"\"\" def __init__(self, father=None): \"\"\" One basic unit", "self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token] elif token in self.outer_variables: self.father.outer_variables[token]", "to deep copy the elements self.outer_variables[token] = {\"lr\": set([]) | unit[\"lr\"], \"lw\": set([])", "means only inherent from the outer scope when truly mentioned \"\"\" def __init__(self,", "return self.local_variables[token] elif token in self.outer_variables: return self.outer_variables[token] elif self.father is None: return", "unit[\"lr\"] = {node} elif mode == \"wo\": unit[\"lw\"] = {node} else: unit[\"lr\"] =", "scope \"\"\" self.father = father self.child = None self.local_variables = dict({}) self.outer_variables =", "return None else: unit = self.father.find_reference(token) if unit is not None: # be", "= {node} return 0 \"\"\" def find_or_add_reference(self, token): unit = self.find_reference(token) if unit", "father=None): \"\"\" One basic unit is a key-value pair: (token, {\"lr\": set of", "{node}, \"lw\": {node}} return self.local_variables[token] def find_and_update(self, token: str, node, mode: str): assert", "in self.outer_variables: return self.outer_variables[token] elif self.father is None: return None else: unit =", "the direct father scope \"\"\" self.father = father self.child = None self.local_variables =", "add_reference(self, token, node): assert token == node.token \"\"\" if token in SPECIAL_TOKENS: return", "elif mode == \"wo\": unit[\"lw\"] = {node} else: unit[\"lr\"] = {node} unit[\"lw\"] =", "= dict({}) 
def find_reference(self, token): if token in self.local_variables: return self.local_variables[token] elif token", "\"wo\": unit[\"lw\"] = {node} else: unit[\"lr\"] = {node} unit[\"lw\"] = {node} return 0", "assert token == node.token \"\"\" if token in SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token]", "== \"ro\": unit[\"lr\"] = {node} elif mode == \"wo\": unit[\"lw\"] = {node} else:", "{node} unit[\"lw\"] = {node} return 0 \"\"\" def find_or_add_reference(self, token): unit = self.find_reference(token)", "\"\"\" if token in SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token] = {\"lr\": {node}, \"lw\":", "find_reference(self, token): if token in self.local_variables: return self.local_variables[token] elif token in self.outer_variables: return", "unit[\"lw\"] = {node} else: unit[\"lr\"] = {node} unit[\"lw\"] = {node} return 0 \"\"\"", "def find_or_add_reference(self, token): unit = self.find_reference(token) if unit is None: unit = self.add_reference(token)", "self.outer_variables: return self.outer_variables[token] elif self.father is None: return None else: unit = self.father.find_reference(token)", "{token} is not found in outer tables\") def merge_and_pop_self(self): self.father.child = None for", "token, node): assert token == node.token \"\"\" if token in SPECIAL_TOKENS: return 1", "is None: return None else: unit = self.father.find_reference(token) if unit is not None:", "variables references follow the lazy strategy, which means only inherent from the outer", "set([]) | unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]} unit = self.outer_variables[token] return unit def", "process, for example, a compound statement, or a single statement :param father: the", "or a single statement :param father: the direct father scope \"\"\" self.father =", "for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] 
|=", "= self.add_reference(token) return unit \"\"\" def add_variable_table(self): self.child = VariableTable(self) return self.child def", "(token, {\"lr\": set of \"lr ref\", \"lw\": set of \"lw ref\"} token is", "an available update mode\") node.last_read |= unit[\"lr\"] node.last_write |= unit[\"lw\"] if mode ==", "elif token in self.outer_variables: return self.outer_variables[token] elif self.father is None: return None else:", "only inherent from the outer scope when truly mentioned \"\"\" def __init__(self, father=None):", "self.local_variables = dict({}) self.outer_variables = dict({}) def find_reference(self, token): if token in self.local_variables:", "\"lw\": set of \"lw ref\"} token is a string, reference are cpp parser's", "def __init__(self, father=None): \"\"\" One basic unit is a key-value pair: (token, {\"lr\":", "self.father.find_reference(token) if unit is not None: # be sure to deep copy the", "def pop_self(self): self.father.child = None for token in self.outer_variables: if token in self.father.local_variables:", "self.local_variables[token] def find_and_update(self, token: str, node, mode: str): assert token == node.token unit", "update mode\") node.last_read |= unit[\"lr\"] node.last_write |= unit[\"lw\"] if mode == \"ro\": unit[\"lr\"]", "\"M\", \"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\", \"wo\", \"rw\"} # read-only,", "not found in outer tables\") def __str__(self): return f\" local: {str(self.local_variables)}\\n global: {str(self.outer_variables)}\"", "add_variable_table(self): self.child = VariableTable(self) return self.child def pop_self(self): self.father.child = None for token", "unit is None: unit = self.add_reference(token) return unit \"\"\" def add_variable_table(self): self.child =", "\"wo\", \"rw\"} # read-only, write-only, read-and-write class VariableTable: \"\"\" There's one thing to", "return 0 \"\"\" def find_or_add_reference(self, token): unit = self.find_reference(token) if unit is None:", 
"unit[\"lw\"]} unit = self.outer_variables[token] return unit def add_reference(self, token, node): assert token ==", "parser's node One variable table is for one sequential process, for example, a", "SPECIAL_TOKENS = (\"cin\", \"cout\", \"endl\", \"fixed\", \"EOF\", \"stdin\", \"stdout\", \"N\", \"M\", \"L\", \"MAX\",", "== node.token \"\"\" if token in SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token] = {\"lr\":", "a compound statement, or a single statement :param father: the direct father scope", "is None: unit = self.add_reference(token) return unit \"\"\" def add_variable_table(self): self.child = VariableTable(self)", "mention that all of the variables references follow the lazy strategy, which means", "= father self.child = None self.local_variables = dict({}) self.outer_variables = dict({}) def find_reference(self,", "\"stdin\", \"stdout\", \"N\", \"M\", \"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\", \"wo\",", "are cpp parser's node One variable table is for one sequential process, for", "ref\", \"lw\": set of \"lw ref\"} token is a string, reference are cpp", "self.add_reference(token) return unit \"\"\" def add_variable_table(self): self.child = VariableTable(self) return self.child def pop_self(self):", "reference are cpp parser's node One variable table is for one sequential process,", "self.child = None self.local_variables = dict({}) self.outer_variables = dict({}) def find_reference(self, token): if", "\"cout\", \"endl\", \"fixed\", \"EOF\", \"stdin\", \"stdout\", \"N\", \"M\", \"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\")", "self.father = father self.child = None self.local_variables = dict({}) self.outer_variables = dict({}) def", "variable table is for one sequential process, for example, a compound statement, or", "node, mode: str): assert token == node.token unit = self.find_reference(token) if unit is", "self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] 
self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer variable {token} is not", "\"rw\"} # read-only, write-only, read-and-write class VariableTable: \"\"\" There's one thing to be", "token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token] elif token in", "variable {token} is not found in outer tables\") def __str__(self): return f\" local:", "ref\"} token is a string, reference are cpp parser's node One variable table", "self.outer_variables = dict({}) def find_reference(self, token): if token in self.local_variables: return self.local_variables[token] elif", "set of \"lw ref\"} token is a string, reference are cpp parser's node", "self.find_reference(token) if unit is None: if node.token in SPECIAL_TOKENS: return 1 else: raise", "token in self.outer_variables: return self.outer_variables[token] elif self.father is None: return None else: unit", "unit is not None: # be sure to deep copy the elements self.outer_variables[token]", "node One variable table is for one sequential process, for example, a compound", "token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables:", "\"\"\" def __init__(self, father=None): \"\"\" One basic unit is a key-value pair: (token,", "assert token == node.token unit = self.find_reference(token) if unit is None: if node.token", "|= self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else:", "{\"lr\": set of \"lr ref\", \"lw\": set of \"lw ref\"} token is a", "from the outer scope when truly mentioned \"\"\" def __init__(self, 
father=None): \"\"\" One", "\"lw\": set([]) | unit[\"lw\"]} unit = self.outer_variables[token] return unit def add_reference(self, token, node):", "a string, reference are cpp parser's node One variable table is for one", "of \"lw ref\"} token is a string, reference are cpp parser's node One", "raise ValueError(f\"{mode} is not an available update mode\") node.last_read |= unit[\"lr\"] node.last_write |=", "dict({}) self.outer_variables = dict({}) def find_reference(self, token): if token in self.local_variables: return self.local_variables[token]", "token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer variable", "mode not in UPDATE_MODE: raise ValueError(f\"{mode} is not an available update mode\") node.last_read", "\"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\", \"wo\", \"rw\"} # read-only, write-only,", "= {node} unit[\"lw\"] = {node} return 0 \"\"\" def find_or_add_reference(self, token): unit =", "set([]) | unit[\"lw\"]} unit = self.outer_variables[token] return unit def add_reference(self, token, node): assert", "to be mention that all of the variables references follow the lazy strategy,", "return 1 else: raise ValueError(f\"{token} not found\") if mode not in UPDATE_MODE: raise", "is not found in outer tables\") def merge_and_pop_self(self): self.father.child = None for token", "token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"]", "\"N\", \"M\", \"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\", \"wo\", \"rw\"} #", "|= unit[\"lr\"] node.last_write |= unit[\"lw\"] if mode == \"ro\": unit[\"lr\"] = {node} elif", "== \"wo\": unit[\"lw\"] = {node} else: unit[\"lr\"] = 
{node} unit[\"lw\"] = {node} return", "\"\"\" def add_variable_table(self): self.child = VariableTable(self) return self.child def pop_self(self): self.father.child = None", "\"MAXN\") UPDATE_MODE = {\"ro\", \"wo\", \"rw\"} # read-only, write-only, read-and-write class VariableTable: \"\"\"", "ValueError(f\"Outer variable {token} is not found in outer tables\") def merge_and_pop_self(self): self.father.child =", "basic unit is a key-value pair: (token, {\"lr\": set of \"lr ref\", \"lw\":", "if mode == \"ro\": unit[\"lr\"] = {node} elif mode == \"wo\": unit[\"lw\"] =", "# read-only, write-only, read-and-write class VariableTable: \"\"\" There's one thing to be mention", "if unit is None: if node.token in SPECIAL_TOKENS: return 1 else: raise ValueError(f\"{token}", "in SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token] = {\"lr\": {node}, \"lw\": {node}} return self.local_variables[token]", "= VariableTable(self) return self.child def pop_self(self): self.father.child = None for token in self.outer_variables:", "truly mentioned \"\"\" def __init__(self, father=None): \"\"\" One basic unit is a key-value", "merge_and_pop_self(self): self.father.child = None for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token][\"lr\"]", "which means only inherent from the outer scope when truly mentioned \"\"\" def", "find_or_add_reference(self, token): unit = self.find_reference(token) if unit is None: unit = self.add_reference(token) return", "self.outer_variables[token] else: raise ValueError(f\"Outer variable {token} is not found in outer tables\") def", "UPDATE_MODE = {\"ro\", \"wo\", \"rw\"} # read-only, write-only, read-and-write class VariableTable: \"\"\" There's", "self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token] elif token in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else: raise", "return 1 \"\"\" 
self.local_variables[token] = {\"lr\": {node}, \"lw\": {node}} return self.local_variables[token] def find_and_update(self,", "ValueError(f\"{token} not found\") if mode not in UPDATE_MODE: raise ValueError(f\"{mode} is not an", "{node}} return self.local_variables[token] def find_and_update(self, token: str, node, mode: str): assert token ==", "\"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\", \"wo\", \"rw\"} # read-only, write-only, read-and-write class VariableTable:", "in outer tables\") def merge_and_pop_self(self): self.father.child = None for token in self.outer_variables: if", "self.local_variables: return self.local_variables[token] elif token in self.outer_variables: return self.outer_variables[token] elif self.father is None:", "found\") if mode not in UPDATE_MODE: raise ValueError(f\"{mode} is not an available update", "self.father is None: return None else: unit = self.father.find_reference(token) if unit is not", "{\"ro\", \"wo\", \"rw\"} # read-only, write-only, read-and-write class VariableTable: \"\"\" There's one thing", "is for one sequential process, for example, a compound statement, or a single", "\"\"\" self.father = father self.child = None self.local_variables = dict({}) self.outer_variables = dict({})", "variable {token} is not found in outer tables\") def merge_and_pop_self(self): self.father.child = None", "strategy, which means only inherent from the outer scope when truly mentioned \"\"\"", "self.local_variables[token] = {\"lr\": {node}, \"lw\": {node}} return self.local_variables[token] def find_and_update(self, token: str, node,", "the lazy strategy, which means only inherent from the outer scope when truly", "\"lr ref\", \"lw\": set of \"lw ref\"} token is a string, reference are", "the variables references follow the lazy strategy, which means only inherent from the", "self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] 
self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise", "= self.find_reference(token) if unit is None: if node.token in SPECIAL_TOKENS: return 1 else:", "else: raise ValueError(f\"Outer variable {token} is not found in outer tables\") def __str__(self):", "deep copy the elements self.outer_variables[token] = {\"lr\": set([]) | unit[\"lr\"], \"lw\": set([]) |", "if node.token in SPECIAL_TOKENS: return 1 else: raise ValueError(f\"{token} not found\") if mode", "outer tables\") def merge_and_pop_self(self): self.father.child = None for token in self.outer_variables: if token", "found in outer tables\") def merge_and_pop_self(self): self.father.child = None for token in self.outer_variables:", "self.child = VariableTable(self) return self.child def pop_self(self): self.father.child = None for token in", "\"\"\" One basic unit is a key-value pair: (token, {\"lr\": set of \"lr", "token in SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token] = {\"lr\": {node}, \"lw\": {node}} return", "not an available update mode\") node.last_read |= unit[\"lr\"] node.last_write |= unit[\"lw\"] if mode", "\"EOF\", \"stdin\", \"stdout\", \"N\", \"M\", \"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\",", "token in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else: raise ValueError(f\"Outer variable {token} is not", "def find_reference(self, token): if token in self.local_variables: return self.local_variables[token] elif token in self.outer_variables:", "unit = self.outer_variables[token] return unit def add_reference(self, token, node): assert token == node.token", "token == node.token \"\"\" if token in SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token] =", "self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] 
self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"]", "read-only, write-only, read-and-write class VariableTable: \"\"\" There's one thing to be mention that", "that all of the variables references follow the lazy strategy, which means only", "self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else: raise ValueError(f\"Outer variable {token} is not found in", "not None: # be sure to deep copy the elements self.outer_variables[token] = {\"lr\":", "|= unit[\"lw\"] if mode == \"ro\": unit[\"lr\"] = {node} elif mode == \"wo\":", "| unit[\"lw\"]} unit = self.outer_variables[token] return unit def add_reference(self, token, node): assert token", "if token in self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token] elif token in self.outer_variables: self.father.outer_variables[token] =", "__init__(self, father=None): \"\"\" One basic unit is a key-value pair: (token, {\"lr\": set", "1 else: raise ValueError(f\"{token} not found\") if mode not in UPDATE_MODE: raise ValueError(f\"{mode}", "= {\"ro\", \"wo\", \"rw\"} # read-only, write-only, read-and-write class VariableTable: \"\"\" There's one", "node.last_write |= unit[\"lw\"] if mode == \"ro\": unit[\"lr\"] = {node} elif mode ==", "of the variables references follow the lazy strategy, which means only inherent from", "ValueError(f\"{mode} is not an available update mode\") node.last_read |= unit[\"lr\"] node.last_write |= unit[\"lw\"]", "self.find_reference(token) if unit is None: unit = self.add_reference(token) return unit \"\"\" def add_variable_table(self):", "\"lw ref\"} token is a string, reference are cpp parser's node One variable", "string, reference are cpp parser's node One variable table is for one sequential", "unit \"\"\" def add_variable_table(self): self.child = VariableTable(self) return self.child def pop_self(self): self.father.child =", "token in self.father.local_variables: 
self.father.local_variables[token] = self.outer_variables[token] elif token in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token]", "elements self.outer_variables[token] = {\"lr\": set([]) | unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]} unit =", "if token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token in", "cpp parser's node One variable table is for one sequential process, for example,", "is not found in outer tables\") def __str__(self): return f\" local: {str(self.local_variables)}\\n global:", "{node} else: unit[\"lr\"] = {node} unit[\"lw\"] = {node} return 0 \"\"\" def find_or_add_reference(self,", "one sequential process, for example, a compound statement, or a single statement :param", "== node.token unit = self.find_reference(token) if unit is None: if node.token in SPECIAL_TOKENS:", "set of \"lr ref\", \"lw\": set of \"lw ref\"} token is a string,", "= self.father.find_reference(token) if unit is not None: # be sure to deep copy", "return unit def add_reference(self, token, node): assert token == node.token \"\"\" if token", "VariableTable: \"\"\" There's one thing to be mention that all of the variables", "pop_self(self): self.father.child = None for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token]", "{\"lr\": set([]) | unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]} unit = self.outer_variables[token] return unit", "|= self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer variable {token} is not found in outer tables\")", "for one sequential process, for example, a compound statement, or a single statement", "in UPDATE_MODE: raise ValueError(f\"{mode} is not an available update mode\") node.last_read |= unit[\"lr\"]", "= None self.local_variables = dict({}) self.outer_variables = dict({}) def 
find_reference(self, token): if token", "token in self.local_variables: return self.local_variables[token] elif token in self.outer_variables: return self.outer_variables[token] elif self.father", "compound statement, or a single statement :param father: the direct father scope \"\"\"", "lazy strategy, which means only inherent from the outer scope when truly mentioned", "not found\") if mode not in UPDATE_MODE: raise ValueError(f\"{mode} is not an available", "self.father.child = None for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |=", "token == node.token unit = self.find_reference(token) if unit is None: if node.token in", "in SPECIAL_TOKENS: return 1 else: raise ValueError(f\"{token} not found\") if mode not in", "of \"lr ref\", \"lw\": set of \"lw ref\"} token is a string, reference", "# be sure to deep copy the elements self.outer_variables[token] = {\"lr\": set([]) |", "None: # be sure to deep copy the elements self.outer_variables[token] = {\"lr\": set([])", "dict({}) def find_reference(self, token): if token in self.local_variables: return self.local_variables[token] elif token in", "self.local_variables[token] elif token in self.outer_variables: return self.outer_variables[token] elif self.father is None: return None", "\"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\", \"wo\", \"rw\"} # read-only, write-only, read-and-write", "copy the elements self.outer_variables[token] = {\"lr\": set([]) | unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]}", "token): unit = self.find_reference(token) if unit is None: unit = self.add_reference(token) return unit", "is a key-value pair: (token, {\"lr\": set of \"lr ref\", \"lw\": set of", "write-only, read-and-write class VariableTable: \"\"\" There's one thing to be mention that all", "\"\"\" def find_or_add_reference(self, token): unit = self.find_reference(token) if unit is None: unit =", "{token} is not found in outer tables\") def 
__str__(self): return f\" local: {str(self.local_variables)}\\n", "= None for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"]", "unit[\"lr\"] = {node} unit[\"lw\"] = {node} return 0 \"\"\" def find_or_add_reference(self, token): unit", "\"ro\": unit[\"lr\"] = {node} elif mode == \"wo\": unit[\"lw\"] = {node} else: unit[\"lr\"]", "= self.find_reference(token) if unit is None: unit = self.add_reference(token) return unit \"\"\" def", "| unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]} unit = self.outer_variables[token] return unit def add_reference(self,", "None for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token] elif", "in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer variable {token}", "{\"lr\": {node}, \"lw\": {node}} return self.local_variables[token] def find_and_update(self, token: str, node, mode: str):", "mode == \"wo\": unit[\"lw\"] = {node} else: unit[\"lr\"] = {node} unit[\"lw\"] = {node}", "|= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"]", "single statement :param father: the direct father scope \"\"\" self.father = father self.child", "is not None: # be sure to deep copy the elements self.outer_variables[token] =", "in self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token] elif token in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else:", "\"\"\" There's one thing to be 
mention that all of the variables references", "str): assert token == node.token unit = self.find_reference(token) if unit is None: if", "There's one thing to be mention that all of the variables references follow", "node.token \"\"\" if token in SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token] = {\"lr\": {node},", "mode == \"ro\": unit[\"lr\"] = {node} elif mode == \"wo\": unit[\"lw\"] = {node}", "available update mode\") node.last_read |= unit[\"lr\"] node.last_write |= unit[\"lw\"] if mode == \"ro\":", "{node} return 0 \"\"\" def find_or_add_reference(self, token): unit = self.find_reference(token) if unit is", "self.father.local_variables[token] = self.outer_variables[token] elif token in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else: raise ValueError(f\"Outer", "= {\"lr\": {node}, \"lw\": {node}} return self.local_variables[token] def find_and_update(self, token: str, node, mode:", "self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |=", "inherent from the outer scope when truly mentioned \"\"\" def __init__(self, father=None): \"\"\"", "None for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"]", "self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer variable {token} is not found in outer tables\") def", "unit is None: if node.token in SPECIAL_TOKENS: return 1 else: raise ValueError(f\"{token} not", "\"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\", \"wo\", \"rw\"} # read-only, write-only, read-and-write class", "statement, or a single statement :param father: the direct father scope \"\"\" self.father", "\"lw\": {node}} 
return self.local_variables[token] def find_and_update(self, token: str, node, mode: str): assert token", "sequential process, for example, a compound statement, or a single statement :param father:", "self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token", "unit[\"lr\"], \"lw\": set([]) | unit[\"lw\"]} unit = self.outer_variables[token] return unit def add_reference(self, token,", "(\"cin\", \"cout\", \"endl\", \"fixed\", \"EOF\", \"stdin\", \"stdout\", \"N\", \"M\", \"L\", \"MAX\", \"MIN\", \"NUM\",", "tables\") def merge_and_pop_self(self): self.father.child = None for token in self.outer_variables: if token in", "unit = self.father.find_reference(token) if unit is not None: # be sure to deep", "node.last_read |= unit[\"lr\"] node.last_write |= unit[\"lw\"] if mode == \"ro\": unit[\"lr\"] = {node}", "self.father.outer_variables[token] = self.outer_variables[token] else: raise ValueError(f\"Outer variable {token} is not found in outer", "in self.local_variables: return self.local_variables[token] elif token in self.outer_variables: return self.outer_variables[token] elif self.father is", "= {node} elif mode == \"wo\": unit[\"lw\"] = {node} else: unit[\"lr\"] = {node}", "sure to deep copy the elements self.outer_variables[token] = {\"lr\": set([]) | unit[\"lr\"], \"lw\":", "find_and_update(self, token: str, node, mode: str): assert token == node.token unit = self.find_reference(token)", "class VariableTable: \"\"\" There's one thing to be mention that all of the", "\"fixed\", \"EOF\", \"stdin\", \"stdout\", \"N\", \"M\", \"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE =", "one thing to be mention that all of the variables references follow the", "else: unit = self.father.find_reference(token) if unit is not None: # be sure to", "return unit \"\"\" def 
add_variable_table(self): self.child = VariableTable(self) return self.child def pop_self(self): self.father.child", "ValueError(f\"Outer variable {token} is not found in outer tables\") def __str__(self): return f\"", "0 \"\"\" def find_or_add_reference(self, token): unit = self.find_reference(token) if unit is None: unit", "self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"] |=", "One variable table is for one sequential process, for example, a compound statement,", "be mention that all of the variables references follow the lazy strategy, which", "example, a compound statement, or a single statement :param father: the direct father", "if token in SPECIAL_TOKENS: return 1 \"\"\" self.local_variables[token] = {\"lr\": {node}, \"lw\": {node}}", "is not an available update mode\") node.last_read |= unit[\"lr\"] node.last_write |= unit[\"lw\"] if", "<reponame>eecshope/GraphPC SPECIAL_TOKENS = (\"cin\", \"cout\", \"endl\", \"fixed\", \"EOF\", \"stdin\", \"stdout\", \"N\", \"M\", \"L\",", "if mode not in UPDATE_MODE: raise ValueError(f\"{mode} is not an available update mode\")", "= {node} else: unit[\"lr\"] = {node} unit[\"lw\"] = {node} return 0 \"\"\" def", "{node} elif mode == \"wo\": unit[\"lw\"] = {node} else: unit[\"lr\"] = {node} unit[\"lw\"]", "unit = self.find_reference(token) if unit is None: unit = self.add_reference(token) return unit \"\"\"", "table is for one sequential process, for example, a compound statement, or a", "UPDATE_MODE: raise ValueError(f\"{mode} is not an available update mode\") node.last_read |= unit[\"lr\"] node.last_write", "unit = self.find_reference(token) if unit is None: if node.token in SPECIAL_TOKENS: return 1", "unit = self.add_reference(token) return unit \"\"\" def add_variable_table(self): 
self.child = VariableTable(self) return self.child", "= (\"cin\", \"cout\", \"endl\", \"fixed\", \"EOF\", \"stdin\", \"stdout\", \"N\", \"M\", \"L\", \"MAX\", \"MIN\",", "pair: (token, {\"lr\": set of \"lr ref\", \"lw\": set of \"lw ref\"} token", "in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif", "mode\") node.last_read |= unit[\"lr\"] node.last_write |= unit[\"lw\"] if mode == \"ro\": unit[\"lr\"] =", "is None: if node.token in SPECIAL_TOKENS: return 1 else: raise ValueError(f\"{token} not found\")", "def merge_and_pop_self(self): self.father.child = None for token in self.outer_variables: if token in self.father.local_variables:", "str, node, mode: str): assert token == node.token unit = self.find_reference(token) if unit", "= self.outer_variables[token] elif token in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else: raise ValueError(f\"Outer variable", "= None for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token] = self.outer_variables[token]", "father: the direct father scope \"\"\" self.father = father self.child = None self.local_variables", "mentioned \"\"\" def __init__(self, father=None): \"\"\" One basic unit is a key-value pair:", "= dict({}) self.outer_variables = dict({}) def find_reference(self, token): if token in self.local_variables: return", "direct father scope \"\"\" self.father = father self.child = None self.local_variables = dict({})", "|= self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer variable {token} is not found", "self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise 
ValueError(f\"Outer variable {token} is not found in", "None: if node.token in SPECIAL_TOKENS: return 1 else: raise ValueError(f\"{token} not found\") if", "node.token unit = self.find_reference(token) if unit is None: if node.token in SPECIAL_TOKENS: return", "self.father.outer_variables: self.father.outer_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer variable {token} is", "unit[\"lw\"] if mode == \"ro\": unit[\"lr\"] = {node} elif mode == \"wo\": unit[\"lw\"]", "self.child def pop_self(self): self.father.child = None for token in self.outer_variables: if token in", "token is a string, reference are cpp parser's node One variable table is", "node): assert token == node.token \"\"\" if token in SPECIAL_TOKENS: return 1 \"\"\"", "else: raise ValueError(f\"{token} not found\") if mode not in UPDATE_MODE: raise ValueError(f\"{mode} is", "One basic unit is a key-value pair: (token, {\"lr\": set of \"lr ref\",", "self.father.child = None for token in self.outer_variables: if token in self.father.local_variables: self.father.local_variables[token] =", "\"endl\", \"fixed\", \"EOF\", \"stdin\", \"stdout\", \"N\", \"M\", \"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE", "self.father.outer_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] else: raise ValueError(f\"Outer variable {token} is not found in outer", "= self.outer_variables[token] else: raise ValueError(f\"Outer variable {token} is not found in outer tables\")", "if unit is None: unit = self.add_reference(token) return unit \"\"\" def add_variable_table(self): self.child", "\"stdout\", \"N\", \"M\", \"L\", \"MAX\", \"MIN\", \"NUM\", \"MAXN\") UPDATE_MODE = {\"ro\", \"wo\", \"rw\"}", "key-value pair: (token, {\"lr\": set of \"lr ref\", \"lw\": set of \"lw ref\"}", "token: str, node, mode: str): assert token == node.token unit = self.find_reference(token) if", 
"None: unit = self.add_reference(token) return unit \"\"\" def add_variable_table(self): self.child = VariableTable(self) return", "self.outer_variables[token] elif token in self.outer_variables: self.father.outer_variables[token] = self.outer_variables[token] else: raise ValueError(f\"Outer variable {token}", "be sure to deep copy the elements self.outer_variables[token] = {\"lr\": set([]) | unit[\"lr\"],", "father self.child = None self.local_variables = dict({}) self.outer_variables = dict({}) def find_reference(self, token):", "raise ValueError(f\"Outer variable {token} is not found in outer tables\") def __str__(self): return", "def add_reference(self, token, node): assert token == node.token \"\"\" if token in SPECIAL_TOKENS:", "in self.father.local_variables: self.father.local_variables[token][\"lr\"] |= self.outer_variables[token][\"lr\"] self.father.local_variables[token][\"lw\"] |= self.outer_variables[token][\"lw\"] elif token in self.father.outer_variables: self.father.outer_variables[token][\"lr\"]", "a key-value pair: (token, {\"lr\": set of \"lr ref\", \"lw\": set of \"lw", "unit[\"lr\"] node.last_write |= unit[\"lw\"] if mode == \"ro\": unit[\"lr\"] = {node} elif mode" ]
[ "in c if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\")) if any(char in invalidChars for", "import string print(\"type the input: \") a = input(\">\") b=list(a) c = list(dict.fromkeys(b))", "<reponame>ben-rd/Weebouo import string print(\"type the input: \") a = input(\">\") b=list(a) c =", "the input: \") a = input(\">\") b=list(a) c = list(dict.fromkeys(b)) d = len([int(s)", "= input(\">\") b=list(a) c = list(dict.fromkeys(b)) d = len([int(s) for s in c", "a): print(a, \"=>\",\"Error\") elif d > 0: print(a, \"=>\",\"Error\") else: i=1 e=[] for", "= list(dict.fromkeys(b)) d = len([int(s) for s in c if s.isdigit()]) invalidChars =", "> 0: print(a, \"=>\",\"Error\") else: i=1 e=[] for x in c: e.append(i) i=i+1", "print(a, \"=>\",\"Error\") elif d > 0: print(a, \"=>\",\"Error\") else: i=1 e=[] for x", "in b] s = [str(y) for y in converted_list] number = int(\"\".join(s)) print(\"\".join(b),", "= len([int(s) for s in c if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\")) if", "a = input(\">\") b=list(a) c = list(dict.fromkeys(b)) d = len([int(s) for s in", "v:k for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in b] s = [str(y)", "char in a): print(a, \"=>\",\"Error\") elif d > 0: print(a, \"=>\",\"Error\") else: i=1", "elif d > 0: print(a, \"=>\",\"Error\") else: i=1 e=[] for x in c:", "= set(string.punctuation.replace(\"_\", \"\")) if any(char in invalidChars for char in a): print(a, \"=>\",\"Error\")", "c if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\")) if any(char in invalidChars for char", "\"=>\",\"Error\") elif d > 0: print(a, \"=>\",\"Error\") else: i=1 e=[] for x in", "in c: e.append(i) i=i+1 dictionary = dict(zip(e, c)) reverse_subs = { v:k for", "c = list(dict.fromkeys(b)) d = len([int(s) for s in c if s.isdigit()]) invalidChars", "\") a = input(\">\") b=list(a) c = list(dict.fromkeys(b)) d = len([int(s) for s", "= dict(zip(e, c)) reverse_subs = 
{ v:k for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for", "i=i+1 dictionary = dict(zip(e, c)) reverse_subs = { v:k for k,v in dictionary.items()}", "in invalidChars for char in a): print(a, \"=>\",\"Error\") elif d > 0: print(a,", "print(a, \"=>\",\"Error\") else: i=1 e=[] for x in c: e.append(i) i=i+1 dictionary =", "len([int(s) for s in c if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\")) if any(char", "i=1 e=[] for x in c: e.append(i) i=i+1 dictionary = dict(zip(e, c)) reverse_subs", "s = [str(y) for y in converted_list] number = int(\"\".join(s)) print(\"\".join(b), \"->\", number)", "k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in b] s = [str(y) for y", "c: e.append(i) i=i+1 dictionary = dict(zip(e, c)) reverse_subs = { v:k for k,v", "dict(zip(e, c)) reverse_subs = { v:k for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item", "= { v:k for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in b] s", "input: \") a = input(\">\") b=list(a) c = list(dict.fromkeys(b)) d = len([int(s) for", "reverse_subs = { v:k for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in b]", "for s in c if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\")) if any(char in", "\"=>\",\"Error\") else: i=1 e=[] for x in c: e.append(i) i=i+1 dictionary = dict(zip(e,", "s in c if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\")) if any(char in invalidChars", "invalidChars for char in a): print(a, \"=>\",\"Error\") elif d > 0: print(a, \"=>\",\"Error\")", "b=list(a) c = list(dict.fromkeys(b)) d = len([int(s) for s in c if s.isdigit()])", "for item in b] s = [str(y) for y in converted_list] number =", "x in c: e.append(i) i=i+1 dictionary = dict(zip(e, c)) reverse_subs = { v:k", "\"\")) if any(char in invalidChars for char in a): print(a, \"=>\",\"Error\") elif d", 
"e.append(i) i=i+1 dictionary = dict(zip(e, c)) reverse_subs = { v:k for k,v in", "for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in b] s = [str(y) for", "item in b] s = [str(y) for y in converted_list] number = int(\"\".join(s))", "converted_list=[reverse_subs.get(item,item) for item in b] s = [str(y) for y in converted_list] number", "if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\")) if any(char in invalidChars for char in", "input(\">\") b=list(a) c = list(dict.fromkeys(b)) d = len([int(s) for s in c if", "dictionary = dict(zip(e, c)) reverse_subs = { v:k for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item)", "{ v:k for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in b] s =", "set(string.punctuation.replace(\"_\", \"\")) if any(char in invalidChars for char in a): print(a, \"=>\",\"Error\") elif", "print(\"type the input: \") a = input(\">\") b=list(a) c = list(dict.fromkeys(b)) d =", "list(dict.fromkeys(b)) d = len([int(s) for s in c if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\",", "0: print(a, \"=>\",\"Error\") else: i=1 e=[] for x in c: e.append(i) i=i+1 dictionary", "e=[] for x in c: e.append(i) i=i+1 dictionary = dict(zip(e, c)) reverse_subs =", "for char in a): print(a, \"=>\",\"Error\") elif d > 0: print(a, \"=>\",\"Error\") else:", "for x in c: e.append(i) i=i+1 dictionary = dict(zip(e, c)) reverse_subs = {", "s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\")) if any(char in invalidChars for char in a):", "in a): print(a, \"=>\",\"Error\") elif d > 0: print(a, \"=>\",\"Error\") else: i=1 e=[]", "string print(\"type the input: \") a = input(\">\") b=list(a) c = list(dict.fromkeys(b)) d", "dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in b] s = [str(y) for y in converted_list]", "in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in b] s = 
[str(y) for y in", "b] s = [str(y) for y in converted_list] number = int(\"\".join(s)) print(\"\".join(b), \"->\",", "any(char in invalidChars for char in a): print(a, \"=>\",\"Error\") elif d > 0:", "c)) reverse_subs = { v:k for k,v in dictionary.items()} converted_list=[reverse_subs.get(item,item) for item in", "d = len([int(s) for s in c if s.isdigit()]) invalidChars = set(string.punctuation.replace(\"_\", \"\"))", "else: i=1 e=[] for x in c: e.append(i) i=i+1 dictionary = dict(zip(e, c))", "invalidChars = set(string.punctuation.replace(\"_\", \"\")) if any(char in invalidChars for char in a): print(a,", "d > 0: print(a, \"=>\",\"Error\") else: i=1 e=[] for x in c: e.append(i)", "if any(char in invalidChars for char in a): print(a, \"=>\",\"Error\") elif d >" ]
[ "if credentials and credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory /", ":param service: Authenticated directory service object :param group: The id for the group", "service object :param group_key: Unique identifier of the group (string, email, or id)", "[]) return groups def get_members_for_group(service, group): \"\"\" Get all members for a specified", "groups :return: List of all groups \"\"\" results = service.groups().list(domain=domain, maxResults=500).execute() groups =", "import InstalledAppFlow from google.auth.transport.requests import Request from config.config import path_credentials_directory def get_directory_service(): \"\"\"", "Google Group via Google API. Groups created en masse might appear after 6-72", ":param service: Authenticated directory service object :param domain: The domain for users :return:", "[]) return users def create_group(service, email, name, description): \"\"\" Create a Google Group", "appear after 6-72 hours pass. :param service: Authenticated directory service object :param name:", "service: Authenticated directory service object :param group: The id for the group :return:", "a Google Group via Google API. Groups created en masse might appear after", "results def add_user_to_group(service, group_key, user_email, role): \"\"\" Add user to a Google Group.", "en masse might appear after 6-72 hours pass. :param service: Authenticated directory service", "[]) while 'nextPageToken' in results: results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups',", "a specified domain via Google API. :param service: Authenticated directory service object :param", "Name of the group :return: Results of the query \"\"\" results = service.groups().insert(", "Google API. Groups created en masse might appear after 6-72 hours pass. 
:param", "of the user :param role: Role of the member :return: Results of the", "all groups for a specified domain via Google API. :param service: Authenticated directory", "from google.auth.transport.requests import Request from config.config import path_credentials_directory def get_directory_service(): \"\"\" Authorize in", "as token: pickle.dump(credentials, token) service = build('admin', 'directory_v1', credentials=credentials) return service def get_groups_for_domain(service,", "directory service object :param group_key: Unique identifier of the group (string, email, or", "of the group (string, email, or id) :param user_email: Email of the user", "group: The id for the group :return: List of all members \"\"\" results", "email, or id) :param user_email: Email of the user :param role: Role of", "groups \"\"\" results = service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups', []) while 'nextPageToken' in", "\"\"\" https://developers.google.com/admin-sdk/directory/v1/quickstart/python https://developers.google.com/resources/api-libraries/documentation/admin/directory_v1/python/latest/index.html https://developers.google.com/identity/protocols/googlescopes https://developers.google.com/admin-sdk/directory/v1/guides/manage-group-members \"\"\" import pickle import os.path from googleapiclient.discovery import", "\"\"\" results = service.members().list( groupKey=group, maxResults=500 ).execute() direct_members = results.get('members', []) while 'nextPageToken'", ").execute() users = results.get('users', []) while 'nextPageToken' in results: results = service.users().list( domain=domain,", "pickle.load(token) if not credentials or not credentials.valid: if credentials and credentials.expired and credentials.refresh_token:", "the group :return: Results of the query \"\"\" results = service.groups().insert( body={ \"kind\":", "+= results.get('groups', []) return groups def get_members_for_group(service, group): \"\"\" Get all 
members for", "service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\": email, \"name\": name, \"description\": description, } ).execute() return", "direct_members = results.get('members', []) while 'nextPageToken' in results: results = service.members().list( groupKey=group, maxResults=500,", "\"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None if os.path.exists(path_credentials_directory /", "with open(path_credentials_directory / 'token_directory.pickle', 'rb') as token: credentials = pickle.load(token) if not credentials", "return service def get_groups_for_domain(service, domain): \"\"\" Get all groups for a specified domain", "of all members \"\"\" results = service.members().list( groupKey=group, maxResults=500 ).execute() direct_members = results.get('members',", "[]) while 'nextPageToken' in results: results = service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute()", "member in direct_members: if member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return members", "or id) :param user_email: Email of the user :param role: Role of the", "credentials=credentials) return service def get_groups_for_domain(service, domain): \"\"\" Get all groups for a specified", "service = build('admin', 'directory_v1', credentials=credentials) return service def get_groups_for_domain(service, domain): \"\"\" Get all", "Get all members for a specified group via Google API. :param service: Authenticated", "InstalledAppFlow from google.auth.transport.requests import Request from config.config import path_credentials_directory def get_directory_service(): \"\"\" Authorize", ":return: Authenticated service object. 
\"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials =", ":return: Results of the query \"\"\" results = service.members().insert( groupKey=group_key, body={ \"email\": user_email,", "via Google API. Groups created en masse might appear after 6-72 hours pass.", "pageToken=results['nextPageToken'] ).execute() users += results.get('users', []) return users def create_group(service, email, name, description):", "domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() users += results.get('users', []) return users def create_group(service,", "members = [] for member in direct_members: if member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email']))", "Group. :param service: Authenticated directory service object :param group_key: Unique identifier of the", "with open(path_credentials_directory / 'token_directory.pickle', 'wb') as token: pickle.dump(credentials, token) service = build('admin', 'directory_v1',", "get_members_for_group(service, group): \"\"\" Get all members for a specified group via Google API.", "'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return members def get_users_for_domain(service, domain, query): \"\"\" Get", "group via Google API. 
:param service: Authenticated directory service object :param group: The", "credentials = None if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle', 'rb') as", "Authenticated directory service object :param group_key: Unique identifier of the group (string, email,", "open(path_credentials_directory / 'token_directory.pickle', 'wb') as token: pickle.dump(credentials, token) service = build('admin', 'directory_v1', credentials=credentials)", "= service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\": email, \"name\": name, \"description\": description, } ).execute()", "role): \"\"\" Add user to a Google Group. :param service: Authenticated directory service", "google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None if os.path.exists(path_credentials_directory / 'token_directory.pickle'):", "service def get_groups_for_domain(service, domain): \"\"\" Get all groups for a specified domain via", "results = service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups', []) while 'nextPageToken' in results: results", "results = service.members().list( groupKey=group, maxResults=500 ).execute() direct_members = results.get('members', []) while 'nextPageToken' in", "= service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members', []) members = []", "[]) members = [] for member in direct_members: if member['type'] == 'GROUP': members.extend(get_members_for_group(service,", "os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request", "not credentials.valid: if credentials and credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else: 
flow = InstalledAppFlow.from_client_secrets_file(", "else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0) with open(path_credentials_directory", "https://developers.google.com/resources/api-libraries/documentation/admin/directory_v1/python/latest/index.html https://developers.google.com/identity/protocols/googlescopes https://developers.google.com/admin-sdk/directory/v1/guides/manage-group-members \"\"\" import pickle import os.path from googleapiclient.discovery import build from", "directory service object :param group: The id for the group :return: List of", "[]) while 'nextPageToken' in results: results = service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members", "name, \"description\": description, } ).execute() return results def add_user_to_group(service, group_key, user_email, role): \"\"\"", "\"description\": description, } ).execute() return results def add_user_to_group(service, group_key, user_email, role): \"\"\" Add", "results = service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\": email, \"name\": name, \"description\": description, }", "return users def create_group(service, email, name, description): \"\"\" Create a Google Group via", "credentials or not credentials.valid: if credentials and credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else: flow", "service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() users += results.get('users', []) return users def", "service: Authenticated directory service object :param domain: The domain for users :return: List", "all members for a specified group via Google API. :param service: Authenticated directory", "description): \"\"\" Create a Google Group via Google API. 
Groups created en masse", ":param role: Role of the member :return: Results of the query \"\"\" results", "results: results = service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() users += results.get('users', [])", "\"\"\" results = service.users().list( domain=domain, maxResults=500, query=query, ).execute() users = results.get('users', []) while", "group (string, email, or id) :param user_email: Email of the user :param role:", "directory service object :param domain: The domain for users :return: List of all", "user_email: Email of the user :param role: Role of the member :return: Results", "if member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return members def get_users_for_domain(service, domain,", "from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from config.config import path_credentials_directory def", "= results.get('members', []) while 'nextPageToken' in results: results = service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken']", "= None if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle', 'rb') as token:", "users for a specified domain via Google API. 
:param service: Authenticated directory service", "service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups', []) return groups def get_members_for_group(service, group): \"\"\"", "token: pickle.dump(credentials, token) service = build('admin', 'directory_v1', credentials=credentials) return service def get_groups_for_domain(service, domain):", "flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle', 'wb') as token: pickle.dump(credentials, token) service = build('admin',", ":return: Results of the query \"\"\" results = service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\":", "os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle', 'rb') as token: credentials = pickle.load(token)", "return groups def get_members_for_group(service, group): \"\"\" Get all members for a specified group", "def get_directory_service(): \"\"\" Authorize in Google via OAuth Flow. :return: Authenticated service object.", "in direct_members: if member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return members def", "object :param name: Name of the group :return: Results of the query \"\"\"", "create_group(service, email, name, description): \"\"\" Create a Google Group via Google API. 
Groups", "def get_groups_for_domain(service, domain): \"\"\" Get all groups for a specified domain via Google", "groups += results.get('groups', []) return groups def get_members_for_group(service, group): \"\"\" Get all members", "all users \"\"\" results = service.users().list( domain=domain, maxResults=500, query=query, ).execute() users = results.get('users',", "from config.config import path_credentials_directory def get_directory_service(): \"\"\" Authorize in Google via OAuth Flow.", "service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members', []) members = [] for", "def create_group(service, email, name, description): \"\"\" Create a Google Group via Google API.", "Groups created en masse might appear after 6-72 hours pass. :param service: Authenticated", "credentials.valid: if credentials and credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory", "https://developers.google.com/admin-sdk/directory/v1/quickstart/python https://developers.google.com/resources/api-libraries/documentation/admin/directory_v1/python/latest/index.html https://developers.google.com/identity/protocols/googlescopes https://developers.google.com/admin-sdk/directory/v1/guides/manage-group-members \"\"\" import pickle import os.path from googleapiclient.discovery import build", "= pickle.load(token) if not credentials or not credentials.valid: if credentials and credentials.expired and", "while 'nextPageToken' in results: results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups', [])", "else: members.append(member) return members def get_users_for_domain(service, domain, query): \"\"\" Get all users for", "the query \"\"\" results = service.members().insert( groupKey=group_key, body={ \"email\": user_email, 
\"role\": role }", "token) service = build('admin', 'directory_v1', credentials=credentials) return service def get_groups_for_domain(service, domain): \"\"\" Get", "None if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle', 'rb') as token: credentials", "results.get('groups', []) while 'nextPageToken' in results: results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups +=", "of the query \"\"\" results = service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\": email, \"name\":", "in Google via OAuth Flow. :return: Authenticated service object. \"\"\" google_api_scopes = [", "credentials.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0) with", "if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle', 'rb') as token: credentials =", "service.members().insert( groupKey=group_key, body={ \"email\": user_email, \"role\": role } ).execute() return results if __name__", "InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle', 'wb')", "/ 'token_directory.pickle', 'wb') as token: pickle.dump(credentials, token) service = build('admin', 'directory_v1', credentials=credentials) return", "in results: results = service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members', [])", "users def create_group(service, email, name, description): \"\"\" Create a Google Group via Google", "Authenticated directory service object :param domain: The domain for groups 
:return: List of", "object :param domain: The domain for users :return: List of all users \"\"\"", "query=query, ).execute() users = results.get('users', []) while 'nextPageToken' in results: results = service.users().list(", ").execute() users += results.get('users', []) return users def create_group(service, email, name, description): \"\"\"", ":return: List of all users \"\"\" results = service.users().list( domain=domain, maxResults=500, query=query, ).execute()", "to a Google Group. :param service: Authenticated directory service object :param group_key: Unique", "return results def add_user_to_group(service, group_key, user_email, role): \"\"\" Add user to a Google", ":return: List of all members \"\"\" results = service.members().list( groupKey=group, maxResults=500 ).execute() direct_members", "build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from config.config import path_credentials_directory", ":param domain: The domain for users :return: List of all users \"\"\" results", "group_key: Unique identifier of the group (string, email, or id) :param user_email: Email", "flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0) with open(path_credentials_directory /", "of all users \"\"\" results = service.users().list( domain=domain, maxResults=500, query=query, ).execute() users =", "def add_user_to_group(service, group_key, user_email, role): \"\"\" Add user to a Google Group. :param", "id for the group :return: List of all members \"\"\" results = service.members().list(", "List of all members \"\"\" results = service.members().list( groupKey=group, maxResults=500 ).execute() direct_members =", "groups for a specified domain via Google API. :param service: Authenticated directory service", "Email of the user :param role: Role of the member :return: Results of", "Google API. 
:param service: Authenticated directory service object :param domain: The domain for", "Add user to a Google Group. :param service: Authenticated directory service object :param", "for a specified group via Google API. :param service: Authenticated directory service object", "members \"\"\" results = service.members().list( groupKey=group, maxResults=500 ).execute() direct_members = results.get('members', []) while", "not credentials or not credentials.valid: if credentials and credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else:", "List of all users \"\"\" results = service.users().list( domain=domain, maxResults=500, query=query, ).execute() users", "service object :param name: Name of the group :return: Results of the query", "The domain for users :return: List of all users \"\"\" results = service.users().list(", "Authenticated directory service object :param name: Name of the group :return: Results of", "\"\"\" Get all members for a specified group via Google API. :param service:", "] credentials = None if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle', 'rb')", "all groups \"\"\" results = service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups', []) while 'nextPageToken'", "\"kind\": \"admin#directory#group\", \"email\": email, \"name\": name, \"description\": description, } ).execute() return results def", "def get_members_for_group(service, group): \"\"\" Get all members for a specified group via Google", "body={ \"email\": user_email, \"role\": role } ).execute() return results if __name__ == '__main__':", "object :param domain: The domain for groups :return: List of all groups \"\"\"", "after 6-72 hours pass. 
:param service: Authenticated directory service object :param name: Name", ":param service: Authenticated directory service object :param domain: The domain for groups :return:", "\"\"\" Get all users for a specified domain via Google API. :param service:", "while 'nextPageToken' in results: results = service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() users", "[ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory", "pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests", "directory service object :param domain: The domain for groups :return: List of all", "domain via Google API. :param service: Authenticated directory service object :param domain: The", "domain for users :return: List of all users \"\"\" results = service.users().list( domain=domain,", "service: Authenticated directory service object :param name: Name of the group :return: Results", "in results: results = service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() users += results.get('users',", "credentials.refresh_token: credentials.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0)", "/ 'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle', 'rb') as token: credentials = pickle.load(token) if", "group :return: Results of the query \"\"\" results = service.groups().insert( body={ \"kind\": \"admin#directory#group\",", "if not credentials or not credentials.valid: if credentials and credentials.expired and 
credentials.refresh_token: credentials.refresh(Request())", "from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from", "of all groups \"\"\" results = service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups', []) while", ":param name: Name of the group :return: Results of the query \"\"\" results", "pass. :param service: Authenticated directory service object :param name: Name of the group", "'nextPageToken' in results: results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups', []) return", "OAuth Flow. :return: Authenticated service object. \"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ]", "member :return: Results of the query \"\"\" results = service.members().insert( groupKey=group_key, body={ \"email\":", "members for a specified group via Google API. :param service: Authenticated directory service", "Authenticated directory service object :param domain: The domain for users :return: List of", "Authenticated directory service object :param group: The id for the group :return: List", "maxResults=500).execute() groups = results.get('groups', []) while 'nextPageToken' in results: results = service.groups().list(domain=domain, maxResults=500,", "= service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() users += results.get('users', []) return users", "'wb') as token: pickle.dump(credentials, token) service = build('admin', 'directory_v1', credentials=credentials) return service def", "for a specified domain via Google API. 
:param service: Authenticated directory service object", "results = service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members', []) members =", "Results of the query \"\"\" results = service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\": email,", "googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from config.config", "= service.members().list( groupKey=group, maxResults=500 ).execute() direct_members = results.get('members', []) while 'nextPageToken' in results:", "domain: The domain for users :return: List of all users \"\"\" results =", "users += results.get('users', []) return users def create_group(service, email, name, description): \"\"\" Create", "specified domain via Google API. :param service: Authenticated directory service object :param domain:", "API. :param service: Authenticated directory service object :param domain: The domain for users", "\"\"\" Authorize in Google via OAuth Flow. :return: Authenticated service object. \"\"\" google_api_scopes", "maxResults=500 ).execute() direct_members = results.get('members', []) while 'nextPageToken' in results: results = service.members().list(", "import path_credentials_directory def get_directory_service(): \"\"\" Authorize in Google via OAuth Flow. :return: Authenticated", "'token_directory.pickle', 'wb') as token: pickle.dump(credentials, token) service = build('admin', 'directory_v1', credentials=credentials) return service", "Google Group. :param service: Authenticated directory service object :param group_key: Unique identifier of", "user :param role: Role of the member :return: Results of the query \"\"\"", "\"\"\" results = service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\": email, \"name\": name, \"description\": description,", "API. 
:param service: Authenticated directory service object :param group: The id for the", ":param domain: The domain for groups :return: List of all groups \"\"\" results", ":return: List of all groups \"\"\" results = service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups',", "credentials = flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle', 'wb') as token: pickle.dump(credentials, token) service", "\"name\": name, \"description\": description, } ).execute() return results def add_user_to_group(service, group_key, user_email, role):", "or not credentials.valid: if credentials and credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else: flow =", "build('admin', 'directory_v1', credentials=credentials) return service def get_groups_for_domain(service, domain): \"\"\" Get all groups for", "the query \"\"\" results = service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\": email, \"name\": name,", ":param service: Authenticated directory service object :param group_key: Unique identifier of the group", "service object :param group: The id for the group :return: List of all", "directory service object :param name: Name of the group :return: Results of the", "member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return members def get_users_for_domain(service, domain, query):", "https://developers.google.com/admin-sdk/directory/v1/guides/manage-group-members \"\"\" import pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import", "} ).execute() return results def add_user_to_group(service, group_key, user_email, role): \"\"\" Add user to", "name, description): \"\"\" Create a Google Group via Google API. 
Groups created en", ":param group_key: Unique identifier of the group (string, email, or id) :param user_email:", "query): \"\"\" Get all users for a specified domain via Google API. :param", "role: Role of the member :return: Results of the query \"\"\" results =", "'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory /", "service.users().list( domain=domain, maxResults=500, query=query, ).execute() users = results.get('users', []) while 'nextPageToken' in results:", "query=query, pageToken=results['nextPageToken'] ).execute() users += results.get('users', []) return users def create_group(service, email, name,", "Group via Google API. Groups created en masse might appear after 6-72 hours", "[] for member in direct_members: if member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member)", "'nextPageToken' in results: results = service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members',", "the user :param role: Role of the member :return: Results of the query", "= [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with", "groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members', []) members = [] for member", "domain, query): \"\"\" Get all users for a specified domain via Google API.", "members.append(member) return members def get_users_for_domain(service, domain, query): \"\"\" Get all users for a", ").execute() return results def add_user_to_group(service, group_key, user_email, role): \"\"\" Add user to a", 
"identifier of the group (string, email, or id) :param user_email: Email of the", "\"\"\" import pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow", "config.config import path_credentials_directory def get_directory_service(): \"\"\" Authorize in Google via OAuth Flow. :return:", "body={ \"kind\": \"admin#directory#group\", \"email\": email, \"name\": name, \"description\": description, } ).execute() return results", "= build('admin', 'directory_v1', credentials=credentials) return service def get_groups_for_domain(service, domain): \"\"\" Get all groups", "token: credentials = pickle.load(token) if not credentials or not credentials.valid: if credentials and", "+= results.get('members', []) members = [] for member in direct_members: if member['type'] ==", "member['email'])) else: members.append(member) return members def get_users_for_domain(service, domain, query): \"\"\" Get all users", "direct_members: if member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return members def get_users_for_domain(service,", "via Google API. :param service: Authenticated directory service object :param domain: The domain", "= service.users().list( domain=domain, maxResults=500, query=query, ).execute() users = results.get('users', []) while 'nextPageToken' in", "'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle', 'rb') as token: credentials = pickle.load(token) if not", "for groups :return: List of all groups \"\"\" results = service.groups().list(domain=domain, maxResults=500).execute() groups", "the group (string, email, or id) :param user_email: Email of the user :param", "masse might appear after 6-72 hours pass. 
:param service: Authenticated directory service object", "= [] for member in direct_members: if member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email'])) else:", "'nextPageToken' in results: results = service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() users +=", "object. \"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None if os.path.exists(path_credentials_directory", "user_email, role): \"\"\" Add user to a Google Group. :param service: Authenticated directory", "for users :return: List of all users \"\"\" results = service.users().list( domain=domain, maxResults=500,", "results: results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups', []) return groups def", "group): \"\"\" Get all members for a specified group via Google API. 
:param", "members def get_users_for_domain(service, domain, query): \"\"\" Get all users for a specified domain", "results = service.users().list( domain=domain, maxResults=500, query=query, ).execute() users = results.get('users', []) while 'nextPageToken'", "maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups', []) return groups def get_members_for_group(service, group): \"\"\" Get", "google.auth.transport.requests import Request from config.config import path_credentials_directory def get_directory_service(): \"\"\" Authorize in Google", "members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return members def get_users_for_domain(service, domain, query): \"\"\" Get all", "= service.members().insert( groupKey=group_key, body={ \"email\": user_email, \"role\": role } ).execute() return results if", "results = service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() users += results.get('users', []) return", "Role of the member :return: Results of the query \"\"\" results = service.members().insert(", "service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups', []) while 'nextPageToken' in results: results = service.groups().list(domain=domain,", "= InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle',", "service.members().list( groupKey=group, maxResults=500 ).execute() direct_members = results.get('members', []) while 'nextPageToken' in results: results", "as token: credentials = pickle.load(token) if not credentials or not credentials.valid: if credentials", "'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle', 'wb') as token: 
pickle.dump(credentials,", "domain for groups :return: List of all groups \"\"\" results = service.groups().list(domain=domain, maxResults=500).execute()", "API. Groups created en masse might appear after 6-72 hours pass. :param service:", "results.get('groups', []) return groups def get_members_for_group(service, group): \"\"\" Get all members for a", "Authorize in Google via OAuth Flow. :return: Authenticated service object. \"\"\" google_api_scopes =", "= service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups', []) return groups def get_members_for_group(service, group):", "import pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from", "all users for a specified domain via Google API. :param service: Authenticated directory", "Unique identifier of the group (string, email, or id) :param user_email: Email of", "of the group :return: Results of the query \"\"\" results = service.groups().insert( body={", "service object :param domain: The domain for users :return: List of all users", "for the group :return: List of all members \"\"\" results = service.members().list( groupKey=group,", "query \"\"\" results = service.groups().insert( body={ \"kind\": \"admin#directory#group\", \"email\": email, \"name\": name, \"description\":", "get_groups_for_domain(service, domain): \"\"\" Get all groups for a specified domain via Google API.", "service object :param domain: The domain for groups :return: List of all groups", ").execute() direct_members = results.get('members', []) while 'nextPageToken' in results: results = service.members().list( groupKey=group,", "id) :param user_email: Email of the user :param role: Role of the member", "'rb') as token: credentials = pickle.load(token) if not credentials or not credentials.valid: if", "Get all groups for a specified domain via Google API. 
:param service: Authenticated", "users \"\"\" results = service.users().list( domain=domain, maxResults=500, query=query, ).execute() users = results.get('users', [])", "groupKey=group_key, body={ \"email\": user_email, \"role\": role } ).execute() return results if __name__ ==", "= results.get('groups', []) while 'nextPageToken' in results: results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups", "credentials = pickle.load(token) if not credentials or not credentials.valid: if credentials and credentials.expired", "results: results = service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members', []) members", "return members def get_users_for_domain(service, domain, query): \"\"\" Get all users for a specified", "query \"\"\" results = service.members().insert( groupKey=group_key, body={ \"email\": user_email, \"role\": role } ).execute()", "+= results.get('users', []) return users def create_group(service, email, name, description): \"\"\" Create a", "might appear after 6-72 hours pass. 
:param service: Authenticated directory service object :param", "= service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups', []) while 'nextPageToken' in results: results =", "groups = results.get('groups', []) while 'nextPageToken' in results: results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute()", "for member in direct_members: if member['type'] == 'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return", "results.get('members', []) while 'nextPageToken' in results: results = service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute()", "'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None if os.path.exists(path_credentials_directory / 'token_directory.pickle'): with open(path_credentials_directory / 'token_directory.pickle',", "maxResults=500, query=query, ).execute() users = results.get('users', []) while 'nextPageToken' in results: results =", "user to a Google Group. :param service: Authenticated directory service object :param group_key:", "= results.get('users', []) while 'nextPageToken' in results: results = service.users().list( domain=domain, maxResults=500, query=query,", "object :param group: The id for the group :return: List of all members", "\"\"\" Create a Google Group via Google API. Groups created en masse might", "Get all users for a specified domain via Google API. 
:param service: Authenticated", "in results: results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups', []) return groups", "credentials and credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json',", "email, name, description): \"\"\" Create a Google Group via Google API. Groups created", "domain): \"\"\" Get all groups for a specified domain via Google API. :param", "credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes) credentials", ":param service: Authenticated directory service object :param name: Name of the group :return:", "6-72 hours pass. :param service: Authenticated directory service object :param name: Name of", "import Request from config.config import path_credentials_directory def get_directory_service(): \"\"\" Authorize in Google via", "\"email\": user_email, \"role\": role } ).execute() return results if __name__ == '__main__': get_directory_service()", "Authenticated service object. 
\"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None", "List of all groups \"\"\" results = service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups', [])", "maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members', []) members = [] for member in", "(string, email, or id) :param user_email: Email of the user :param role: Role", "\"admin#directory#group\", \"email\": email, \"name\": name, \"description\": description, } ).execute() return results def add_user_to_group(service,", "google_api_scopes) credentials = flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle', 'wb') as token: pickle.dump(credentials, token)", "\"\"\" results = service.groups().list(domain=domain, maxResults=500).execute() groups = results.get('groups', []) while 'nextPageToken' in results:", "results.get('users', []) return users def create_group(service, email, name, description): \"\"\" Create a Google", "path_credentials_directory def get_directory_service(): \"\"\" Authorize in Google via OAuth Flow. :return: Authenticated service", "and credentials.refresh_token: credentials.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes) credentials =", "via OAuth Flow. :return: Authenticated service object. \"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group'", "Google via OAuth Flow. :return: Authenticated service object. \"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user',", "API. 
:param service: Authenticated directory service object :param domain: The domain for groups", "group :return: List of all members \"\"\" results = service.members().list( groupKey=group, maxResults=500 ).execute()", "open(path_credentials_directory / 'token_directory.pickle', 'rb') as token: credentials = pickle.load(token) if not credentials or", "\"\"\" Get all groups for a specified domain via Google API. :param service:", "add_user_to_group(service, group_key, user_email, role): \"\"\" Add user to a Google Group. :param service:", "users :return: List of all users \"\"\" results = service.users().list( domain=domain, maxResults=500, query=query,", "results.get('users', []) while 'nextPageToken' in results: results = service.users().list( domain=domain, maxResults=500, query=query, pageToken=results['nextPageToken']", "Google API. :param service: Authenticated directory service object :param group: The id for", "Request from config.config import path_credentials_directory def get_directory_service(): \"\"\" Authorize in Google via OAuth", "group_key, user_email, role): \"\"\" Add user to a Google Group. 
:param service: Authenticated", "results.get('members', []) members = [] for member in direct_members: if member['type'] == 'GROUP':", "email, \"name\": name, \"description\": description, } ).execute() return results def add_user_to_group(service, group_key, user_email,", "of the member :return: Results of the query \"\"\" results = service.members().insert( groupKey=group_key,", "get_users_for_domain(service, domain, query): \"\"\" Get all users for a specified domain via Google", "pageToken=results['nextPageToken']).execute() groups += results.get('groups', []) return groups def get_members_for_group(service, group): \"\"\" Get all", "pageToken=results['nextPageToken'] ).execute() direct_members += results.get('members', []) members = [] for member in direct_members:", "'token_directory.pickle', 'rb') as token: credentials = pickle.load(token) if not credentials or not credentials.valid:", "groups def get_members_for_group(service, group): \"\"\" Get all members for a specified group via", "import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import", "Create a Google Group via Google API. 
Groups created en masse might appear", ").execute() direct_members += results.get('members', []) members = [] for member in direct_members: if", "'directory_v1', credentials=credentials) return service def get_groups_for_domain(service, domain): \"\"\" Get all groups for a", "domain: The domain for groups :return: List of all groups \"\"\" results =", "direct_members += results.get('members', []) members = [] for member in direct_members: if member['type']", "<filename>api_google/google_api_directory.py \"\"\" https://developers.google.com/admin-sdk/directory/v1/quickstart/python https://developers.google.com/resources/api-libraries/documentation/admin/directory_v1/python/latest/index.html https://developers.google.com/identity/protocols/googlescopes https://developers.google.com/admin-sdk/directory/v1/guides/manage-group-members \"\"\" import pickle import os.path from googleapiclient.discovery", "def get_users_for_domain(service, domain, query): \"\"\" Get all users for a specified domain via", "/ 'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle', 'wb') as token:", "description, } ).execute() return results def add_user_to_group(service, group_key, user_email, role): \"\"\" Add user", "a Google Group. :param service: Authenticated directory service object :param group_key: Unique identifier", "https://developers.google.com/identity/protocols/googlescopes https://developers.google.com/admin-sdk/directory/v1/guides/manage-group-members \"\"\" import pickle import os.path from googleapiclient.discovery import build from google_auth_oauthlib.flow", "\"\"\" Add user to a Google Group. 
:param service: Authenticated directory service object", "google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from config.config import path_credentials_directory def get_directory_service():", "name: Name of the group :return: Results of the query \"\"\" results =", "all members \"\"\" results = service.members().list( groupKey=group, maxResults=500 ).execute() direct_members = results.get('members', [])", "domain=domain, maxResults=500, query=query, ).execute() users = results.get('users', []) while 'nextPageToken' in results: results", ":param user_email: Email of the user :param role: Role of the member :return:", "via Google API. :param service: Authenticated directory service object :param group: The id", "the member :return: Results of the query \"\"\" results = service.members().insert( groupKey=group_key, body={", "Flow. :return: Authenticated service object. \"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials", "path_credentials_directory / 'credentials.json', google_api_scopes) credentials = flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle', 'wb') as", "object :param group_key: Unique identifier of the group (string, email, or id) :param", "== 'GROUP': members.extend(get_members_for_group(service, member['email'])) else: members.append(member) return members def get_users_for_domain(service, domain, query): \"\"\"", "/ 'token_directory.pickle', 'rb') as token: credentials = pickle.load(token) if not credentials or not", "results = service.groups().list(domain=domain, maxResults=500, pageToken=results['nextPageToken']).execute() groups += results.get('groups', []) return groups def get_members_for_group(service,", ":param group: The id for the group :return: List of all members \"\"\"", "maxResults=500, query=query, pageToken=results['nextPageToken'] ).execute() 
users += results.get('users', []) return users def create_group(service, email,", "users = results.get('users', []) while 'nextPageToken' in results: results = service.users().list( domain=domain, maxResults=500,", "of the query \"\"\" results = service.members().insert( groupKey=group_key, body={ \"email\": user_email, \"role\": role", "Results of the query \"\"\" results = service.members().insert( groupKey=group_key, body={ \"email\": user_email, \"role\":", "results = service.members().insert( groupKey=group_key, body={ \"email\": user_email, \"role\": role } ).execute() return results", "= flow.run_local_server(port=0) with open(path_credentials_directory / 'token_directory.pickle', 'wb') as token: pickle.dump(credentials, token) service =", "and credentials.expired and credentials.refresh_token: credentials.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( path_credentials_directory / 'credentials.json', google_api_scopes)", "hours pass. :param service: Authenticated directory service object :param name: Name of the", "\"email\": email, \"name\": name, \"description\": description, } ).execute() return results def add_user_to_group(service, group_key,", "created en masse might appear after 6-72 hours pass. :param service: Authenticated directory", "The id for the group :return: List of all members \"\"\" results =", "the group :return: List of all members \"\"\" results = service.members().list( groupKey=group, maxResults=500", "service: Authenticated directory service object :param group_key: Unique identifier of the group (string,", "service: Authenticated directory service object :param domain: The domain for groups :return: List", "\"\"\" results = service.members().insert( groupKey=group_key, body={ \"email\": user_email, \"role\": role } ).execute() return", "specified group via Google API. :param service: Authenticated directory service object :param group:", "a specified group via Google API. 
:param service: Authenticated directory service object :param", "get_directory_service(): \"\"\" Authorize in Google via OAuth Flow. :return: Authenticated service object. \"\"\"", "while 'nextPageToken' in results: results = service.members().list( groupKey=group, maxResults=500, pageToken=results['nextPageToken'] ).execute() direct_members +=", "service object. \"\"\" google_api_scopes = [ 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/admin.directory.group' ] credentials = None if", "import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from config.config import", "The domain for groups :return: List of all groups \"\"\" results = service.groups().list(domain=domain,", "groupKey=group, maxResults=500 ).execute() direct_members = results.get('members', []) while 'nextPageToken' in results: results =", "pickle.dump(credentials, token) service = build('admin', 'directory_v1', credentials=credentials) return service def get_groups_for_domain(service, domain): \"\"\"" ]
[ "True self._t._running = True self._t.start() print('Websocket thread started') def stop(self): \"\"\" Stop/join the", "def start(self): \"\"\" Run the websocket in a thread \"\"\" self._t = Thread(target=self._ws.run_forever)", "if 'error' in message: return logger.error(message['error']) if message[0] == 1002: if message[1] ==", "} def on_error(self, ws, error): print(error) def on_close(self, ws): if self._t._running: try: self.stop()", "= Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running = True self._t.start() print('Websocket thread started') def", "import websocket from poloniex import Poloniex class PWSTicker(object): def __init__(self, api=None): self.api =", "'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr': data[8], 'low24hr': data[9] } def on_error(self,", "on_close(self, ws): if self._t._running: try: self.stop() except Exception as e: print(e) try: self.start()", "__call__(self, market=None): \"\"\" returns ticker from mongodb \"\"\" if market: return self.tick[self._ids[market]] return", "as e: print(e) try: self.start() except Exception as e: print(e) self.stop() else: print(\"Websocket", "import json from multiprocessing.dummy import Process as Thread import websocket from poloniex import", "the websocket thread \"\"\" self._t._running = False self._ws.close() self._t.join() print('Websocket thread stopped/joined') def", "'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr': data[8], 'low24hr': data[9] }", "market in iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close)", "data] self.tick[data[0]] = {'id': data[0], 'last': data[1], 'lowestAsk': data[2], 'highestBid': data[3], 'percentChange': data[4],", "return False def start(self): \"\"\" Run the websocket in a thread \"\"\" 
self._t", "def on_message(self, ws, message): message = json.loads(message) if 'error' in message: return logger.error(message['error'])", "iniTick} for market in iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message,", "ws): if self._t._running: try: self.stop() except Exception as e: print(e) try: self.start() except", "0: return logger.info('Unsubscribed to ticker') data = message[2] data = [float(dat) for dat", "logger.error(message['error']) if message[0] == 1002: if message[1] == 1: return logger.info('Subscribed to ticker')", "self.tick = {} iniTick = self.api.returnTicker() self._ids = {market: iniTick[market]['id'] for market in", "ticker') data = message[2] data = [float(dat) for dat in data] self.tick[data[0]] =", "closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property def status(self): \"\"\" Returns", "on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def on_message(self, ws, message): message = json.loads(message) if 'error' in", "def __init__(self, api=None): self.api = api if not self.api: self.api = Poloniex(jsonNums=float) self.tick", "try: return self._t._running except: return False def start(self): \"\"\" Run the websocket in", "print(\"Websocket closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property def status(self): \"\"\"", "try: self.stop() except Exception as e: print(e) try: self.start() except Exception as e:", "if the websocket is running, False if not \"\"\" try: return self._t._running except:", "'error' in message: return logger.error(message['error']) if message[0] == 1002: if message[1] == 1:", "websocket from poloniex import Poloniex class PWSTicker(object): def __init__(self, api=None): self.api = api", "is running, False if not \"\"\" try: return self._t._running except: 
return False def", "def on_error(self, ws, error): print(error) def on_close(self, ws): if self._t._running: try: self.stop() except", "ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property def status(self): \"\"\" Returns True if the", "Exception as e: print(e) try: self.start() except Exception as e: print(e) self.stop() else:", "iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def on_message(self,", "PWSTicker(object): def __init__(self, api=None): self.api = api if not self.api: self.api = Poloniex(jsonNums=float)", "iniTick[market]['id'] for market in iniTick} for market in iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws", "self.stop() else: print(\"Websocket closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property def", "started') def stop(self): \"\"\" Stop/join the websocket thread \"\"\" self._t._running = False self._ws.close()", "not self.api: self.api = Poloniex(jsonNums=float) self.tick = {} iniTick = self.api.returnTicker() self._ids =", "api if not self.api: self.api = Poloniex(jsonNums=float) self.tick = {} iniTick = self.api.returnTicker()", "in message: return logger.error(message['error']) if message[0] == 1002: if message[1] == 1: return", "self._ids = {market: iniTick[market]['id'] for market in iniTick} for market in iniTick: self.tick[self._ids[market]]", "if message[1] == 0: return logger.info('Unsubscribed to ticker') data = message[2] data =", "data = message[2] data = [float(dat) for dat in data] self.tick[data[0]] = {'id':", "'low24hr': data[9] } def on_error(self, ws, error): print(error) def on_close(self, ws): if self._t._running:", "def on_close(self, ws): if self._t._running: try: self.stop() except Exception as e: print(e) try:", "self.api = 
Poloniex(jsonNums=float) self.tick = {} iniTick = self.api.returnTicker() self._ids = {market: iniTick[market]['id']", "message[0] == 1002: if message[1] == 1: return logger.info('Subscribed to ticker') if message[1]", "Returns True if the websocket is running, False if not \"\"\" try: return", "for dat in data] self.tick[data[0]] = {'id': data[0], 'last': data[1], 'lowestAsk': data[2], 'highestBid':", "return logger.info('Unsubscribed to ticker') data = message[2] data = [float(dat) for dat in", "'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr': data[8], 'low24hr': data[9] } def on_error(self, ws, error):", "self._t._running: try: self.stop() except Exception as e: print(e) try: self.start() except Exception as", "= True self._t.start() print('Websocket thread started') def stop(self): \"\"\" Stop/join the websocket thread", "= iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def on_message(self, ws, message):", "self._t = Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running = True self._t.start() print('Websocket thread started')", "self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property def status(self): \"\"\" Returns True if the websocket", "Stop/join the websocket thread \"\"\" self._t._running = False self._ws.close() self._t.join() print('Websocket thread stopped/joined')", "self.start() except Exception as e: print(e) self.stop() else: print(\"Websocket closed!\") def on_open(self, ws):", "self._t.start() print('Websocket thread started') def stop(self): \"\"\" Stop/join the websocket thread \"\"\" self._t._running", "def __call__(self, market=None): \"\"\" returns ticker from mongodb \"\"\" if market: return self.tick[self._ids[market]]", "= websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, 
on_close=self.on_close) def on_message(self, ws, message): message = json.loads(message)", "self._ws.close() self._t.join() print('Websocket thread stopped/joined') def __call__(self, market=None): \"\"\" returns ticker from mongodb", "the websocket in a thread \"\"\" self._t = Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running", "in a thread \"\"\" self._t = Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running = True", "stopped/joined') def __call__(self, market=None): \"\"\" returns ticker from mongodb \"\"\" if market: return", "data[3], 'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr': data[8], 'low24hr': data[9]", "= self.api.returnTicker() self._ids = {market: iniTick[market]['id'] for market in iniTick} for market in", "on_message(self, ws, message): message = json.loads(message) if 'error' in message: return logger.error(message['error']) if", "as e: print(e) self.stop() else: print(\"Websocket closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel':", "websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def on_message(self, ws, message): message = json.loads(message) if", "print(error) def on_close(self, ws): if self._t._running: try: self.stop() except Exception as e: print(e)", "return logger.info('Subscribed to ticker') if message[1] == 0: return logger.info('Unsubscribed to ticker') data", "print(e) try: self.start() except Exception as e: print(e) self.stop() else: print(\"Websocket closed!\") def", "on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property def status(self): \"\"\" Returns True if", "'subscribe', 'channel': 1002})) @property def status(self): \"\"\" Returns True if the websocket is", "start(self): \"\"\" Run the websocket in a thread \"\"\" self._t = 
Thread(target=self._ws.run_forever) self._t.daemon", "on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def on_message(self, ws, message): message = json.loads(message) if 'error'", "multiprocessing.dummy import Process as Thread import websocket from poloniex import Poloniex class PWSTicker(object):", "= api if not self.api: self.api = Poloniex(jsonNums=float) self.tick = {} iniTick =", "if not self.api: self.api = Poloniex(jsonNums=float) self.tick = {} iniTick = self.api.returnTicker() self._ids", "logger.info('Subscribed to ticker') if message[1] == 0: return logger.info('Unsubscribed to ticker') data =", "market in iniTick} for market in iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\",", "else: print(\"Websocket closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property def status(self):", "data = [float(dat) for dat in data] self.tick[data[0]] = {'id': data[0], 'last': data[1],", "class PWSTicker(object): def __init__(self, api=None): self.api = api if not self.api: self.api =", "websocket is running, False if not \"\"\" try: return self._t._running except: return False", "for market in iniTick} for market in iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws =", "from poloniex import Poloniex class PWSTicker(object): def __init__(self, api=None): self.api = api if", "\"\"\" self._t._running = False self._ws.close() self._t.join() print('Websocket thread stopped/joined') def __call__(self, market=None): \"\"\"", "'high24hr': data[8], 'low24hr': data[9] } def on_error(self, ws, error): print(error) def on_close(self, ws):", "e: print(e) self.stop() else: print(\"Websocket closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002}))", "e: print(e) try: self.start() except Exception as e: print(e) self.stop() else: print(\"Websocket 
closed!\")", "self._t.daemon = True self._t._running = True self._t.start() print('Websocket thread started') def stop(self): \"\"\"", "\"\"\" self._t = Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running = True self._t.start() print('Websocket thread", "message[2] data = [float(dat) for dat in data] self.tick[data[0]] = {'id': data[0], 'last':", "error): print(error) def on_close(self, ws): if self._t._running: try: self.stop() except Exception as e:", "== 0: return logger.info('Unsubscribed to ticker') data = message[2] data = [float(dat) for", "thread \"\"\" self._t._running = False self._ws.close() self._t.join() print('Websocket thread stopped/joined') def __call__(self, market=None):", "print(e) self.stop() else: print(\"Websocket closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property", "if message[0] == 1002: if message[1] == 1: return logger.info('Subscribed to ticker') if", "if self._t._running: try: self.stop() except Exception as e: print(e) try: self.start() except Exception", "as Thread import websocket from poloniex import Poloniex class PWSTicker(object): def __init__(self, api=None):", "iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def on_message(self, ws, message): message", "except Exception as e: print(e) self.stop() else: print(\"Websocket closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command':", "def stop(self): \"\"\" Stop/join the websocket thread \"\"\" self._t._running = False self._ws.close() self._t.join()", "Poloniex(jsonNums=float) self.tick = {} iniTick = self.api.returnTicker() self._ids = {market: iniTick[market]['id'] for market", "status(self): \"\"\" Returns True if the websocket is running, False if not \"\"\"", "poloniex import Poloniex class PWSTicker(object): def __init__(self, api=None): self.api = api 
if not", "except: return False def start(self): \"\"\" Run the websocket in a thread \"\"\"", "{'id': data[0], 'last': data[1], 'lowestAsk': data[2], 'highestBid': data[3], 'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume':", "self.api: self.api = Poloniex(jsonNums=float) self.tick = {} iniTick = self.api.returnTicker() self._ids = {market:", "iniTick = self.api.returnTicker() self._ids = {market: iniTick[market]['id'] for market in iniTick} for market", "Thread import websocket from poloniex import Poloniex class PWSTicker(object): def __init__(self, api=None): self.api", "running, False if not \"\"\" try: return self._t._running except: return False def start(self):", "data[6], 'isFrozen': data[7], 'high24hr': data[8], 'low24hr': data[9] } def on_error(self, ws, error): print(error)", "__init__(self, api=None): self.api = api if not self.api: self.api = Poloniex(jsonNums=float) self.tick =", "self.api = api if not self.api: self.api = Poloniex(jsonNums=float) self.tick = {} iniTick", "import Process as Thread import websocket from poloniex import Poloniex class PWSTicker(object): def", "message: return logger.error(message['error']) if message[0] == 1002: if message[1] == 1: return logger.info('Subscribed", "[float(dat) for dat in data] self.tick[data[0]] = {'id': data[0], 'last': data[1], 'lowestAsk': data[2],", "data[0], 'last': data[1], 'lowestAsk': data[2], 'highestBid': data[3], 'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume': data[6],", "'highestBid': data[3], 'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr': data[8], 'low24hr':", "= message[2] data = [float(dat) for dat in data] self.tick[data[0]] = {'id': data[0],", "1002: if message[1] == 1: return logger.info('Subscribed to ticker') if message[1] == 0:", "Poloniex class PWSTicker(object): def __init__(self, api=None): self.api = api if not self.api: self.api", "try: self.start() except Exception as e: print(e) 
self.stop() else: print(\"Websocket closed!\") def on_open(self,", "import Poloniex class PWSTicker(object): def __init__(self, api=None): self.api = api if not self.api:", "\"\"\" Returns True if the websocket is running, False if not \"\"\" try:", "<gh_stars>0 import json from multiprocessing.dummy import Process as Thread import websocket from poloniex", "1: return logger.info('Subscribed to ticker') if message[1] == 0: return logger.info('Unsubscribed to ticker')", "to ticker') if message[1] == 0: return logger.info('Unsubscribed to ticker') data = message[2]", "'last': data[1], 'lowestAsk': data[2], 'highestBid': data[3], 'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen':", "= json.loads(message) if 'error' in message: return logger.error(message['error']) if message[0] == 1002: if", "ws, error): print(error) def on_close(self, ws): if self._t._running: try: self.stop() except Exception as", "the websocket is running, False if not \"\"\" try: return self._t._running except: return", "not \"\"\" try: return self._t._running except: return False def start(self): \"\"\" Run the", "\"\"\" try: return self._t._running except: return False def start(self): \"\"\" Run the websocket", "self.tick[self._ids[market]] = iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def on_message(self, ws,", "'channel': 1002})) @property def status(self): \"\"\" Returns True if the websocket is running,", "print('Websocket thread stopped/joined') def __call__(self, market=None): \"\"\" returns ticker from mongodb \"\"\" if", "message[1] == 1: return logger.info('Subscribed to ticker') if message[1] == 0: return logger.info('Unsubscribed", "ticker') if message[1] == 0: return logger.info('Unsubscribed to ticker') data = message[2] data", "= {} iniTick = self.api.returnTicker() self._ids = {market: iniTick[market]['id'] for 
market in iniTick}", "in iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def", "message = json.loads(message) if 'error' in message: return logger.error(message['error']) if message[0] == 1002:", "self._t._running = False self._ws.close() self._t.join() print('Websocket thread stopped/joined') def __call__(self, market=None): \"\"\" returns", "data[1], 'lowestAsk': data[2], 'highestBid': data[3], 'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen': data[7],", "return logger.error(message['error']) if message[0] == 1002: if message[1] == 1: return logger.info('Subscribed to", "Exception as e: print(e) self.stop() else: print(\"Websocket closed!\") def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe',", "in data] self.tick[data[0]] = {'id': data[0], 'last': data[1], 'lowestAsk': data[2], 'highestBid': data[3], 'percentChange':", "in iniTick} for market in iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open,", "on_error(self, ws, error): print(error) def on_close(self, ws): if self._t._running: try: self.stop() except Exception", "@property def status(self): \"\"\" Returns True if the websocket is running, False if", "json from multiprocessing.dummy import Process as Thread import websocket from poloniex import Poloniex", "= False self._ws.close() self._t.join() print('Websocket thread stopped/joined') def __call__(self, market=None): \"\"\" returns ticker", "data[5], 'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr': data[8], 'low24hr': data[9] } def on_error(self, ws,", "self._t.join() print('Websocket thread stopped/joined') def __call__(self, market=None): \"\"\" returns ticker from mongodb \"\"\"", "market=None): \"\"\" returns ticker from mongodb 
\"\"\" if market: return self.tick[self._ids[market]] return self.tick", "self.api.returnTicker() self._ids = {market: iniTick[market]['id'] for market in iniTick} for market in iniTick:", "ws, message): message = json.loads(message) if 'error' in message: return logger.error(message['error']) if message[0]", "data[8], 'low24hr': data[9] } def on_error(self, ws, error): print(error) def on_close(self, ws): if", "True if the websocket is running, False if not \"\"\" try: return self._t._running", "for market in iniTick: self.tick[self._ids[market]] = iniTick[market] self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error,", "on_error=self.on_error, on_close=self.on_close) def on_message(self, ws, message): message = json.loads(message) if 'error' in message:", "1002})) @property def status(self): \"\"\" Returns True if the websocket is running, False", "\"\"\" Run the websocket in a thread \"\"\" self._t = Thread(target=self._ws.run_forever) self._t.daemon =", "False def start(self): \"\"\" Run the websocket in a thread \"\"\" self._t =", "def on_open(self, ws): self._ws.send(json.dumps({'command': 'subscribe', 'channel': 1002})) @property def status(self): \"\"\" Returns True", "dat in data] self.tick[data[0]] = {'id': data[0], 'last': data[1], 'lowestAsk': data[2], 'highestBid': data[3],", "False self._ws.close() self._t.join() print('Websocket thread stopped/joined') def __call__(self, market=None): \"\"\" returns ticker from", "logger.info('Unsubscribed to ticker') data = message[2] data = [float(dat) for dat in data]", "a thread \"\"\" self._t = Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running = True self._t.start()", "Run the websocket in a thread \"\"\" self._t = Thread(target=self._ws.run_forever) self._t.daemon = True", "== 1: return logger.info('Subscribed to ticker') if message[1] == 0: return logger.info('Unsubscribed to", "= [float(dat) for dat in 
data] self.tick[data[0]] = {'id': data[0], 'last': data[1], 'lowestAsk':", "data[7], 'high24hr': data[8], 'low24hr': data[9] } def on_error(self, ws, error): print(error) def on_close(self,", "print('Websocket thread started') def stop(self): \"\"\" Stop/join the websocket thread \"\"\" self._t._running =", "message): message = json.loads(message) if 'error' in message: return logger.error(message['error']) if message[0] ==", "= Poloniex(jsonNums=float) self.tick = {} iniTick = self.api.returnTicker() self._ids = {market: iniTick[market]['id'] for", "= {'id': data[0], 'last': data[1], 'lowestAsk': data[2], 'highestBid': data[3], 'percentChange': data[4], 'baseVolume': data[5],", "self.tick[data[0]] = {'id': data[0], 'last': data[1], 'lowestAsk': data[2], 'highestBid': data[3], 'percentChange': data[4], 'baseVolume':", "api=None): self.api = api if not self.api: self.api = Poloniex(jsonNums=float) self.tick = {}", "thread \"\"\" self._t = Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running = True self._t.start() print('Websocket", "\"\"\" Stop/join the websocket thread \"\"\" self._t._running = False self._ws.close() self._t.join() print('Websocket thread", "except Exception as e: print(e) try: self.start() except Exception as e: print(e) self.stop()", "return self._t._running except: return False def start(self): \"\"\" Run the websocket in a", "json.loads(message) if 'error' in message: return logger.error(message['error']) if message[0] == 1002: if message[1]", "self._t._running = True self._t.start() print('Websocket thread started') def stop(self): \"\"\" Stop/join the websocket", "data[4], 'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr': data[8], 'low24hr': data[9] } def", "thread started') def stop(self): \"\"\" Stop/join the websocket thread \"\"\" self._t._running = False", "{market: iniTick[market]['id'] for market in iniTick} for market in iniTick: self.tick[self._ids[market]] = iniTick[market]", 
"'isFrozen': data[7], 'high24hr': data[8], 'low24hr': data[9] } def on_error(self, ws, error): print(error) def", "data[9] } def on_error(self, ws, error): print(error) def on_close(self, ws): if self._t._running: try:", "False if not \"\"\" try: return self._t._running except: return False def start(self): \"\"\"", "Process as Thread import websocket from poloniex import Poloniex class PWSTicker(object): def __init__(self,", "self._t._running except: return False def start(self): \"\"\" Run the websocket in a thread", "if message[1] == 1: return logger.info('Subscribed to ticker') if message[1] == 0: return", "data[2], 'highestBid': data[3], 'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr': data[8],", "to ticker') data = message[2] data = [float(dat) for dat in data] self.tick[data[0]]", "True self._t.start() print('Websocket thread started') def stop(self): \"\"\" Stop/join the websocket thread \"\"\"", "from multiprocessing.dummy import Process as Thread import websocket from poloniex import Poloniex class", "self.stop() except Exception as e: print(e) try: self.start() except Exception as e: print(e)", "'lowestAsk': data[2], 'highestBid': data[3], 'percentChange': data[4], 'baseVolume': data[5], 'quoteVolume': data[6], 'isFrozen': data[7], 'high24hr':", "== 1002: if message[1] == 1: return logger.info('Subscribed to ticker') if message[1] ==", "message[1] == 0: return logger.info('Unsubscribed to ticker') data = message[2] data = [float(dat)", "websocket in a thread \"\"\" self._t = Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running =", "def status(self): \"\"\" Returns True if the websocket is running, False if not", "= True self._t._running = True self._t.start() print('Websocket thread started') def stop(self): \"\"\" Stop/join", "Thread(target=self._ws.run_forever) self._t.daemon = True self._t._running = True self._t.start() print('Websocket thread started') def stop(self):", 
"thread stopped/joined') def __call__(self, market=None): \"\"\" returns ticker from mongodb \"\"\" if market:", "{} iniTick = self.api.returnTicker() self._ids = {market: iniTick[market]['id'] for market in iniTick} for", "if not \"\"\" try: return self._t._running except: return False def start(self): \"\"\" Run", "self._ws = websocket.WebSocketApp(\"wss://api2.poloniex.com/\", on_open=self.on_open, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close) def on_message(self, ws, message): message =", "= {market: iniTick[market]['id'] for market in iniTick} for market in iniTick: self.tick[self._ids[market]] =", "websocket thread \"\"\" self._t._running = False self._ws.close() self._t.join() print('Websocket thread stopped/joined') def __call__(self,", "on_close=self.on_close) def on_message(self, ws, message): message = json.loads(message) if 'error' in message: return", "stop(self): \"\"\" Stop/join the websocket thread \"\"\" self._t._running = False self._ws.close() self._t.join() print('Websocket" ]
[ "2.2.1 on 2019-05-20 09:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "2019-05-20 09:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")]", "Generated by Django 2.2.1 on 2019-05-20 09:40 from django.db import migrations, models class", "migrations, models class Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations = [ migrations.AlterField( model_name=\"bin\",", "Django 2.2.1 on 2019-05-20 09:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "models class Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations = [ migrations.AlterField( model_name=\"bin\", name=\"name\",", "class Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations = [ migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255,", "# Generated by Django 2.2.1 on 2019-05-20 09:40 from django.db import migrations, models", "by Django 2.2.1 on 2019-05-20 09:40 from django.db import migrations, models class Migration(migrations.Migration):", "import migrations, models class Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations = [ migrations.AlterField(", "Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations = [ migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True),", "= [ migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True), ), migrations.AlterField( model_name=\"label\", name=\"name\", field=models.CharField(max_length=255, unique=True),", "<filename>crashbin_app/migrations/0009_unique_names.py # Generated by 
Django 2.2.1 on 2019-05-20 09:40 from django.db import migrations,", "on 2019-05-20 09:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"crashbin_app\",", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations = [", "[(\"crashbin_app\", \"0008_create_mailbox\")] operations = [ migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True), ), migrations.AlterField( model_name=\"label\",", "migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True), ), migrations.AlterField( model_name=\"label\", name=\"name\", field=models.CharField(max_length=255, unique=True), ), ]", "\"0008_create_mailbox\")] operations = [ migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True), ), migrations.AlterField( model_name=\"label\", name=\"name\",", "operations = [ migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True), ), migrations.AlterField( model_name=\"label\", name=\"name\", field=models.CharField(max_length=255,", "= [(\"crashbin_app\", \"0008_create_mailbox\")] operations = [ migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True), ), migrations.AlterField(", "dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations = [ migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True), ),", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations =", "09:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [(\"crashbin_app\", \"0008_create_mailbox\")] operations", "[ 
migrations.AlterField( model_name=\"bin\", name=\"name\", field=models.CharField(max_length=255, unique=True), ), migrations.AlterField( model_name=\"label\", name=\"name\", field=models.CharField(max_length=255, unique=True), )," ]
[ "= np.meshgrid(np.linspace(l, r, n), np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y =", "n = 500 l, r = x[:, 0].min() - 1, x[:, 0].max() +", "print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x = np.array([ [2, 1.5], [8, 9], [4.8, 5.2],", "model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1],", "= model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n = 500 l, r = x[:, 0].min()", "import numpy as np import sklearn.model_selection as ms import sklearn.svm as svm import", "0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9, bbox={'boxstyle': 'round,pad=0.6', 'fc':", "grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0],", "train_y, test_y = \\ ms.train_test_split(x, y, test_size=0.25, random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型", ":-1] y = data[:, -1] # 选择svm做分类 train_x, test_x, train_y, test_y = \\", "10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}]", "model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n = 500 l, r = x[:, 0].min() -", "2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9, bbox={'boxstyle':", "c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i in range(len(probs)):", "3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x,", "0].min() - 1, x[:, 0].max() + 1 b, t = x[:, 1].min() -", "for i in 
range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0] * 100, 2), round(probs[i,", "r, n), np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y", "7], [7.6, 2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs) #", "'->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x = np.array([ [2,", "1] * 100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left',", "100, 2), round(probs[i, 1] * 100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12),", "[{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01,", "import sklearn.svm as svm import sklearn.metrics as sm import matplotlib.pyplot as mp data", "[5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n =", "= 500 l, r = x[:, 0].min() - 1, x[:, 0].max() + 1", "= np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20)", "= flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1],", "grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y,", "绘制分类边界线 n = 500 l, r = x[:, 0].min() - 1, x[:, 0].max()", "x[:, 0].max() + 1 b, t = x[:, 1].min() - 1, x[:, 1].max()", "s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i in range(len(probs)): mp.annotate( '{}%", 
"flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability',", "1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]])", "\"\"\" demo05_gridsearch.py 网格搜索 \"\"\" import numpy as np import sklearn.model_selection as ms import", "xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9, bbox={'boxstyle': 'round,pad=0.6',", "import sklearn.model_selection as ms import sklearn.svm as svm import sklearn.metrics as sm import", "= model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14)", "- 1, x[:, 0].max() + 1 b, t = x[:, 1].min() - 1,", "i in range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0] * 100, 2), round(probs[i, 1]", "ms import sklearn.svm as svm import sklearn.metrics as sm import matplotlib.pyplot as mp", "mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0],", "param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y,", "sklearn.metrics as sm import matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x", "# 绘制分类边界线 n = 500 l, r = x[:, 0].min() - 1, x[:,", "pred_prob_y = model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n = 500 l,", "s=80, marker='D') for i in range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0] * 100,", "1].max() + 1 grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b, 
t, n)) flat_x =", "np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape)", "sklearn.svm as svm import sklearn.metrics as sm import matplotlib.pyplot as mp data =", "x[:, 1].min() - 1, x[:, 1].max() + 1 grid_x = np.meshgrid(np.linspace(l, r, n),", "选择svm做分类 train_x, test_x, train_y, test_y = \\ ms.train_test_split(x, y, test_size=0.25, random_state=5) model =", "2), round(probs[i, 1] * 100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset", "mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg',", "mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i in range(len(probs)): mp.annotate( '{}% {}%'.format(", "t = x[:, 1].min() - 1, x[:, 1].max() + 1 grid_x = np.meshgrid(np.linspace(l,", "test_x, train_y, test_y = \\ ms.train_test_split(x, y, test_size=0.25, random_state=5) model = svm.SVC(probability=True) #", "[2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4,", "4], [2.5, 7], [7.6, 2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs = model.predict_proba(prob_x)", "0.001]}] model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分", "mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:, :-1] y = data[:,", "'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_)", "print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score)", "params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) 
print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score in", "r = x[:, 0].min() - 1, x[:, 0].max() + 1 b, t =", "test_size=0.25, random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]},", "ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score", "score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y))", "+ 1 grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(),", "as ms import sklearn.svm as svm import sklearn.metrics as sm import matplotlib.pyplot as", "[2.5, 7], [7.6, 2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs)", "新增样本 prob_x = np.array([ [2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5,", "= np.array([ [2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6,", "1 grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel()))", "flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y,", "cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i in range(len(probs)): mp.annotate(", "0] * 100, 2), round(probs[i, 1] * 100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]),", "100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9,", "range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0] * 100, 2), round(probs[i, 1] * 100,", "import 
sklearn.metrics as sm import matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8')", "mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1],", "[7.6, 2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs) # 绘制分类边界线", "print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param,", "输出每个超参数组合信息及其得分 for param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y =", "mp.annotate( '{}% {}%'.format( round(probs[i, 0] * 100, 2), round(probs[i, 1] * 100, 2)),", "= x[:, 1].min() - 1, x[:, 1].max() + 1 grid_x = np.meshgrid(np.linspace(l, r,", "prob_x = np.array([ [2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7],", "+ 1 b, t = x[:, 1].min() - 1, x[:, 1].max() + 1", "[8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]) pred_prob_y", "dtype='f8') x = data[:, :-1] y = data[:, -1] # 选择svm做分类 train_x, test_x,", "= svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2,", "svm import sklearn.metrics as sm import matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',',", "mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D')", "# 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'],", "* 100, 2), round(probs[i, 1] * 100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12,", "ms.train_test_split(x, y, test_size=0.25, random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = 
[{'kernel':['linear'],'C':[1, 10,", "= \\ ms.train_test_split(x, y, test_size=0.25, random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params =", "in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) #", "flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y',", "\\ ms.train_test_split(x, y, test_size=0.25, random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1,", "as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:, :-1] y =", "model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score in zip( model.cv_results_['params'],", "model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x = np.array([ [2, 1.5], [8, 9], [4.8,", "\"\"\" import numpy as np import sklearn.model_selection as ms import sklearn.svm as svm", "demo05_gridsearch.py 网格搜索 \"\"\" import numpy as np import sklearn.model_selection as ms import sklearn.svm", "5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs", "mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0],", "* 100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top',", "0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_)", "fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) 
mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:,", "params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1,", "x[:, 0].min() - 1, x[:, 0].max() + 1 b, t = x[:, 1].min()", "[4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs =", "y, test_size=0.25, random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100,", "import matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:, :-1]", "grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14)", "zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本", "1, x[:, 0].max() + 1 b, t = x[:, 1].min() - 1, x[:,", "xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9, bbox={'boxstyle': 'round,pad=0.6', 'fc': 'orange', 'alpha': 0.8})", "1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model =", "# 新增样本 prob_x = np.array([ [2, 1.5], [8, 9], [4.8, 5.2], [4, 4],", "- 1, x[:, 1].max() + 1 grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b, t,", "# 输出每个超参数组合信息及其得分 for param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y", "grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y", "2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x) probs = 
model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n", "matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:, :-1] y", "# 选择svm做分类 train_x, test_x, train_y, test_y = \\ ms.train_test_split(x, y, test_size=0.25, random_state=5) model", "model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10)", "-1] # 选择svm做分类 train_x, test_x, train_y, test_y = \\ ms.train_test_split(x, y, test_size=0.25, random_state=5)", "0], test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for", "= data[:, -1] # 选择svm做分类 train_x, test_x, train_y, test_y = \\ ms.train_test_split(x, y,", "svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]},", "= ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param,", "random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'],", "{}%'.format( round(probs[i, 0] * 100, 2), round(probs[i, 1] * 100, 2)), xy=(prob_x[i, 0],", "l, r = x[:, 0].min() - 1, x[:, 0].max() + 1 b, t", "as sm import matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x =", "sklearn.model_selection as ms import sklearn.svm as svm import sklearn.metrics as sm import matplotlib.pyplot", "data[:, -1] # 选择svm做分类 train_x, test_x, train_y, test_y = \\ ms.train_test_split(x, y, test_size=0.25,", "= np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:, :-1] y = data[:, -1] #", "{'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 
'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model,", "train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']):", "model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x", "print(param, '->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x = np.array([", "5.9]]) pred_prob_y = model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n = 500", "as svm import sklearn.metrics as sm import matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt',", "'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model, params,", "500 l, r = x[:, 0].min() - 1, x[:, 0].max() + 1 b,", "fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y,", "1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i in", "x[:, 1].max() + 1 grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b, t, n)) flat_x", "1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9, bbox={'boxstyle': 'round,pad=0.6', 'fc': 'orange', 'alpha':", "b, t = x[:, 1].min() - 1, x[:, 1].max() + 1 grid_x =", "np.meshgrid(np.linspace(l, r, n), np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x)", "prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i in range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i,", "train_x, 
test_x, train_y, test_y = \\ ms.train_test_split(x, y, test_size=0.25, random_state=5) model = svm.SVC(probability=True)", "data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:, :-1] y = data[:, -1]", "round(probs[i, 0] * 100, 2), round(probs[i, 1] * 100, 2)), xy=(prob_x[i, 0], prob_x[i,", "mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')", "-12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9, bbox={'boxstyle': 'round,pad=0.6', 'fc': 'orange', 'alpha': 0.8}) mp.show()", "x = data[:, :-1] y = data[:, -1] # 选择svm做分类 train_x, test_x, train_y,", "pred_test_y)) # 新增样本 prob_x = np.array([ [2, 1.5], [8, 9], [4.8, 5.2], [4,", "1, x[:, 1].max() + 1 grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b, t, n))", "in range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0] * 100, 2), round(probs[i, 1] *", "score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x = np.array([ [2, 1.5],", "cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score in zip(", "0.01, 0.001]}] model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) #", "round(probs[i, 1] * 100, 2)), xy=(prob_x[i, 0], prob_x[i, 1]), xytext=(12, -12), textcoords='offset points',", "{'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y)", "as np import sklearn.model_selection as ms import sklearn.svm as svm import sklearn.metrics as", "model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n = 500 l, r 
=", "= x[:, 0].min() - 1, x[:, 0].max() + 1 b, t = x[:,", "test_y = \\ ms.train_test_split(x, y, test_size=0.25, random_state=5) model = svm.SVC(probability=True) # 根据网格搜索选择最优模型 params", "t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability',", "prob_x[i, 1]), xytext=(12, -12), textcoords='offset points', horizontalalignment='left', verticalalignment='top', fontsize=9, bbox={'boxstyle': 'round,pad=0.6', 'fc': 'orange',", "1 b, t = x[:, 1].min() - 1, x[:, 1].max() + 1 grid_x", "网格搜索 \"\"\" import numpy as np import sklearn.model_selection as ms import sklearn.svm as", "y = data[:, -1] # 选择svm做分类 train_x, test_x, train_y, test_y = \\ ms.train_test_split(x,", "pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x = np.array([ [2, 1.5], [8,", "cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80,", "facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:,", "np.array([ [2, 1.5], [8, 9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2],", "probs = model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n = 500 l, r = x[:,", "grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r',", "[4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]) pred_prob_y = model.predict(prob_x)", "np import sklearn.model_selection as ms import sklearn.svm as svm import sklearn.metrics as sm", "marker='D') for i in range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0] * 100, 2),", "n), np.linspace(b, t, n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = 
model.predict(flat_x) grid_y =", "0].max() + 1 b, t = x[:, 1].min() - 1, x[:, 1].max() +", "1].min() - 1, x[:, 1].max() + 1 grid_x = np.meshgrid(np.linspace(l, r, n), np.linspace(b,", "= model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x = np.array([ [2, 1.5], [8, 9],", "mp.xlabel('x', fontsize=14) mp.ylabel('y', fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1],", "delimiter=',', dtype='f8') x = data[:, :-1] y = data[:, -1] # 选择svm做分类 train_x,", "= data[:, :-1] y = data[:, -1] # 选择svm做分类 train_x, test_x, train_y, test_y", "print(probs) # 绘制分类边界线 n = 500 l, r = x[:, 0].min() - 1,", "根据网格搜索选择最优模型 params = [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000],", "data[:, :-1] y = data[:, -1] # 选择svm做分类 train_x, test_x, train_y, test_y =", "model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for", "'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model, params, cv=5)", "fontsize=14) mp.tick_params(labelsize=10) mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray') mp.scatter(test_x[:, 0], test_x[:, 1], c=test_y, cmap='brg', s=80)", "for param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y = model.predict(test_x)", "cmap='jet_r', s=80, marker='D') for i in range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0] *", "sm import matplotlib.pyplot as mp data = np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:,", "test_x[:, 1], c=test_y, cmap='brg', s=80) mp.scatter(prob_x[:,0], prob_x[:,1], c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i", 
"np.loadtxt('../ml_data/multiple2.txt', delimiter=',', dtype='f8') x = data[:, :-1] y = data[:, -1] # 选择svm做分类", "= model.predict(prob_x) probs = model.predict_proba(prob_x) print(probs) # 绘制分类边界线 n = 500 l, r", "100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1, 0.01, 0.001]}] model", "np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray') mp.title('Probability', fontsize=20) mp.xlabel('x',", "c=pred_prob_y, cmap='jet_r', s=80, marker='D') for i in range(len(probs)): mp.annotate( '{}% {}%'.format( round(probs[i, 0]", "model.cv_results_['mean_test_score']): print(param, '->', score) pred_test_y = model.predict(test_x) print(sm.classification_report(test_y, pred_test_y)) # 新增样本 prob_x =", "'{}% {}%'.format( round(probs[i, 0] * 100, 2), round(probs[i, 1] * 100, 2)), xy=(prob_x[i,", "9], [4.8, 5.2], [4, 4], [2.5, 7], [7.6, 2], [5.4, 5.9]]) pred_prob_y =", "= [{'kernel':['linear'],'C':[1, 10, 100, 1000]}, {'kernel':['poly'], 'C':[1], 'degree':[2, 3]}, {'kernel':['rbf'], 'C':[1,10,100,1000], 'gamma':[1, 0.1,", "print(model.best_score_) print(model.best_estimator_) # 输出每个超参数组合信息及其得分 for param, score in zip( model.cv_results_['params'], model.cv_results_['mean_test_score']): print(param, '->',", "n)) flat_x = np.column_stack((grid_x[0].ravel(), grid_x[1].ravel())) flat_y = model.predict(flat_x) grid_y = flat_y.reshape(grid_x[0].shape) mp.figure('Probability', facecolor='lightgray')", "numpy as np import sklearn.model_selection as ms import sklearn.svm as svm import sklearn.metrics", "'gamma':[1, 0.1, 0.01, 0.001]}] model = ms.GridSearchCV(model, params, cv=5) model.fit(train_x, train_y) print(model.best_params_) print(model.best_score_)" ]
[ "def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v", "return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def", "self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0:", "def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_vec3(self): self.curr+=12", "self.curr=0 def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return", "BufferDecoder(object): def __init__(self,bytes) -> None: self.bytes=bytes self.curr=0 def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? 
i,v=0,0 while", "v i+=7 assert False,f'read_var_uint64 fail i:{i} v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1)", "uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv self.write_var_uint64(uv) def write_str(self,s:str): es=s.encode(encoding='utf-8') self.write_var_uint32(len(es)) self.append(es) def write_UUID_bytes(self,uuid_bytes:bytes): self.append(uuid_bytes)", "def write_uint32(self,i:int): self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1)", "return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return", "False,f'read_var_uint64 fail i:{i} v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0: v=~v", "def read_tail(self): return self.bytes[self.curr:] def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1", "while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv) def", "i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint32 fail i:{i} v:{v}", "None: self.bytes=bytes self.curr=0 def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? 
i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i if", "if (v_&1)!=0: v=~v return int(v) def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4", "return int(v) def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def", "self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr]", "def write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv", "i:{i} v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0: v=~v return int(v)", "read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:] def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0]", "struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self):", "{self}' def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_vec3(self):", "def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80:", "-> None: self.bytes=bytes self.curr=0 def read_var_uint32(self): # 
我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i", "while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv self.write_var_uint64(uv) def", "self._bytes=b'' @property def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs)", "self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes)", "v=~v return int(v) def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0]", "{self}' def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_var_uint64(self):", "v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0: v=~v return int(v) def", "reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1", "class BufferDecoder(object): def __init__(self,bytes) -> None: self.bytes=bytes self.curr=0 def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? 
i,v=0,0", "from .nbt import NBTFile import io class BufferDecoder(object): def __init__(self,bytes) -> None: self.bytes=bytes", "read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length return", "bytes[8:]+bytes[:8] return bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return", "self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr]", "v= np.int32(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while", "(v_&1)!=0: v=~v return int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i", "v=~v return int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i if", "return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def", "self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x):", "read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? 
i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7", "uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def", "import NBTFile import io class BufferDecoder(object): def __init__(self,bytes) -> None: self.bytes=bytes self.curr=0 def", "v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint32 fail i:{i} v:{v} {self}' def", "v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0", "x//=128 self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv self.write_var_uint64(uv) def write_str(self,s:str): es=s.encode(encoding='utf-8') self.write_var_uint32(len(es))", "self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b))", "self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py() class BufferEncoder(object): def __init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0", "def __init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:])", "return self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return", "int(v) def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 
return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self):", "length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes def read_UUID(self): self.curr+=16", "v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint64 fail i:{i} v:{v} {self}' def", "self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes):", "self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def", "def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? 
i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v", "v i+=7 assert False,f'read_var_uint32 fail i:{i} v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1)", "v= np.int64(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def", "self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i))", "self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self)", "i+=7 assert False,f'read_var_uint32 fail i:{i} v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if", "return bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0]", "read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2", "# 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert", "if x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x):", "int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? 
i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return", "self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr])", "write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv)", "self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self):", "uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0]", "nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py() class BufferEncoder(object): def __init__(self)", "nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py() class BufferEncoder(object): def __init__(self) -> None:", "x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): while x>=0x80:", "uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if", "def read_str(self): length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): 
bytes[8:]+bytes[:8] return bytes def", "self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv self.write_var_uint64(uv) def write_str(self,s:str): es=s.encode(encoding='utf-8')", "self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return", "False,f'read_var_uint32 fail i:{i} v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0: v=~v", "read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧??", "self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8')", "_len==None: nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py() class BufferEncoder(object): def", "struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:] def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return", "def read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len", "self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i)) def 
write_var_uint32(self,x): while", "我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint32", "-> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return", "v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr])", "read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:]", "read_tail(self): return self.bytes[self.curr:] def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1 def", "__init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements)", "read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len", "def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self):", "if b&0x80==0: return v i+=7 assert False,f'read_var_uint64 fail i:{i} v:{v} {self}' def read_var_int64(self):", "x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): 
while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2)", "return v i+=7 assert False,f'read_var_uint64 fail i:{i} v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64() v=", "NBTFile import io class BufferDecoder(object): def __init__(self,bytes) -> None: self.bytes=bytes self.curr=0 def read_var_uint32(self):", "def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if", "self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0]", "i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint32 fail", "self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80))", "(v_&1)!=0: v=~v return int(v) def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 return", "return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def", "i+=7 assert False,f'read_var_uint64 fail i:{i} v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if", "self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return 
struct.unpack('i',self.bytes[self.curr-4:self.curr])[0]", "return v i+=7 assert False,f'read_var_uint32 fail i:{i} v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32() v=", ".nbt import NBTFile import io class BufferDecoder(object): def __init__(self,bytes) -> None: self.bytes=bytes self.curr=0", "assert False,f'read_var_uint64 fail i:{i} v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0:", "np.int64(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self):", "def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i)) def", "self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv self.write_var_uint64(uv) def write_str(self,s:str): es=s.encode(encoding='utf-8') self.write_var_uint32(len(es)) self.append(es)", "return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py() class BufferEncoder(object): def __init__(self) ->", "@staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def", "read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return", "b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint64 fail i:{i} v:{v} {self}'", "self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length return 
self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes", "np.int32(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<70:", "struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len):", "@property def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def", "return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return", "return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def", "struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def", "len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b):", "bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) 
return nbt.to_py() class BufferEncoder(object): def __init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b''", "append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int):", "self.bytes=bytes self.curr=0 def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0:", "while i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint64 fail i:{i}", "import io class BufferDecoder(object): def __init__(self,bytes) -> None: self.bytes=bytes self.curr=0 def read_var_uint32(self): #", "write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv self.write_var_uint64(uv) def write_str(self,s:str): es=s.encode(encoding='utf-8') self.write_var_uint32(len(es)) self.append(es) def write_UUID_bytes(self,uuid_bytes:bytes):", "read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py() class", "import numpy as np from .nbt import NBTFile import io class BufferDecoder(object): def", "def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv", "self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b))", "def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: 
self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f):", "def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128", "read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_vec3(self): self.curr+=12 return", "assert False,f'read_var_uint32 fail i:{i} v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0:", "我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint64", "write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80))", "self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7", "return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod", "def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self):", "def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv self.write_var_uint64(uv) def 
write_str(self,s:str): es=s.encode(encoding='utf-8') self.write_var_uint32(len(es)) self.append(es) def", "bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f))", "i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint64 fail i:{i} v:{v}", "b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint32 fail i:{i} v:{v} {self}'", "def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4", "write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def", "read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return", "fail i:{i} v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0: v=~v return", "numpy as np from .nbt import NBTFile import io class BufferDecoder(object): def __init__(self,bytes)", "np from .nbt import NBTFile import io class BufferDecoder(object): def __init__(self,bytes) -> None:", "return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool):", "read_var_uint32(self): # 
我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7", "self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return", "write_uint32(self,i:int): self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if", "bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr] return self.reverseUUIDBytes(uuid_bytes) def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def", "def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): self.append(struct.pack('I',i)) def write_var_uint32(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x)", "self.bytes[self.curr:] def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1 def read_str(self): length=self.read_var_uint32()", "read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None:", "if b&0x80==0: return v i+=7 assert False,f'read_var_uint32 fail i:{i} v:{v} {self}' def read_var_int32(self):", "write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x))", "write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv 
self.write_var_uint64(uv)", "else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py() class BufferEncoder(object): def __init__(self) -> None: self._bytes_elements=[]", "return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:] def read_byte(self):", "import struct import numpy as np from .nbt import NBTFile import io class", "b&0x80==0: return v i+=7 assert False,f'read_var_uint32 fail i:{i} v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32()", "def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len", "def read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:] def read_byte(self): self.curr+=1 return", "self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return nbt.to_py()", "i:{i} v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0: v=~v return int(v)", "return self.bytes[self.curr:] def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1 def read_str(self):", "self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:] def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def", "None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self): if 
len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes", "self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x): while", "fail i:{i} v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64() v= np.int64(v_>>1) if (v_&1)!=0: v=~v return", "struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len):", "read_str(self): length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes def read_UUID(self):", "while i<35: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint32 fail i:{i}", "def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def", "def read_uint8(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4", "i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert False,f'read_var_uint64 fail", "read_boolen(self): return self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8]", 
"class BufferEncoder(object): def __init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self): if", "def read_vec3(self): self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return", "struct import numpy as np from .nbt import NBTFile import io class BufferDecoder(object):", "return nbt.to_py() class BufferEncoder(object): def __init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def", "return int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0:", "io class BufferDecoder(object): def __init__(self,bytes) -> None: self.bytes=bytes self.curr=0 def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧??", "b&0x80==0: return v i+=7 assert False,f'read_var_uint64 fail i:{i} v:{v} {self}' def read_var_int64(self): v_=self.read_var_uint64()", "struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:] def read_byte(self): self.curr+=1", "return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:] def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self):", "def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py()", "write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def write_boolen(self,b:bool): self.append(struct.pack('B',b)) def write_uint32(self,i:int): 
self.append(struct.pack('I',i)) def write_var_uint32(self,x):", "if len(self._bytes_elements)!=self._bytes_elements_count: self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:]) self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def", "struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None):", "self.curr+=12 return struct.unpack('fff',self.bytes[self.curr-12:self.curr]) def read_float32(self): self.curr+=4 return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0] def read_tail(self): return self.bytes[self.curr:] def", "# 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<70: b=self.read_byte() v|=(b&0x7f)<<i if b&0x80==0: return v i+=7 assert", "nbt=NBTFile(bio) return nbt.to_py() class BufferEncoder(object): def __init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property", "x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x): uv=np.uint64(np.uint64(x)*2) if x<0: uv=~uv self.write_var_uint64(uv) def write_str(self,s:str):", "as np from .nbt import NBTFile import io class BufferDecoder(object): def __init__(self,bytes) ->", "def __init__(self,bytes) -> None: self.bytes=bytes self.curr=0 def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? i,v=0,0 while i<35:", "if (v_&1)!=0: v=~v return int(v) def read_var_uint64(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? 
i,v=0,0 while i<70: b=self.read_byte()", "def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0: v=~v return int(v) def read_var_uint64(self): #", "x>=0x80: self.write_byte(int((x%128)+0x80)) x>>=7 self.write_byte(x) def write_var_int32(self,x): uv=np.uint32(np.uint32(x)<<1) if x<0: uv=~uv self.write_var_uint32(uv) def write_var_uint64(self,x):", "return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_int16(self): self.curr+=2 return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0] def read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def", "read_int32(self): self.curr+=4 return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0] def read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return", "self._bytes_elements_count=len(self._bytes_elements) return self._bytes def append(self,bs:bytes): self._bytes_elements.append(bs) def write_float32(self,f): self.append(struct.pack('f',f)) def write_byte(self,b): self.append(struct.pack('B',b)) def", "def read_boolen(self): return self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes):", "__init__(self,bytes) -> None: self.bytes=bytes self.curr=0 def read_var_uint32(self): # 我nm真的有必要为了几个比特省到这种地步吗??uint32最多也就5个比特吧?? 
i,v=0,0 while i<35: b=self.read_byte()", "return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio)", "v:{v} {self}' def read_var_int32(self): v_=self.read_var_uint32() v= np.int32(v_>>1) if (v_&1)!=0: v=~v return int(v) def", "def read_byte(self): self.curr+=1 return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0] def read_boolen(self): return self.read_byte()==1 def read_str(self): length=self.read_var_uint32() self.curr+=length", "def read(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read_nbt(self,_len=None): if _len==None: nbt=NBTFile(self) return nbt.to_py() else:", "if _len==None: nbt=NBTFile(self) return nbt.to_py() else: self.curr+=_len bio=io.BytesIO(self.bytes[self.curr-_len:self.curr]) nbt=NBTFile(bio) return nbt.to_py() class BufferEncoder(object):", "nbt.to_py() class BufferEncoder(object): def __init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self):", "BufferEncoder(object): def __init__(self) -> None: self._bytes_elements=[] self._bytes_elements_count=0 self._bytes=b'' @property def bytes(self): if len(self._bytes_elements)!=self._bytes_elements_count:", "self.curr+=length return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8') @staticmethod def reverseUUIDBytes(bytes): bytes[8:]+bytes[:8] return bytes def read_UUID(self): self.curr+=16 uuid_bytes=self.bytes[self.curr-16:self.curr]", "read_uint32(self): self.curr+=4 return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0] def read_bytes(self,_len): self.curr+=_len return self.bytes[self.curr-_len:self.curr] def read(self,_len): self.curr+=_len return", "self.write_var_uint32(uv) def write_var_uint64(self,x): while x>=0x80: self.write_byte(int((x%128)+0x80)) x//=128 self.write_byte(int(x)) def write_var_int64(self,x): 
uv=np.uint64(np.uint64(x)*2) if x<0:" ]
[]
[]
[ "test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\")", "os import pymake from ci_framework import FlopyTestSetup, base_test_dir import flopy base_dir = base_test_dir(__file__,", "model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite files m.write_input() # run and", "output spd[(0, 0)] = output spd[(1, 1)] = output spd[(1, 2)] = output", "range(1, m.dis.nper): for istp in [0, 4, 9, 14]: spd[(iper, istp)] = output", "= output spd[(1, 1)] = output spd[(1, 2)] = output spd[(1, 3)] =", "did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # change uzf iuzfcb2 and", "iper in range(1, m.dis.nper): for istp in [0, 4, 9, 14]: spd[(iper, istp)]", "os.path.join(model_ws, mfnam) # change uzf iuzfcb2 and add binary uzf output file m.uzf.iuzfcb2", "os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum", "except: success = False assert success, \"new model run did not terminate successfully\"", "successfully\" fn1 = os.path.join(model_ws2, mfnam) # compare budget terms if run: fsum =", "successfully\" fn0 = os.path.join(model_ws, mfnam) # rewrite files model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2,", "output spd[(1, 1)] = output spd[(1, 2)] = output spd[(1, 3)] = output", "rewrite files m.write_input() # run and compare the output files if run: try:", "output oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if run: try: success, buff = m.run_model(silent=False)", "output files if run: try: success, buff = m.run_model(silent=False) except: success = False", "test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" # copy", 
"m.load_fail is False, \"failed to load all packages\" # reset the oc file", "= False def test_uzf_unit_numbers(): model_ws = f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam =", "compare the output files if run: try: success, buff = m.run_model(silent=False) except: success", "import pymake from ci_framework import FlopyTestSetup, base_test_dir import flopy base_dir = base_test_dir(__file__, rel_path=\"temp\",", "= f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\",", "# run and compare the output files if run: try: success, buff =", "run: try: success, buff = m.run_model(silent=False) except: success = False assert success, \"base", "100 columns\" ) v = (m.nlay, m.nrow, m.ncol, m.nper) assert v == (1,", "m.load_fail is False, \"failed to load all packages\" msg = ( \"modflow-2005 testsfr2_tab", "cpth = os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\" v = flopy.which(exe_name) run = True", "\" \"1 layer, 7 rows, and 100 columns\" ) v = (m.nlay, m.nrow,", "success = False assert success, \"base model run did not terminate successfully\" fn1", "did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) # compare budget terms if", "loading and preserving existing unit numbers \"\"\" import os import pymake from ci_framework", "run did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # change uzf iuzfcb2", "have \" \"1 layer, 7 rows, and 100 columns\" ) v = (m.nlay,", "os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite files m.write_input() # run and compare the", "(m.nlay, m.nrow, m.ncol, m.nper) assert v == (1, 7, 100, 50), msg if", "fn1 = os.path.join(model_ws2, mfnam) # compare budget terms if run: fsum = os.path.join(", "flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if run: try: success, buff = 
m.run_model(silent=False) except: success =", "\"examples\", \"data\", \"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam,", "not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # change uzf iuzfcb2 and add", "mfnam = \"testsfr2_tab.nam\" # copy files import pymake pymake.setup(os.path.join(pth, mfnam), model_ws) m =", "from ci_framework import FlopyTestSetup, base_test_dir import flopy base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth", "# rewrite files m.write_input() # run and compare the output files if run:", "False, \"failed to load all packages\" # reset the oc file m.remove_package(\"OC\") output", "change the model work space model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite", "change uzf iuzfcb2 and add binary uzf output file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2,", "fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget( fn0, fn1, max_incpd=0.1,", "model run did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # change uzf", "v == (1, 7, 100, 50), msg if run: try: success, buff =", "= flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name ) assert m.load_fail is False, \"failed to", "mfnam = \"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth,", "model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum )", "and compare the output files if run: try: success, buff = m.run_model(silent=False) except:", "= FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" # copy files import pymake pymake.setup(os.path.join(pth, mfnam),", "assert success, \"budget comparison failure\" return def 
test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup =", "run = True if v is None: run = False def test_uzf_unit_numbers(): model_ws", "success, \"base model run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) if", "= os.path.join(model_ws2, mfnam) if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success", "run and compare the output files if run: try: success, buff = m.run_model(silent=False)", "success = False assert success, \"base model run did not terminate successfully\" fn0", "m.remove_package(\"OC\") output = [\"save head\", \"print budget\"] spd = {} for iper in", "msg = ( \"modflow-2005 testsfr2_tab does not have \" \"1 layer, 7 rows,", "0)] = output spd[(1, 1)] = output spd[(1, 2)] = output spd[(1, 3)]", "= output oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if run: try: success, buff =", "{} for iper in range(1, m.dis.nper): for istp in [0, 4, 9, 14]:", "packages\" msg = ( \"modflow-2005 testsfr2_tab does not have \" \"1 layer, 7", "\"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run: try: success, buff = m.run_model(silent=False) except: success", "= False assert success, \"new model run did not terminate successfully\" fn1 =", "\"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws)", "except: success = False print(\"could not perform ls\" \"budget comparison\") assert success, \"budget", "# compare budget terms if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try:", "mfnam) # compare budget terms if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" )", "exe_name=exe_name ) assert m.load_fail is False, \"failed to load all packages\" msg =", "load all packages\" # reset the oc file m.remove_package(\"OC\") output = 
[\"save head\",", "model work space model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite files m.write_input()", "= os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\" v = flopy.which(exe_name) run = True if", "= output spd[(0, 0)] = output spd[(1, 1)] = output spd[(1, 2)] =", "model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert m.load_fail is False, \"failed to load all packages\"", "model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" # copy files", "rewrite files model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run: try: success,", "to load all packages\" # reset the oc file m.remove_package(\"OC\") output = [\"save", "\"testsfr2_tab.nam\" # copy files import pymake pymake.setup(os.path.join(pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam,", "\"print budget\"] spd = {} for iper in range(1, m.dis.nper): for istp in", "os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\" v = flopy.which(exe_name) run = True if v", "= os.path.join(model_ws2, mfnam) # compare budget terms if run: fsum = os.path.join( model_ws,", "= f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" # copy files import", "successfully\" fn0 = os.path.join(model_ws, mfnam) # change uzf iuzfcb2 and add binary uzf", "unit numbers \"\"\" import os import pymake from ci_framework import FlopyTestSetup, base_test_dir import", "reset the oc file m.remove_package(\"OC\") output = [\"save head\", \"print budget\"] spd =", "assert m.load_fail is False, \"failed to load all packages\" # reset the oc", "success = False assert success, \"new model run did not terminate successfully\" fn1", "run: try: success, buff = m.run_model(silent=False) except: success = False assert success, \"new", 
"f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" # copy files import pymake", "not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # rewrite files model_ws2 = os.path.join(model_ws,", "rel_path=\"temp\", verbose=True) pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\") exe_name", "False assert success, \"base model run did not terminate successfully\" fn1 = os.path.join(model_ws2,", "buff = m.run_model(silent=False) except: success = False assert success, \"new model run did", "m.nper) assert v == (1, 7, 100, 50), msg if run: try: success,", "terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # rewrite files model_ws2 = os.path.join(model_ws, \"flopy\")", "False, \"failed to load all packages\" msg = ( \"modflow-2005 testsfr2_tab does not", "= True if v is None: run = False def test_uzf_unit_numbers(): model_ws =", "run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget( fn0, fn1,", "\"base model run did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # rewrite", "spd[(1, 2)] = output spd[(1, 3)] = output oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file()", "\"base model run did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # change", "= \"mf2005\" v = flopy.which(exe_name) run = True if v is None: run", "m.write_input() # run and compare the output files if run: try: success, buff", "Test loading and preserving existing unit numbers \"\"\" import os import pymake from", "for istp in [0, 4, 9, 14]: spd[(iper, istp)] = output spd[(0, 0)]", "ci_framework import FlopyTestSetup, base_test_dir import flopy base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth =", "import flopy base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth = 
os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\")", "output file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change the model work", "= FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") #", "= flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert m.load_fail is False, \"failed", "100, 50), msg if run: try: success, buff = m.run_model(silent=False) except: success =", "m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change the model work space model_ws2 = os.path.join(model_ws, \"flopy\")", "terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) # compare budget terms if run: fsum", "budget terms if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success =", "= pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except: success = False print(\"could", "\"examples\", \"data\", \"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\" v = flopy.which(exe_name)", "forgive=False, exe_name=exe_name, ) assert m.load_fail is False, \"failed to load all packages\" #", "the output files if run: try: success, buff = m.run_model(silent=False) except: success =", "not perform ls\" \"budget comparison\") assert success, \"budget comparison failure\" return if __name__", "try: success = pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except: success =", "packages\" # reset the oc file m.remove_package(\"OC\") output = [\"save head\", \"print budget\"]", "flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name ) assert m.load_fail is False, \"failed to load", "rows, and 100 columns\" ) v = (m.nlay, m.nrow, m.ncol, m.nper) assert v", "files if run: try: success, buff = 
m.run_model(silent=False) except: success = False assert", "= (m.nlay, m.nrow, m.ncol, m.nper) assert v == (1, 7, 100, 50), msg", "mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert m.load_fail", "os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run: try: success, buff = m.run_model(silent=False) except:", "9, 14]: spd[(iper, istp)] = output spd[(0, 0)] = output spd[(1, 1)] =", "exe_name=exe_name, ) assert m.load_fail is False, \"failed to load all packages\" # reset", "testsfr2_tab does not have \" \"1 layer, 7 rows, and 100 columns\" )", "= os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m =", "\"data\", \"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True,", "= os.path.join(model_ws, mfnam) # rewrite files model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input()", "file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change the model work space", "True if v is None: run = False def test_uzf_unit_numbers(): model_ws = f\"{base_dir}_test_uzf_unit_numbers\"", "# reset the oc file m.remove_package(\"OC\") output = [\"save head\", \"print budget\"] spd", "FlopyTestSetup, base_test_dir import flopy base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth = os.path.join(\"..\", \"examples\",", "fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except: success = False print(\"could not perform", "reset_external=True) m.write_input() if run: try: success, buff = m.run_model(silent=False) except: success = False", "spd[(1, 1)] = output spd[(1, 2)] = output spd[(1, 3)] = output oc", "\"mf2005\" v = flopy.which(exe_name) run = True if v is None: run =", "v 
= flopy.which(exe_name) run = True if v is None: run = False", "m.run_model(silent=False) except: success = False assert success, \"new model run did not terminate", "\"base model run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) if run:", "model_ws=model_ws, exe_name=exe_name ) assert m.load_fail is False, \"failed to load all packages\" msg", "all packages\" msg = ( \"modflow-2005 testsfr2_tab does not have \" \"1 layer,", "in [0, 4, 9, 14]: spd[(iper, istp)] = output spd[(0, 0)] = output", "mfnam) if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget(", "assert m.load_fail is False, \"failed to load all packages\" msg = ( \"modflow-2005", "verbose=True, model_ws=model_ws, exe_name=exe_name ) assert m.load_fail is False, \"failed to load all packages\"", "successfully\" fn1 = os.path.join(model_ws2, mfnam) if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" )", "and preserving existing unit numbers \"\"\" import os import pymake from ci_framework import", "outfile=fsum ) except: success = False print(\"could not perform ls\" \"budget comparison\") assert", "mfnam) # rewrite files model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run:", "FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" # copy files import pymake pymake.setup(os.path.join(pth, mfnam), model_ws)", "fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except: success = False print(\"could not perform ls\"", "== (1, 7, 100, 50), msg if run: try: success, buff = m.run_model(silent=False)", "and add binary uzf output file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") #", "uzf output file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change the model", "for iper in range(1, m.dis.nper): for 
istp in [0, 4, 9, 14]: spd[(iper,", "not have \" \"1 layer, 7 rows, and 100 columns\" ) v =", "os.path.join(model_ws2, mfnam) if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success =", "success = pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except: success = False", "max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except: success = False print(\"could not perform ls\" \"budget", "= ( \"modflow-2005 testsfr2_tab does not have \" \"1 layer, 7 rows, and", "= {} for iper in range(1, m.dis.nper): for istp in [0, 4, 9,", "False def test_uzf_unit_numbers(): model_ws = f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\"", "v is None: run = False def test_uzf_unit_numbers(): model_ws = f\"{base_dir}_test_uzf_unit_numbers\" test_setup =", "try: success, buff = m.run_model(silent=False) except: success = False assert success, \"new model", "test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" # copy files import pymake pymake.setup(os.path.join(pth, mfnam), model_ws) m", "ls\" \"budget comparison\") assert success, \"budget comparison failure\" return if __name__ == \"__main__\":", "pymake from ci_framework import FlopyTestSetup, base_test_dir import flopy base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True)", "did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) if run: fsum = os.path.join(", "14]: spd[(iper, istp)] = output spd[(0, 0)] = output spd[(1, 1)] = output", "\"new model run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) # compare", "m.run_model(silent=False) except: success = False assert success, \"base model run did not terminate", "= os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run: try: success, buff = m.run_model(silent=False)", "mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, 
model_ws=model_ws, exe_name=exe_name ) assert m.load_fail is", "spd[(0, 0)] = output spd[(1, 1)] = output spd[(1, 2)] = output spd[(1,", "is False, \"failed to load all packages\" msg = ( \"modflow-2005 testsfr2_tab does", "50), msg if run: try: success, buff = m.run_model(silent=False) except: success = False", "pymake pymake.setup(os.path.join(pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name ) assert", "istp in [0, 4, 9, 14]: spd[(iper, istp)] = output spd[(0, 0)] =", "model run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) if run: fsum", "existing unit numbers \"\"\" import os import pymake from ci_framework import FlopyTestSetup, base_test_dir", "max_cumpd=0.1, outfile=fsum ) except: success = False print(\"could not perform ls\" \"budget comparison\")", "def test_uzf_unit_numbers(): model_ws = f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth", "mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert m.load_fail is False, \"failed to load", "None: run = False def test_uzf_unit_numbers(): model_ws = f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)", "stress_period_data=spd) oc.write_file() if run: try: success, buff = m.run_model(silent=False) except: success = False", "run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) if run: fsum =", "assert success, \"base model run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam)", "\"budget comparison\") assert success, \"budget comparison failure\" return if __name__ == \"__main__\": test_uzf_unit_numbers()", "model run did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # rewrite files", "fn0 = os.path.join(model_ws, mfnam) # change uzf iuzfcb2 and add binary uzf output", "7, 100, 50), msg if run: try: success, 
buff = m.run_model(silent=False) except: success", "output spd[(1, 3)] = output oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if run: try:", "print(\"could not perform ls\" \"budget comparison\") assert success, \"budget comparison failure\" return if", "= os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite files m.write_input() # run and compare", "not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) if run: fsum = os.path.join( model_ws,", "m.change_model_ws(model_ws2, reset_external=True) # rewrite files m.write_input() # run and compare the output files", "model run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) # compare budget", ") v = (m.nlay, m.nrow, m.ncol, m.nper) assert v == (1, 7, 100,", "\"1 layer, 7 rows, and 100 columns\" ) v = (m.nlay, m.nrow, m.ncol,", "61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change the model work space model_ws2 = os.path.join(model_ws,", "False print(\"could not perform ls\" \"budget comparison\") assert success, \"budget comparison failure\" return", "assert success, \"new model run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam)", "did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # rewrite files model_ws2 =", "m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change the model work space model_ws2", "layer, 7 rows, and 100 columns\" ) v = (m.nlay, m.nrow, m.ncol, m.nper)", "assert success, \"base model run did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam)", "not perform ls\" \"budget comparison\") assert success, \"budget comparison failure\" return def test_unitnums_load_and_write():", "\"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite files m.write_input() # run and compare the output", "= output spd[(1, 3)] = output oc = flopy.modflow.ModflowOc(m, 
stress_period_data=spd) oc.write_file() if run:", "oc file m.remove_package(\"OC\") output = [\"save head\", \"print budget\"] spd = {} for", "if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget( fn0,", "model_ws = f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth = os.path.join(\"..\",", "success, buff = m.run_model(silent=False) except: success = False assert success, \"base model run", "reset_external=True) # rewrite files m.write_input() # run and compare the output files if", "preserving existing unit numbers \"\"\" import os import pymake from ci_framework import FlopyTestSetup,", "m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name ) assert m.load_fail is False, \"failed", "comparison failure\" return def test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam", "numbers \"\"\" import os import pymake from ci_framework import FlopyTestSetup, base_test_dir import flopy", "<filename>autotest/t036_test.py \"\"\" Test loading and preserving existing unit numbers \"\"\" import os import", "\"modflow-2005 testsfr2_tab does not have \" \"1 layer, 7 rows, and 100 columns\"", "except: success = False assert success, \"base model run did not terminate successfully\"", "pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert", "mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name ) assert m.load_fail is False, \"failed to load all", "m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run: try: success, buff = m.run_model(silent=False) except: success =", "if v is None: run = False def test_uzf_unit_numbers(): model_ws = 
f\"{base_dir}_test_uzf_unit_numbers\" test_setup", "if run: try: success, buff = m.run_model(silent=False) except: success = False assert success,", "assert v == (1, 7, 100, 50), msg if run: try: success, buff", "f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except:", "( \"modflow-2005 testsfr2_tab does not have \" \"1 layer, 7 rows, and 100", "m.nrow, m.ncol, m.nper) assert v == (1, 7, 100, 50), msg if run:", "pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\"", "in range(1, m.dis.nper): for istp in [0, 4, 9, 14]: spd[(iper, istp)] =", "base_test_dir import flopy base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth = os.path.join(\"..\", \"examples\", \"data\",", "= os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\" v", "files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, )", "\"budget comparison failure\" return def test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)", "\"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\" v = flopy.which(exe_name) run =", "perform ls\" \"budget comparison\") assert success, \"budget comparison failure\" return def test_unitnums_load_and_write(): model_ws", "is None: run = False def test_uzf_unit_numbers(): model_ws = f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True,", "= False assert success, \"base model run did not terminate successfully\" fn1 =", "istp)] = output spd[(0, 0)] = output spd[(1, 1)] = output spd[(1, 2)]", "success = False print(\"could not perform ls\" \"budget comparison\") assert success, 
\"budget comparison", "buff = m.run_model(silent=False) except: success = False assert success, \"base model run did", "fn0 = os.path.join(model_ws, mfnam) # rewrite files model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True)", "oc.write_file() if run: try: success, buff = m.run_model(silent=False) except: success = False assert", "success, buff = m.run_model(silent=False) except: success = False assert success, \"new model run", "# rewrite files model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run: try:", "fn1 = os.path.join(model_ws2, mfnam) if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try:", ") try: success = pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except: success", "compare budget terms if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success", "7 rows, and 100 columns\" ) v = (m.nlay, m.nrow, m.ncol, m.nper) assert", "and 100 columns\" ) v = (m.nlay, m.nrow, m.ncol, m.nper) assert v ==", "= [\"save head\", \"print budget\"] spd = {} for iper in range(1, m.dis.nper):", "= flopy.which(exe_name) run = True if v is None: run = False def", "= os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1,", "try: success, buff = m.run_model(silent=False) except: success = False assert success, \"base model", "pymake.setup(os.path.join(pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name ) assert m.load_fail", "\"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws,", "run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) # compare budget terms", "spd = {} for iper in range(1, 
m.dis.nper): for istp in [0, 4,", "[0, 4, 9, 14]: spd[(iper, istp)] = output spd[(0, 0)] = output spd[(1,", "= m.run_model(silent=False) except: success = False assert success, \"base model run did not", "model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert m.load_fail is", "not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) # compare budget terms if run:", "terms if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\" ) try: success = pymake.compare_budget(", "perform ls\" \"budget comparison\") assert success, \"budget comparison failure\" return if __name__ ==", "flopy.which(exe_name) run = True if v is None: run = False def test_uzf_unit_numbers():", "test_uzf_unit_numbers(): model_ws = f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth =", "FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") # copy", "= base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth = os.path.join(\"temp\",", "orig_pth = os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m", "to load all packages\" msg = ( \"modflow-2005 testsfr2_tab does not have \"", "= 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change the model work space model_ws2 =", "os.path.join(model_ws2, mfnam) # compare budget terms if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\"", "success, \"budget comparison failure\" return def test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True,", "= os.path.join(model_ws, mfnam) # change uzf iuzfcb2 
and add binary uzf output file", "failure\" return def test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam =", "= False assert success, \"base model run did not terminate successfully\" fn0 =", ") except: success = False print(\"could not perform ls\" \"budget comparison\") assert success,", "terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) if run: fsum = os.path.join( model_ws, f\"{os.path.splitext(mfnam)[0]}.budget.out\"", "m.ncol, m.nper) assert v == (1, 7, 100, 50), msg if run: try:", "success, \"base model run did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) #", "uzf iuzfcb2 and add binary uzf output file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\",", "\"budget comparison\") assert success, \"budget comparison failure\" return def test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\"", "head\", \"print budget\"] spd = {} for iper in range(1, m.dis.nper): for istp", "comparison\") assert success, \"budget comparison failure\" return def test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup", "1)] = output spd[(1, 2)] = output spd[(1, 3)] = output oc =", "output spd[(1, 2)] = output spd[(1, 3)] = output oc = flopy.modflow.ModflowOc(m, stress_period_data=spd)", "False assert success, \"new model run did not terminate successfully\" fn1 = os.path.join(model_ws2,", "= output spd[(1, 2)] = output spd[(1, 3)] = output oc = flopy.modflow.ModflowOc(m,", "test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") # copy files", "print(\"could not perform ls\" \"budget comparison\") assert success, \"budget comparison failure\" return def", "4, 9, 14]: spd[(iper, istp)] = output spd[(0, 0)] = output spd[(1, 1)]", "exe_name = \"mf2005\" v = flopy.which(exe_name) run = 
True if v is None:", "flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert m.load_fail is False, \"failed to", "# change uzf iuzfcb2 and add binary uzf output file m.uzf.iuzfcb2 = 61", "m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert m.load_fail is False,", "spd[(iper, istp)] = output spd[(0, 0)] = output spd[(1, 1)] = output spd[(1,", "iuzfcb2 and add binary uzf output file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\")", "= False print(\"could not perform ls\" \"budget comparison\") assert success, \"budget comparison failure\"", "\"failed to load all packages\" msg = ( \"modflow-2005 testsfr2_tab does not have", "\"data\", \"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\" v = flopy.which(exe_name) run", "(1, 7, 100, 50), msg if run: try: success, buff = m.run_model(silent=False) except:", "model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run: try: success, buff =", "= flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if run: try: success, buff = m.run_model(silent=False) except: success", "mfnam) # change uzf iuzfcb2 and add binary uzf output file m.uzf.iuzfcb2 =", "model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name ) assert m.load_fail is False,", "flopy base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth", ") assert m.load_fail is False, \"failed to load all packages\" # reset the", ") assert m.load_fail is False, \"failed to load all packages\" msg = (", "binary uzf output file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change the", "copy files import pymake pymake.setup(os.path.join(pth, 
mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws,", "work space model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite files m.write_input() #", "False assert success, \"base model run did not terminate successfully\" fn0 = os.path.join(model_ws,", "spd[(1, 3)] = output oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if run: try: success,", "the oc file m.remove_package(\"OC\") output = [\"save head\", \"print budget\"] spd = {}", "3)] = output oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if run: try: success, buff", "add binary uzf output file m.uzf.iuzfcb2 = 61 m.add_output_file(m.uzf.iuzfcb2, extension=\"uzfcb2.bin\", package=\"UZF\") # change", "is False, \"failed to load all packages\" # reset the oc file m.remove_package(\"OC\")", "base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth =", "v = (m.nlay, m.nrow, m.ncol, m.nper) assert v == (1, 7, 100, 50),", "the model work space model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite files", "verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name, ) assert m.load_fail is False, \"failed to load all", "ls\" \"budget comparison\") assert success, \"budget comparison failure\" return def test_unitnums_load_and_write(): model_ws =", "\"t036\") exe_name = \"mf2005\" v = flopy.which(exe_name) run = True if v is", "# copy files import pymake pymake.setup(os.path.join(pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True,", "= m.run_model(silent=False) except: success = False assert success, \"new model run did not", "pymake.compare_budget( fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum ) except: success = False print(\"could not", "load all packages\" msg = ( \"modflow-2005 testsfr2_tab does not have \" 
\"1", "all packages\" # reset the oc file m.remove_package(\"OC\") output = [\"save head\", \"print", "space model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) # rewrite files m.write_input() # run", "[\"save head\", \"print budget\"] spd = {} for iper in range(1, m.dis.nper): for", "os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\") exe_name = \"mf2005\" v =", "file m.remove_package(\"OC\") output = [\"save head\", \"print budget\"] spd = {} for iper", "columns\" ) v = (m.nlay, m.nrow, m.ncol, m.nper) assert v == (1, 7,", "import os import pymake from ci_framework import FlopyTestSetup, base_test_dir import flopy base_dir =", "package=\"UZF\") # change the model work space model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True)", "os.path.join(model_ws, mfnam) # rewrite files model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if", "m.dis.nper): for istp in [0, 4, 9, 14]: spd[(iper, istp)] = output spd[(0,", "terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # change uzf iuzfcb2 and add binary", "# copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False,", "f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\", \"data\",", "oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if run: try: success, buff = m.run_model(silent=False) except:", "\"failed to load all packages\" # reset the oc file m.remove_package(\"OC\") output =", "test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" # copy files import pymake pymake.setup(os.path.join(pth,", "run = False def test_uzf_unit_numbers(): model_ws = 
f\"{base_dir}_test_uzf_unit_numbers\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam", "output = [\"save head\", \"print budget\"] spd = {} for iper in range(1,", "= \"testsfr2_tab.nam\" # copy files import pymake pymake.setup(os.path.join(pth, mfnam), model_ws) m = flopy.modflow.Modflow.load(", "budget\"] spd = {} for iper in range(1, m.dis.nper): for istp in [0,", "msg if run: try: success, buff = m.run_model(silent=False) except: success = False assert", "m.write_input() if run: try: success, buff = m.run_model(silent=False) except: success = False assert", "copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, forgive=False, exe_name=exe_name,", "import FlopyTestSetup, base_test_dir import flopy base_dir = base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth = os.path.join(\"..\",", "def test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\" #", "does not have \" \"1 layer, 7 rows, and 100 columns\" ) v", "import pymake pymake.setup(os.path.join(pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name )", "files model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) m.write_input() if run: try: success, buff", "\"\"\" Test loading and preserving existing unit numbers \"\"\" import os import pymake", "files m.write_input() # run and compare the output files if run: try: success,", "files import pymake pymake.setup(os.path.join(pth, mfnam), model_ws) m = flopy.modflow.Modflow.load( mfnam, verbose=True, model_ws=model_ws, exe_name=exe_name", "= \"UZFtest2.nam\" orig_pth = os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth, mfnam),", "\"\"\" import os import pymake from 
ci_framework import FlopyTestSetup, base_test_dir import flopy base_dir", "# change the model work space model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2, reset_external=True) #", "verbose=True) pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\") exe_name =", "comparison\") assert success, \"budget comparison failure\" return if __name__ == \"__main__\": test_uzf_unit_numbers() test_unitnums_load_and_write()", "success, \"new model run did not terminate successfully\" fn1 = os.path.join(model_ws2, mfnam) #", "base_test_dir(__file__, rel_path=\"temp\", verbose=True) pth = os.path.join(\"..\", \"examples\", \"data\", \"mf2005_test\") cpth = os.path.join(\"temp\", \"t036\")", "extension=\"uzfcb2.bin\", package=\"UZF\") # change the model work space model_ws2 = os.path.join(model_ws, \"flopy\") m.change_model_ws(model_ws2,", "2)] = output spd[(1, 3)] = output oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) oc.write_file() if", "return def test_unitnums_load_and_write(): model_ws = f\"{base_dir}_test_unitnums_load_and_write\" test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws) mfnam = \"testsfr2_tab.nam\"", "os.path.join(\"..\", \"examples\", \"data\", \"uzf_examples\") # copy files pymake.setup(os.path.join(orig_pth, mfnam), model_ws) m = flopy.modflow.Modflow.load(", "run did not terminate successfully\" fn0 = os.path.join(model_ws, mfnam) # rewrite files model_ws2" ]
[ "self.get_experiment_tests(experiment_path) list_test_dicts = [] if len(experiments_tests) > 0: self.logger.info(' - Found {} test(s)'.format(len(experiments_tests)))", "name: str \"\"\" # call base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_,", "number of lines in ``filename_``. :param filename_: Filepath to be opened and line-read.", "of dictionaries by filling the missing fields with spaces into one dict. :param", "Unless required by applicable law or agreed to in writing, software # distributed", "= [elem for elem in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] #", "a data point for the aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path to experiment", "directory was indicated. if self.flags.expdir == '': print('Please pass the experiments directory as", "IBM Corporation 2018 # # Licensed under the Apache License, Version 2.0 (the", "@staticmethod def check_if_file_exists(dir_, filename_): \"\"\" Checks if ``filename_`` exists in ``dir_``. :param dir_:", "and create all required objects. grid_analyzer.setup_grid_experiment() # GO! grid_analyzer.run_grid_experiment() if __name__ == '__main__':", "str :return: Four dictionaries containing: - Status info (model, problem etc.), - Training", "chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value # Create \"empty\" equivalent. valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ')", "statistics from the checkpoint and add the 'valid_' prefix. for key, value in", "test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file and get random seeds. 
with open(os.path.join(experiment_test_path,", "the experiment folders, cherry-picking subfolders containing: - (a) 'training_configuration.yaml' (training configuration file), -", "experiments_tests = [elem for elem in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')]", "following valid experiments in directory: {} \\n\".format(self.experiment_rootdir) exp_str += '='*80 + '\\n' for", "'{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(), ' ') # Copy training", "the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of the", "GridAnalyzer() # parse args, load configuration and create all required objects. grid_analyzer.setup_grid_experiment() #", "= yaml.load(yaml_file) # Get seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load", "test results of a grid of experiments and gather them in a csv", "= test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load csv file and copy test statistics", "spaces into one dict. :param list_dicts: List of dictionaries, potentially containing different headers,", "dir_: str :param filename_: Name of the file to be opened and analyzed.", "all tests for a given training experiment. 
experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts = []", "\"\"\" Returns a list of folders containing valid test experiments data: - A", "in dirs: experiments_tests.append(os.path.join(root, name)) # Keep only the folders that contain a test", "valids = self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) # Merge everything into one big dictionary..", "exit(0) def run_experiment(self, experiment_path: str): \"\"\" Collects the training / validation / test", "list of valid test experiment folders. \"\"\" experiments_tests = [] for root, dirs,", "/ test statistics for a given experiment path. Analyzes whether the given training", "(from test csv files found in subdirectories). :param experiment_path: Path to an experiment", "self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if the csv file contains at least one data", "> 1. :param dir_: Path to file. :type dir_: str :param filename_: Name", ":py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the test results of a grid of experiments and gather them", "file is > 1. :param dir_: Path to file. :type dir_: str :param", "confirm and start the grid analyzis\\n') except KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path: str):", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# Return the result. return final_dict def run_grid_experiment(self): \"\"\" Collects four list of", "check_file_content(self, dir_, filename_): \"\"\" Checks if the number of lines in the file", "status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] # Load checkpoint from model file. 
chkpt", "file containing a data point for the aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path", "2018 # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "all dictionaries with lists return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\"", "\"\"\" Checks if ``filename_`` exists in ``dir_``. :param dir_: Path to file. :type", "get_experiment_tests(self, experiment_path_): \"\"\" Returns a list of folders containing valid test experiments data:", "'training_configuration.yaml'), 'r') as yaml_file: params = yaml.load(yaml_file) # Get problem and model names", "status_dict_empty = dict.fromkeys(status_dict.keys(), ' ') # Copy training status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path,", "& <NAME>\" import os import csv import yaml import torch import logging from", "self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod def get_lines_number(filename_): \"\"\" Returns the number of lines", "tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge lists. statuses = self.merge_list_dicts(list_statuses)", "from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls", "collected statistics (excluding the header). - Collects statistics from training, validation (from model", "one `.csv` file. \"\"\" __author__ = \"<NAME> & <NAME>\" import os import csv", "test experiments data: - A configuration (`testing_configuration.yaml`), - A csv file containing a", "``GridTrainers`` and ``GridTesters``. 
\\ It gathers the test results into one `.csv` file.", "list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge lists. statuses = self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains)", "grid of experiments and gather them in a csv file. This csv file", "Parse arguments. self.flags, self.unparsed = self.parser.parse_known_args() # Set logger depending on the settings.", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check if experiments directory was indicated. if self.flags.expdir", "from config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] # Load checkpoint from model", ":param experiment_path_: Path to experiment (training) folder. :type experiment_path_: str :return: A list", "require that the test statistics csv files are valid, i.e. contain at least", "saved model). \"\"\" # Parse arguments. self.flags, self.unparsed = self.parser.parse_known_args() # Set logger", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "empty gaps. list_filled_dicts = [] for i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) #", "into one big dictionary.. exp_values = {**statuses, **trains, **valids, **tests} # create results", "list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists of dicts. 
final_dict = dict(zip(header, zip(*[d.values() for d", "{}\\n\".format(exp) exp_str += '='*80 + '\\n' self.logger.info(exp_str) # Ask for confirmation - optional.", "as yaml_file: params = yaml.load(yaml_file) # Get problem and model names - from", "whether the given training experiment folder contains subfolders with test experiments data: -", "the folders that contain a test configuration file and a csv statistics file.", "given training experiment. experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts = [] if len(experiments_tests) > 0:", "'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file and get random seeds. with", "[] if len(experiments_tests) > 0: self.logger.info(' - Found {} test(s)'.format(len(experiments_tests))) # \"Expand\" status,", "Training statistics, - Validation statistics, - Test statistics. \"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path))", "import yaml import torch import logging from datetime import datetime from miprometheus.grid_workers.grid_worker import", "and add the 'valid_' prefix. for key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value", "datetime import datetime from miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation of the", "in os.walk(self.experiment_rootdir, topdown=True): for name in dirs: self.experiments_list.append(os.path.join(root, name)) # Keep only the", "all sub-directories paths in expdir. self.experiments_list = [] for root, dirs, _ in", "= params['model']['name'] # Load checkpoint from model file. 
chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda", "in self.experiments_list: exp_str += \" - {}\\n\".format(exp) exp_str += '='*80 + '\\n' self.logger.info(exp_str)", "name)) # Keep only the folders that contain a test configuration file and", "list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges a list of dictionaries by", "one and collect data. list_statuses = [] list_trains = [] list_valids = []", "a given training experiment. experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts = [] if len(experiments_tests) >", "self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if there are some valid folders.", "[] for root, dirs, _ in os.walk(experiment_path_, topdown=True): for name in dirs: experiments_tests.append(os.path.join(root,", "try: input('Press <Enter> to confirm and start the grid analyzis\\n') except KeyboardInterrupt: exit(0)", "conditions,...), \\ the validation statistics and the test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\"", "contains aggregated validation statistics). self.experiments_list = [elem for elem in self.experiments_list if self.check_if_file_exists(elem,", "`.csv` file. \"\"\" __author__ = \"<NAME> & <NAME>\" import os import csv import", "and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if there are some valid folders. if len(self.experiments_list)", "configuration (`testing_configuration.yaml`), - A csv file containing a data point for the aggregated", "to file. :type dir_: str :param filename_: Name of the file to be", "Return the result. 
return final_dict def run_grid_experiment(self): \"\"\" Collects four list of dicts", "for row in test_reader: for key, value in row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict)", "dict: test_dict = dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load", "statistics, - Recursively traverses subdirectories looking for test experiments, .. note:: We require", "to the number of test folders. list_status_dicts = [status_dict, *[status_dict_empty for _ in", "delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis", "config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] # Load checkpoint from model file.", "os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy", "dictionaries. status_dict = dict() train_dict = dict() valid_dict = dict() # Load yaml", "- Loads and parses training configuration file, - Loads checkpoint with model and", "-*- # # Copyright (C) IBM Corporation 2018 # # Licensed under the", "grid analyzis\\n') except KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path: str): \"\"\" Collects the training", "and collect data. 
list_statuses = [] list_trains = [] list_valids = [] list_tests", "elem in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if there", "Keep only the folders that contain training_configuration.yaml, training_statistics.csv and # training.csv and model", "Ask for confirmation - optional. if self.flags.user_confirm: try: input('Press <Enter> to confirm and", "for key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value # Create \"empty\" equivalent. valid_dict_empty", "not use this file except in compliance with the License. # You may", "Set logger depending on the settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check if experiments", "stored in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def main(): \"\"\" Entry point", "'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if the csv file contains at least", "dict.fromkeys(status_dict.keys(), ' ') # Copy training status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp']", "in f) def get_experiment_tests(self, experiment_path_): \"\"\" Returns a list of folders containing valid", "str :return: True if the number of lines in the file is strictly", "GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the test results of", "_ in range(len(experiments_tests) - 1)]] # Get tests statistics. for experiment_test_path in experiments_tests:", "model file. 
chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp'])", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "in expdir. self.experiments_list = [] for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True): for", "csv file and copy test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f: #", "train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy the training statistics from the checkpoint and add", "file and get random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file: test_params =", "agreed to in writing, software # distributed under the License is distributed on", "sets logger level, - Checks the presence of experiments folder, - Recursively traverses", "the file. \"\"\" with open(filename_) as f: return sum(1 for _ in f)", "there are some valid folders. if len(self.experiments_list) == 0: self.logger.error(\"There are no valid", "str :param filename_: Name of the file to be opened and analysed. :type", "self.experiments_list.append(os.path.join(root, name)) # Keep only the folders that contain training_configuration.yaml, training_statistics.csv and #", "the experiments directory as --expdir') exit(-1) # Get experiment directory. self.experiment_rootdir = self.flags.expdir", "- optional. if self.flags.user_confirm: try: input('Press <Enter> to confirm and start the grid", "for i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists of dicts. final_dict", "{} directory!\".format(self.experiment_rootdir)) exit(-2) # List folders with \"valid\" experiment data. 
exp_str = \"Found", "with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file: test_params = yaml.load(yaml_file) # Get seeds. test_dict['test_seed_torch']", "'='*80 + '\\n' self.logger.info(exp_str) # Ask for confirmation - optional. if self.flags.user_confirm: try:", "the test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for", "if self.flags.user_confirm: try: input('Press <Enter> to confirm and start the grid analyzis\\n') except", "def get_experiment_tests(self, experiment_path_): \"\"\" Returns a list of folders containing valid test experiments", "\\ (from test csv files found in subdirectories). :param experiment_path: Path to an", "validation / test statistics for a given experiment path. Analyzes whether the given", "Zip lists of dicts. final_dict = dict(zip(header, zip(*[d.values() for d in list_filled_dicts]))) #", ":type list_dicts: list :return: dict, resulting of the merge. \"\"\" # Create a", "training statistics from the checkpoint and add the 'train_' prefix. for key, value", "number of lines in the file is > 1. :param dir_: Path to", "Get experiment directory. self.experiment_rootdir = self.flags.expdir # Get all sub-directories paths in expdir.", "in list_filled_dicts]))) # Return the result. return final_dict def run_grid_experiment(self): \"\"\" Collects four", ":type filename_: str :return: True if the number of lines in the file", "the training / validation / test statistics for a given experiment path. Analyzes", "KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path: str): \"\"\" Collects the training / validation /", "== '': print('Please pass the experiments directory as --expdir') exit(-1) # Get experiment", "to in writing, software # distributed under the License is distributed on an", "list :return: dict, resulting of the merge. \"\"\" # Create a \"unified\" header.", "implied. 
# See the License for the specific language governing permissions and #", "finished') self.logger.info('Results stored in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def main(): \"\"\"", "experiments_tests def setup_grid_experiment(self): \"\"\" Setups the overall experiment: - Parses arguments and sets", "to confirm and start the grid analyzis\\n') except KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path:", "# Copy the training statistics from the checkpoint and add the 'train_' prefix.", "of a grid of experiments and gather them in a csv file. This", "csv file containing a data point for the aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_:", "if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if there are some valid", "test experiment folders. \"\"\" experiments_tests = [] for root, dirs, _ in os.walk(experiment_path_,", "list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges a list of dictionaries", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "terminal conditions,...), \\ the validation statistics and the test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`.", "contained in ``self.experiments_lists``. 
Merges all them together and saves result to a single", "__init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker`", "for _ in range(len(experiments_tests) - 1)]] list_train_dicts = [train_dict, *[train_dict_empty for _ in", "Return all dictionaries with lists return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts):", "Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer() # parse args,", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge lists. statuses = self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids", "# Create \"empty\" equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(), ' ') # Copy the validation", "\"\"\" try: # Go through the experiments one by one and collect data.", "= dict.fromkeys(valid_dict.keys(), ' ') # Get all tests for a given training experiment.", "mode='r') as f: # Open file. test_reader = csv.DictReader(f) # Copy training statistics.", "list_dicts: list :return: dict, resulting of the merge. \"\"\" # Create a \"unified\"", "the file exists in the directory, else False \"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def", "aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path to experiment (training) folder. :type experiment_path_: str", "experiments_tests.append(os.path.join(root, name)) # Keep only the folders that contain a test configuration file", "problem name and random seeds. 
with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file: params =", "contain at least one line with \\ collected statistics (excluding the header). -", "experiment folder contains subfolders with test experiments data: - Loads and parses training", "- This script post-processes the output of the ``GridTrainers`` and ``GridTesters``. \\ It", "') # Copy training status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path))", "in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def main(): \"\"\" Entry point function", "base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_): \"\"\" Checks if ``filename_``", "(seeds, accuracies, terminal conditions,...), \\ the validation statistics and the test statistics. Inherits", "random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file: test_params = yaml.load(yaml_file) # Get", ":param list_dicts: List of dictionaries, potentially containing different headers, which will be merged.", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "Path to experiment (training) folder. :type experiment_path_: str :return: A list of valid", "you may not use this file except in compliance with the License. #", "str \"\"\" # call base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_):", "gather them in a csv file. This csv file will gather the training", "Create \"empty\" equivalent. 
valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ') # Get all tests for", "- (a) 'training_configuration.yaml' (training configuration file), - (b) 'models/model_best.pt' (checkpoint of the best", "experiment. experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts = [] if len(experiments_tests) > 0: self.logger.info(' -", "the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the test results of a grid of experiments and gather", "valid_dict = dict() # Load yaml file, to get model name, problem name", "\\ It gathers the test results into one `.csv` file. \"\"\" __author__ =", "valid tests') list_status_dicts = [status_dict] list_train_dicts = [train_dict] list_valid_dicts = [valid_dict] # Add", "gaps. list_filled_dicts = [] for i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "[] list_valids = [] list_tests = [] for exp in self.experiments_list: statuses, trains,", "main(): \"\"\" Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer() #", "one line with \\ collected statistics (excluding the header). - Collects statistics from", "return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_, filename_): \"\"\" Checks if the number of", "a single csv file. \"\"\" try: # Go through the experiments one by", "A csv file containing a data point for the aggregated statistics (`testing_set_agg_statistics.csv`) :param", "'models/model_best.pt')] # Check if there are some valid folders. if len(self.experiments_list) == 0:", "will be merged. :type list_dicts: list :return: dict, resulting of the merge. 
\"\"\"", "os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy the training statistics from", "'testing_set_agg_statistics.csv'), mode='r') as f: # Open file. test_reader = csv.DictReader(f) # Copy training", "statistics file. experiments_tests = [elem for elem in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and", "from datetime import datetime from miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation of", "for a given training experiment. experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts = [] if len(experiments_tests)", "problem and model names - from config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] = params['model']['name']", "subfolders with test experiments data: - Loads and parses training configuration file, -", "enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists of dicts. final_dict = dict(zip(header, zip(*[d.values() for", "folders containing valid test experiments data: - A configuration (`testing_configuration.yaml`), - A csv", "be opened and line-read. :type filename_: str :return: Number of lines in the", "dir_: Path to file. :type dir_: str :param filename_: Name of the file", "headers, which will be merged. :type list_dicts: list :return: dict, resulting of the", "in dirs: self.experiments_list.append(os.path.join(root, name)) # Keep only the folders that contain training_configuration.yaml, training_statistics.csv", "exit(-1) # Get experiment directory. self.experiment_rootdir = self.flags.expdir # Get all sub-directories paths", "a csv file. This csv file will gather the training statistics (seeds, accuracies,", "= value # Create \"empty\" equivalent. 
valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ') # Get", "lists return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges a list", "This script post-processes the output of the ``GridTrainers`` and ``GridTesters``. \\ It gathers", "check_if_file_exists(dir_, filename_): \"\"\" Checks if ``filename_`` exists in ``dir_``. :param dir_: Path to", "directory: {} \\n\".format(self.experiment_rootdir) exp_str += '='*80 + '\\n' for exp in self.experiments_list: exp_str", "for key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value # Create \"empty\" equivalent. train_dict_empty", "the number of lines in the file is strictly greater than one. \"\"\"", "1)]] list_train_dicts = [train_dict, *[train_dict_empty for _ in range(len(experiments_tests) - 1)]] list_valid_dicts =", "in the file. \"\"\" with open(filename_) as f: return sum(1 for _ in", "d in list_filled_dicts]))) # Return the result. return final_dict def run_grid_experiment(self): \"\"\" Collects", "the number of lines in the file is > 1. :param dir_: Path", "post-processes the output of the ``GridTrainers`` and ``GridTesters``. \\ It gathers the test", "``filename_``. :param filename_: Filepath to be opened and line-read. :type filename_: str :return:", "a training statistics. :type experiment_path: str :return: Four dictionaries containing: - Status info", "# Create dictionaries. status_dict = dict() train_dict = dict() valid_dict = dict() #", "{}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def main(): \"\"\" Entry point function for", "training_configuration.yaml, training_statistics.csv and # training.csv and model (which contains aggregated validation statistics). 
self.experiments_list", "self.experiments_list = [elem for elem in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')]", "resulting of the merge. \"\"\" # Create a \"unified\" header. header = set(k", "the test results into one `.csv` file. \"\"\" __author__ = \"<NAME> & <NAME>\"", "def merge_list_dicts(list_dicts): \"\"\" Merges a list of dictionaries by filling the missing fields", "the header). - Collects statistics from training, validation (from model checkpoint) and test", "dictionaries by filling the missing fields with spaces into one dict. :param list_dicts:", "name, problem name and random seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file: params", "\"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) # Create dictionaries. status_dict = dict() train_dict =", "the training statistics (seeds, accuracies, terminal conditions,...), \\ the validation statistics and the", "for name in dirs: experiments_tests.append(os.path.join(root, name)) # Keep only the folders that contain", "\"empty\" equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(), ' ') # Copy the validation statistics from", "= [status_dict] list_train_dicts = [train_dict] list_valid_dicts = [valid_dict] # Add \"empty test entry\"", "self.experiments_list: exp_str += \" - {}\\n\".format(exp) exp_str += '='*80 + '\\n' self.logger.info(exp_str) #", "os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file and get random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r')", "traverses subdirectories looking for test experiments, .. 
note:: We require that the test", "= [train_dict, *[train_dict_empty for _ in range(len(experiments_tests) - 1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty", "dict(zip(header, zip(*[d.values() for d in list_filled_dicts]))) # Return the result. return final_dict def", "filename_)) def check_file_content(self, dir_, filename_): \"\"\" Checks if the number of lines in", "\"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name:", "list_valids = [] list_tests = [] for exp in self.experiments_list: statuses, trains, valids,", "number of lines in the file is strictly greater than one. \"\"\" return", "dictionary.. exp_values = {**statuses, **trains, **valids, **tests} # create results file results_file =", "# Get tests statistics. for experiment_test_path in experiments_tests: self.logger.info(' - Analyzing test from:", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "coding: utf-8 -*- # # Copyright (C) IBM Corporation 2018 # # Licensed", "- 1)]] list_train_dicts = [train_dict, *[train_dict_empty for _ in range(len(experiments_tests) - 1)]] list_valid_dicts", "self.logger.info('Grid analysis interrupted!') def main(): \"\"\" Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\"", "list of dicts from each experiment path contained in ``self.experiments_lists``. Merges all them", "= [] for root, dirs, _ in os.walk(experiment_path_, topdown=True): for name in dirs:", "file exists in the directory, else False \"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self,", "csv file. \"\"\" try: # Go through the experiments one by one and", "dir_: str :param filename_: Name of the file to be opened and analysed.", ":param filename_: Name of the file to be opened and analysed. 
:type filename_:", "\"<NAME> & <NAME>\" import os import csv import yaml import torch import logging", "statistics for a given experiment path. Analyzes whether the given training experiment folder", "os.walk(experiment_path_, topdown=True): for name in dirs: experiments_tests.append(os.path.join(root, name)) # Keep only the folders", "test statistics for a given experiment path. Analyzes whether the given training experiment", "one big dictionary.. exp_values = {**statuses, **trains, **valids, **tests} # create results file", "See the License for the specific language governing permissions and # limitations under", "of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the test results of a grid of experiments and", "of the file to be opened and analyzed. :type filename_: str :return: True", "= os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy the training statistics", "entry\" list_test_dicts.append({}) # Return all dictionaries with lists return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts", "- A csv file containing a data point for the aggregated statistics (`testing_set_agg_statistics.csv`)", "list_statuses = [] list_trains = [] list_valids = [] list_tests = [] for", "a csv statistics file. experiments_tests = [elem for elem in experiments_tests if self.check_if_file_exists(elem,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "' ') # Get all tests for a given training experiment. experiments_tests =", "stats. 
train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] =", "for key, value in row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else: self.logger.info(' - Could", "chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value # Create \"empty\" equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(), ' ')", "statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`:", "for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer() # parse args, load configuration and", "function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer() # parse args, load configuration", "if len(self.experiments_list) == 0: self.logger.error(\"There are no valid experiment folders in {} directory!\".format(self.experiment_rootdir))", "Copy training statistics. for row in test_reader: for key, value in row.items(): test_dict['test_{}'.format(key)]", "directory. self.experiment_rootdir = self.flags.expdir # Get all sub-directories paths in expdir. self.experiments_list =", "\"\"\" Checks if the number of lines in the file is > 1.", "dict.fromkeys(valid_dict.keys(), ' ') # Get all tests for a given training experiment. experiments_tests", "empty_dict = {k: ' ' for k in header} # \"Fill\" all lists", "file. 
\"\"\" __author__ = \"<NAME> & <NAME>\" import os import csv import yaml", "train_dict_empty = dict.fromkeys(train_dict.keys(), ' ') # Copy the validation statistics from the checkpoint", "experiment folders, cherry-picking subfolders containing: - (a) 'training_configuration.yaml' (training configuration file), - (b)", "prefix. for key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value # Create \"empty\" equivalent.", "(training configuration file), - (b) 'models/model_best.pt' (checkpoint of the best saved model). \"\"\"", "# Load yaml file, to get model name, problem name and random seeds.", "+= \" - {}\\n\".format(exp) exp_str += '='*80 + '\\n' self.logger.info(exp_str) # Ask for", "arguments and sets logger level, - Checks the presence of experiments folder, -", "# Get problem and model names - from config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model']", "the folders that contain training_configuration.yaml, training_statistics.csv and # training.csv and model (which contains", "self.merge_list_dicts(list_tests) # Merge everything into one big dictionary.. exp_values = {**statuses, **trains, **valids,", "from training, validation (from model checkpoint) and test experiments \\ (from test csv", "'models/model_best.pt' (checkpoint of the best saved model). \"\"\" # Parse arguments. self.flags, self.unparsed", "in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value # Create \"empty\" equivalent. valid_dict_empty = dict.fromkeys(valid_dict.keys(), '", "{} \\n\".format(self.experiment_rootdir) exp_str += '='*80 + '\\n' for exp in self.experiments_list: exp_str +=", "\"\"\" grid_analyzer = GridAnalyzer() # parse args, load configuration and create all required", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "It gathers the test results into one `.csv` file. 
\"\"\" __author__ = \"<NAME>", "key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value # Create \"empty\" equivalent. valid_dict_empty =", "- Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of the worker (DEFAULT:", "the settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check if experiments directory was indicated. if", "writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored in {}.'.format(results_file)) except", "Four dictionaries containing: - Status info (model, problem etc.), - Training statistics, -", "list_dicts: List of dictionaries, potentially containing different headers, which will be merged. :type", "# limitations under the License. \"\"\" grid_analyzer.py: - This script post-processes the output", "test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load csv file and copy test statistics with open(os.path.join(experiment_test_path,", "than one. \"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod def get_lines_number(filename_): \"\"\" Returns", "@staticmethod def get_lines_number(filename_): \"\"\" Returns the number of lines in ``filename_``. :param filename_:", "of folders containing valid test experiments data: - A configuration (`testing_configuration.yaml`), - A", "- Parses arguments and sets logger level, - Checks the presence of experiments", "- A configuration (`testing_configuration.yaml`), - A csv file containing a data point for", "Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. 
Post-processes the test results of a grid of experiments", "call base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_): \"\"\" Checks if", "statistics from training, validation (from model checkpoint) and test experiments \\ (from test", "_ in range(len(experiments_tests) - 1)]] list_train_dicts = [train_dict, *[train_dict_empty for _ in range(len(experiments_tests)", "and a csv statistics file. experiments_tests = [elem for elem in experiments_tests if", "folders, cherry-picking subfolders containing: - (a) 'training_configuration.yaml' (training configuration file), - (b) 'models/model_best.pt'", "(checkpoint of the best saved model). \"\"\" # Parse arguments. self.flags, self.unparsed =", "was indicated. if self.flags.expdir == '': print('Please pass the experiments directory as --expdir')", "of dicts from each experiment path contained in ``self.experiments_lists``. Merges all them together", "self.logger.info(' - Found {} test(s)'.format(len(experiments_tests))) # \"Expand\" status, train and valid dicts by", "= self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) # Merge everything into one", "+ '\\n' self.logger.info(exp_str) # Ask for confirmation - optional. if self.flags.user_confirm: try: input('Press", "only the folders that contain training_configuration.yaml, training_statistics.csv and # training.csv and model (which", "in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists of dicts. final_dict = dict(zip(header, zip(*[d.values()", "[elem for elem in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check", "experiment_path_: str :return: A list of valid test experiment folders. \"\"\" experiments_tests =", "logger depending on the settings. 
self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check if experiments directory", "KIND, either express or implied. # See the License for the specific language", "file to be opened and analyzed. :type filename_: str :return: True if the", "for k in header} # \"Fill\" all lists with empty gaps. list_filled_dicts =", "gather the training statistics (seeds, accuracies, terminal conditions,...), \\ the validation statistics and", "- from config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] # Load checkpoint from", "strictly greater than one. \"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod def get_lines_number(filename_):", "def get_lines_number(filename_): \"\"\" Returns the number of lines in ``filename_``. :param filename_: Filepath", "(C) IBM Corporation 2018 # # Licensed under the Apache License, Version 2.0", "for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True): for name in dirs: self.experiments_list.append(os.path.join(root, name))", "name=\"GridAnalyzer\"): \"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param", "that the test statistics csv files are valid, i.e. contain at least one", "analysis interrupted!') def main(): \"\"\" Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer", "checkpoint and add the 'train_' prefix. for key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] =", "@staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges a list of dictionaries by filling the missing", "ANY KIND, either express or implied. # See the License for the specific", "equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(), ' ') # Copy training status stats. 
train_dict['training_configuration_filepath'] =", "<NAME>\" import os import csv import yaml import torch import logging from datetime", "if the number of lines in the file is > 1. :param dir_:", "for elem in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if", "'train_' prefix. for key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value # Create \"empty\"", "self.parser.parse_known_args() # Set logger depending on the settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "params = yaml.load(yaml_file) # Get problem and model names - from config. status_dict['problem']", "# Get seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load csv file", "\\ the validation statistics and the test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def", "filename_)) > 1 @staticmethod def get_lines_number(filename_): \"\"\" Returns the number of lines in", "= os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file and get random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'),", "exp in self.experiments_list: exp_str += \" - {}\\n\".format(exp) exp_str += '='*80 + '\\n'", "Check if there are some valid folders. if len(self.experiments_list) == 0: self.logger.error(\"There are", "will gather the training statistics (seeds, accuracies, terminal conditions,...), \\ the validation statistics", "# Zip lists of dicts. final_dict = dict(zip(header, zip(*[d.values() for d in list_filled_dicts])))", "potentially containing different headers, which will be merged. 
:type list_dicts: list :return: dict,", "_ in f) def get_experiment_tests(self, experiment_path_): \"\"\" Returns a list of folders containing", "= test_params['testing']['seed_numpy'] # Load csv file and copy test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'),", "given experiment path. Analyzes whether the given training experiment folder contains subfolders with", "Collects statistics from training, validation (from model checkpoint) and test experiments \\ (from", "opened and analysed. :type filename_: str :return: True if the file exists in", "statistics. :type experiment_path: str :return: Four dictionaries containing: - Status info (model, problem", "model and training and validation statistics, - Recursively traverses subdirectories looking for test", "and gather them in a csv file. This csv file will gather the", "test csv files found in subdirectories). :param experiment_path: Path to an experiment folder", "to experiment (training) folder. :type experiment_path_: str :return: A list of valid test", "lists of dicts. final_dict = dict(zip(header, zip(*[d.values() for d in list_filled_dicts]))) # Return", "experiment (training) folder. :type experiment_path_: str :return: A list of valid test experiment", "for the aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path to experiment (training) folder. :type", "test folders. list_status_dicts = [status_dict, *[status_dict_empty for _ in range(len(experiments_tests) - 1)]] list_train_dicts", "# List folders with \"valid\" experiment data. 
exp_str = \"Found the following valid", "self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) # Merge everything", "elem in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if the", "self.experiments_list: statuses, trains, valids, tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge", "lists with empty gaps. list_filled_dicts = [] for i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict,", "'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy the", "# Load yaml file and get random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as", "{**statuses, **trains, **valids, **tests} # create results file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with", "value in row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else: self.logger.info(' - Could not find", "interrupted!') def main(): \"\"\" Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer =", "# Load checkpoint from model file. 
chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc:", "self.logger.info('Analysis finished') self.logger.info('Results stored in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def main():", "directory!\".format(self.experiment_rootdir)) exit(-2) # List folders with \"valid\" experiment data. exp_str = \"Found the", "list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges a list of dictionaries by filling", "only the folders that contain a test configuration file and a csv statistics", "in os.walk(experiment_path_, topdown=True): for name in dirs: experiments_tests.append(os.path.join(root, name)) # Keep only the", "dict.fromkeys(train_dict.keys(), ' ') # Copy the validation statistics from the checkpoint and add", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "status_dict['model'] = params['model']['name'] # Load checkpoint from model file. chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'),", "be opened and analyzed. :type filename_: str :return: True if the number of", "csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid", "[train_dict] list_valid_dicts = [valid_dict] # Add \"empty test entry\" list_test_dicts.append({}) # Return all", "tests for a given training experiment. experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts = [] if", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "an experiment folder containing a training statistics. :type experiment_path: str :return: Four dictionaries", "at least one line with \\ collected statistics (excluding the header). 
- Collects", "dict from the unified header. empty_dict = {k: ' ' for k in", "= params['training']['seed_numpy'] # Copy the training statistics from the checkpoint and add the", "writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!')", "applicable law or agreed to in writing, software # distributed under the License", "folders that contain training_configuration.yaml, training_statistics.csv and # training.csv and model (which contains aggregated", "# Keep only the folders that contain training_configuration.yaml, training_statistics.csv and # training.csv and", "training statistics (seeds, accuracies, terminal conditions,...), \\ the validation statistics and the test", "Returns the number of lines in ``filename_``. :param filename_: Filepath to be opened", "= dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file", "and model names - from config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] #", "= [valid_dict] # Add \"empty test entry\" list_test_dicts.append({}) # Return all dictionaries with", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "\"\"\" Setups the overall experiment: - Parses arguments and sets logger level, -", "and the test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor", "/ validation / test statistics for a given experiment path. Analyzes whether the", "= params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] # Load checkpoint from model file. 
chkpt =", "writing, software # distributed under the License is distributed on an \"AS IS\"", "and test experiments \\ (from test csv files found in subdirectories). :param experiment_path:", "import torch import logging from datetime import datetime from miprometheus.grid_workers.grid_worker import GridWorker class", "output of the ``GridTrainers`` and ``GridTesters``. \\ It gathers the test results into", "(from model checkpoint) and test experiments \\ (from test csv files found in", "0: self.logger.info(' - Found {} test(s)'.format(len(experiments_tests))) # \"Expand\" status, train and valid dicts", "= [] list_tests = [] for exp in self.experiments_list: statuses, trains, valids, tests", "list_valid_dicts = [valid_dict] # Add \"empty test entry\" list_test_dicts.append({}) # Return all dictionaries", "# create results file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as outfile:", "and random seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file: params = yaml.load(yaml_file) #", "- 1)]] # Get tests statistics. for experiment_test_path in experiments_tests: self.logger.info(' - Analyzing", "compliance with the License. # You may obtain a copy of the License", "Parses arguments and sets logger level, - Checks the presence of experiments folder,", "Get problem and model names - from config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] =", "of the worker (DEFAULT: \"GridAnalyzer\"). :type name: str \"\"\" # call base constructor", "file contains at least one data point. experiments_tests = [elem for elem in", "return final_dict def run_grid_experiment(self): \"\"\" Collects four list of dicts from each experiment", "the number of test folders. list_status_dicts = [status_dict, *[status_dict_empty for _ in range(len(experiments_tests)", ":param dir_: Path to file. 
:type dir_: str :param filename_: Name of the", "(a) 'training_configuration.yaml' (training configuration file), - (b) 'models/model_best.pt' (checkpoint of the best saved", "and copy test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f: # Open file.", ":return: Number of lines in the file. \"\"\" with open(filename_) as f: return", "csv.DictReader(f) # Copy training statistics. for row in test_reader: for key, value in", "list of folders containing valid test experiments data: - A configuration (`testing_configuration.yaml`), -", "dict, resulting of the merge. \"\"\" # Create a \"unified\" header. header =", "in range(len(experiments_tests) - 1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) -", "the checkpoint and add the 'train_' prefix. for key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)]", "list of dictionaries by filling the missing fields with spaces into one dict.", "= self.flags.expdir # Get all sub-directories paths in expdir. self.experiments_list = [] for", "training experiment. experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts = [] if len(experiments_tests) > 0: self.logger.info('", "train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy']", "\"\"\" # call base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_): \"\"\"", "folders. \"\"\" experiments_tests = [] for root, dirs, _ in os.walk(experiment_path_, topdown=True): for", "folder. :type experiment_path_: str :return: A list of valid test experiment folders. 
\"\"\"", "row in test_reader: for key, value in row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else:", "unified header. empty_dict = {k: ' ' for k in header} # \"Fill\"", "= '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent. status_dict_empty", "self.logger.info('Results stored in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def main(): \"\"\" Entry", "training status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch']", "(b) 'models/model_best.pt' (checkpoint of the best saved model). \"\"\" # Parse arguments. self.flags,", "1)]] # Get tests statistics. for experiment_test_path in experiments_tests: self.logger.info(' - Analyzing test", "except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def main(): \"\"\" Entry point function for the", "in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value # Create \"empty\" equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(), '", "``GridTesters``. \\ It gathers the test results into one `.csv` file. \"\"\" __author__", "specific language governing permissions and # limitations under the License. \"\"\" grid_analyzer.py: -", "folders. list_status_dicts = [status_dict, *[status_dict_empty for _ in range(len(experiments_tests) - 1)]] list_train_dicts =", "yaml_file: test_params = yaml.load(yaml_file) # Get seeds. 
test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy']", "name)) # Keep only the folders that contain training_configuration.yaml, training_statistics.csv and # training.csv", "(the \"License\"); # you may not use this file except in compliance with", "# Set logger depending on the settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check if", "in the file is strictly greater than one. \"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) >", "(DEFAULT: \"GridAnalyzer\"). :type name: str \"\"\" # call base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False)", "1. :param dir_: Path to file. :type dir_: str :param filename_: Name of", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "exp_str = \"Found the following valid experiments in directory: {} \\n\".format(self.experiment_rootdir) exp_str +=", "== 0: self.logger.error(\"There are no valid experiment folders in {} directory!\".format(self.experiment_rootdir)) exit(-2) #", "found in subdirectories). :param experiment_path: Path to an experiment folder containing a training", "lines in the file is strictly greater than one. \"\"\" return self.get_lines_number(os.path.join(dir_, filename_))", "# Get all sub-directories paths in expdir. self.experiments_list = [] for root, dirs,", "training_statistics.csv and # training.csv and model (which contains aggregated validation statistics). self.experiments_list =", "subdirectories looking for test experiments, .. 
note:: We require that the test statistics", "create results file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as outfile: writer", "- 1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) - 1)]] #", "seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load csv file and copy", "in subdirectories). :param experiment_path: Path to an experiment folder containing a training statistics.", "that contain a test configuration file and a csv statistics file. experiments_tests =", "experiment_path: Path to an experiment folder containing a training statistics. :type experiment_path: str", "file except in compliance with the License. # You may obtain a copy", "everything into one big dictionary.. exp_values = {**statuses, **trains, **valids, **tests} # create", "List of dictionaries, potentially containing different headers, which will be merged. :type list_dicts:", "of lines in the file. \"\"\" with open(filename_) as f: return sum(1 for", "grid_analyzer = GridAnalyzer() # parse args, load configuration and create all required objects.", "final_dict = dict(zip(header, zip(*[d.values() for d in list_filled_dicts]))) # Return the result. return", "for _ in range(len(experiments_tests) - 1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in", "self.logger.error(\"There are no valid experiment folders in {} directory!\".format(self.experiment_rootdir)) exit(-2) # List folders", "filename_: Filepath to be opened and line-read. :type filename_: str :return: Number of", "\"Expand\" status, train and valid dicts by empty ones, prop. 
to the number", "self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if there are some", "params['training']['seed_numpy'] # Copy the training statistics from the checkpoint and add the 'train_'", "valid experiment folders in {} directory!\".format(self.experiment_rootdir)) exit(-2) # List folders with \"valid\" experiment", "into one dict. :param list_dicts: List of dictionaries, potentially containing different headers, which", "\"\"\" # Create a \"unified\" header. header = set(k for d in list_dicts", "etc.), - Training statistics, - Validation statistics, - Test statistics. \"\"\" self.logger.info('Analyzing experiments", "Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: -", "the ``GridTrainers`` and ``GridTesters``. \\ It gathers the test results into one `.csv`", "load configuration and create all required objects. grid_analyzer.setup_grid_experiment() # GO! grid_analyzer.run_grid_experiment() if __name__", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "different headers, which will be merged. :type list_dicts: list :return: dict, resulting of", "``filename_`` exists in ``dir_``. :param dir_: Path to file. :type dir_: str :param", "list_filled_dicts = [] for i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists", "the aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path to experiment (training) folder. :type experiment_path_:", "test experiments \\ (from test csv files found in subdirectories). :param experiment_path: Path", "list_valids.extend(valids) list_tests.extend(tests) # Merge lists. 
statuses = self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids =", "experiment folders. \"\"\" experiments_tests = [] for root, dirs, _ in os.walk(experiment_path_, topdown=True):", "from the checkpoint and add the 'valid_' prefix. for key, value in chkpt['validation_stats'].items():", "- Found {} test(s)'.format(len(experiments_tests))) # \"Expand\" status, train and valid dicts by empty", "cherry-picking subfolders containing: - (a) 'training_configuration.yaml' (training configuration file), - (b) 'models/model_best.pt' (checkpoint", "file and copy test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f: # Open", "data: - Loads and parses training configuration file, - Loads checkpoint with model", "of experiments and gather them in a csv file. This csv file will", "# Return all dictionaries with lists return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def", "on the settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check if experiments directory was indicated.", "'training_configuration.yaml' (training configuration file), - (b) 'models/model_best.pt' (checkpoint of the best saved model).", "yaml file, to get model name, problem name and random seeds. with open(os.path.join(experiment_path,", "train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy the training statistics from the", "the License. \"\"\" grid_analyzer.py: - This script post-processes the output of the ``GridTrainers``", "filename_): \"\"\" Checks if ``filename_`` exists in ``dir_``. :param dir_: Path to file.", "str :param filename_: Name of the file to be opened and analyzed. :type", "statistics, - Validation statistics, - Test statistics. 
\"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) #", "containing valid test experiments data: - A configuration (`testing_configuration.yaml`), - A csv file", "\"unified\" header. header = set(k for d in list_dicts for k in d)", "for the specific language governing permissions and # limitations under the License. \"\"\"", "in list_dicts for k in d) # Create an \"empty\" dict from the", "``dir_``. :param dir_: Path to file. :type dir_: str :param filename_: Name of", "by one and collect data. list_statuses = [] list_trains = [] list_valids =", "row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else: self.logger.info(' - Could not find any valid", "a \"unified\" header. header = set(k for d in list_dicts for k in", "- {}\\n\".format(exp) exp_str += '='*80 + '\\n' self.logger.info(exp_str) # Ask for confirmation -", "as f: return sum(1 for _ in f) def get_experiment_tests(self, experiment_path_): \"\"\" Returns", "experiments directory as --expdir') exit(-1) # Get experiment directory. self.experiment_rootdir = self.flags.expdir #", "in self.experiments_list: statuses, trains, valids, tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) #", "valid_dict['validation_{}'.format(key)] = value # Create \"empty\" equivalent. valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ') #", "list_test_dicts.append({}) # Return all dictionaries with lists return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod", "dict() train_dict = dict() valid_dict = dict() # Load yaml file, to get", "experiments from: {}'.format(experiment_path)) # Create dictionaries. 
status_dict = dict() train_dict = dict() valid_dict", "topdown=True): for name in dirs: experiments_tests.append(os.path.join(root, name)) # Keep only the folders that", "**valids, **tests} # create results file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\")", "i.e. contain at least one line with \\ collected statistics (excluding the header).", "value list_test_dicts.append(test_dict) else: self.logger.info(' - Could not find any valid tests') list_status_dicts =", "test experiments, .. note:: We require that the test statistics csv files are", "constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of the worker (DEFAULT: \"GridAnalyzer\"). :type name:", "self.logger.info(' - Could not find any valid tests') list_status_dicts = [status_dict] list_train_dicts =", "given training experiment folder contains subfolders with test experiments data: - Loads and", "analysed. :type filename_: str :return: True if the file exists in the directory,", "= [] if len(experiments_tests) > 0: self.logger.info(' - Found {} test(s)'.format(len(experiments_tests))) # \"Expand\"", "1 @staticmethod def get_lines_number(filename_): \"\"\" Returns the number of lines in ``filename_``. :param", "equivalent. valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ') # Get all tests for a given", "= csv.DictReader(f) # Copy training statistics. for row in test_reader: for key, value", "str :return: A list of valid test experiment folders. \"\"\" experiments_tests = []", "= dict.fromkeys(train_dict.keys(), ' ') # Copy the validation statistics from the checkpoint and", "the file is strictly greater than one. 
\"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) > 1", "Status info (model, problem etc.), - Training statistics, - Validation statistics, - Test", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "results of a grid of experiments and gather them in a csv file.", "file, to get model name, problem name and random seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'),", "Corporation 2018 # # Licensed under the Apache License, Version 2.0 (the \"License\");", "\"\"\" __author__ = \"<NAME> & <NAME>\" import os import csv import yaml import", ":type filename_: str :return: True if the file exists in the directory, else", "'{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent. status_dict_empty =", "filename_): \"\"\" Checks if the number of lines in the file is >", "files found in subdirectories). :param experiment_path: Path to an experiment folder containing a", "Create a \"unified\" header. header = set(k for d in list_dicts for k", "from the checkpoint and add the 'train_' prefix. for key, value in chkpt['training_stats'].items():", "experiments folder, - Recursively traverses the experiment folders, cherry-picking subfolders containing: - (a)", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Name of the worker (DEFAULT: \"GridAnalyzer\"). :type name: str \"\"\" # call base", "the 'train_' prefix. for key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value # Create", "from each experiment path contained in ``self.experiments_lists``. Merges all them together and saves", "file to be opened and analysed. 
:type filename_: str :return: True if the", "# Keep only the folders that contain a test configuration file and a", "Collects the training / validation / test statistics for a given experiment path.", "self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_): \"\"\" Checks if ``filename_`` exists in ``dir_``.", "configuration and create all required objects. grid_analyzer.setup_grid_experiment() # GO! grid_analyzer.run_grid_experiment() if __name__ ==", "valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ') # Get all tests for a given training", "status, train and valid dicts by empty ones, prop. to the number of", "containing: - (a) 'training_configuration.yaml' (training configuration file), - (b) 'models/model_best.pt' (checkpoint of the", "create all required objects. grid_analyzer.setup_grid_experiment() # GO! grid_analyzer.run_grid_experiment() if __name__ == '__main__': main()", "final_dict def run_grid_experiment(self): \"\"\" Collects four list of dicts from each experiment path", "trains, valids, tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge lists. statuses", "of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of the worker (DEFAULT: \"GridAnalyzer\"). :type name: str", "file), - (b) 'models/model_best.pt' (checkpoint of the best saved model). \"\"\" # Parse", "info (model, problem etc.), - Training statistics, - Validation statistics, - Test statistics.", "- (b) 'models/model_best.pt' (checkpoint of the best saved model). \"\"\" # Parse arguments.", "This csv file will gather the training statistics (seeds, accuracies, terminal conditions,...), \\", "params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy the training statistics from the checkpoint and", "add the 'valid_' prefix. 
for key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value #", "= self.merge_list_dicts(list_tests) # Merge everything into one big dictionary.. exp_values = {**statuses, **trains,", "the best saved model). \"\"\" # Parse arguments. self.flags, self.unparsed = self.parser.parse_known_args() #", "the License for the specific language governing permissions and # limitations under the", ":py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer() # parse args, load configuration and create all", "and # limitations under the License. \"\"\" grid_analyzer.py: - This script post-processes the", "csv files are valid, i.e. contain at least one line with \\ collected", "test configuration file and a csv statistics file. experiments_tests = [elem for elem", "- Training statistics, - Validation statistics, - Test statistics. \"\"\" self.logger.info('Analyzing experiments from:", "print('Please pass the experiments directory as --expdir') exit(-1) # Get experiment directory. self.experiment_rootdir", "test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f: # Open file. test_reader =", "= {k: ' ' for k in header} # \"Fill\" all lists with", "data. list_statuses = [] list_trains = [] list_valids = [] list_tests = []", "Check if the csv file contains at least one data point. experiments_tests =", "' ' for k in header} # \"Fill\" all lists with empty gaps.", "paths in expdir. self.experiments_list = [] for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True):", "and validation statistics, - Recursively traverses subdirectories looking for test experiments, .. note::", "os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file and get random seeds.", "# Get experiment directory. 
self.experiment_rootdir = self.flags.expdir # Get all sub-directories paths in", "def check_file_content(self, dir_, filename_): \"\"\" Checks if the number of lines in the", "'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self): \"\"\" Setups the overall experiment: - Parses arguments", "presence of experiments folder, - Recursively traverses the experiment folders, cherry-picking subfolders containing:", "valid test experiment folders. \"\"\" experiments_tests = [] for root, dirs, _ in", "# \"Expand\" status, train and valid dicts by empty ones, prop. to the", "list_test_dicts.append(test_dict) else: self.logger.info(' - Could not find any valid tests') list_status_dicts = [status_dict]", "analyzed. :type filename_: str :return: True if the number of lines in the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "worker (DEFAULT: \"GridAnalyzer\"). :type name: str \"\"\" # call base constructor super(GridAnalyzer, self).__init__(name=name,", "be merged. :type list_dicts: list :return: dict, resulting of the merge. \"\"\" #", "test_reader = csv.DictReader(f) # Copy training statistics. for row in test_reader: for key,", "# Create a \"unified\" header. header = set(k for d in list_dicts for", "logging from datetime import datetime from miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation", "of the merge. \"\"\" # Create a \"unified\" header. header = set(k for", "as outfile: writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored in", "saves result to a single csv file. 
\"\"\" try: # Go through the", "outfile: writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored in {}.'.format(results_file))", "test results into one `.csv` file. \"\"\" __author__ = \"<NAME> & <NAME>\" import", "self.experiments_list = [] for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True): for name in", "and start the grid analyzis\\n') except KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path: str): \"\"\"", "A configuration (`testing_configuration.yaml`), - A csv file containing a data point for the", "the missing fields with spaces into one dict. :param list_dicts: List of dictionaries,", "to a single csv file. \"\"\" try: # Go through the experiments one", "if the file exists in the directory, else False \"\"\" return os.path.isfile(os.path.join(dir_, filename_))", "single csv file. \"\"\" try: # Go through the experiments one by one", "filename_: str :return: True if the number of lines in the file is", "get model name, problem name and random seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as", "list_dicts for k in d) # Create an \"empty\" dict from the unified", "def setup_grid_experiment(self): \"\"\" Setups the overall experiment: - Parses arguments and sets logger", "dirs: self.experiments_list.append(os.path.join(root, name)) # Keep only the folders that contain training_configuration.yaml, training_statistics.csv and", ":return: True if the number of lines in the file is strictly greater", "grid_analyzer.py: - This script post-processes the output of the ``GridTrainers`` and ``GridTesters``. 
\\", "'r') as yaml_file: params = yaml.load(yaml_file) # Get problem and model names -", "status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent.", "- Recursively traverses the experiment folders, cherry-picking subfolders containing: - (a) 'training_configuration.yaml' (training", "statistics (excluding the header). - Collects statistics from training, validation (from model checkpoint)", "and model (which contains aggregated validation statistics). self.experiments_list = [elem for elem in", "Version 2.0 (the \"License\"); # you may not use this file except in", "Merge everything into one big dictionary.. exp_values = {**statuses, **trains, **valids, **tests} #", "False \"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_, filename_): \"\"\" Checks if the", "test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file and get", "a list of dictionaries by filling the missing fields with spaces into one", "of the ``GridTrainers`` and ``GridTesters``. \\ It gathers the test results into one", "'testing_configuration.yaml'), 'r') as yaml_file: test_params = yaml.load(yaml_file) # Get seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch']", "from the unified header. empty_dict = {k: ' ' for k in header}", "experiments data: - A configuration (`testing_configuration.yaml`), - A csv file containing a data", "for _ in range(len(experiments_tests) - 1)]] # Get tests statistics. 
for experiment_test_path in", "folder, - Recursively traverses the experiment folders, cherry-picking subfolders containing: - (a) 'training_configuration.yaml'", "= dict() train_dict = dict() valid_dict = dict() # Load yaml file, to", "valid experiments in directory: {} \\n\".format(self.experiment_rootdir) exp_str += '='*80 + '\\n' for exp", "Could not find any valid tests') list_status_dicts = [status_dict] list_train_dicts = [train_dict] list_valid_dicts", "= os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] #", "at least one data point. experiments_tests = [elem for elem in experiments_tests if", "Returns a list of folders containing valid test experiments data: - A configuration", "# Check if the csv file contains at least one data point. experiments_tests", ":param experiment_path: Path to an experiment folder containing a training statistics. :type experiment_path:", "Checks if the number of lines in the file is > 1. :param", "valid, i.e. contain at least one line with \\ collected statistics (excluding the", "containing a training statistics. :type experiment_path: str :return: Four dictionaries containing: - Status", "traverses the experiment folders, cherry-picking subfolders containing: - (a) 'training_configuration.yaml' (training configuration file),", "{}'.format(experiment_path)) # Create dictionaries. status_dict = dict() train_dict = dict() valid_dict = dict()", "# Get all tests for a given training experiment. experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts", "and saves result to a single csv file. 
\"\"\" try: # Go through", "import datetime from miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`.", "training experiment folder contains subfolders with test experiments data: - Loads and parses", "for exp in self.experiments_list: statuses, trains, valids, tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids)", "Name of the file to be opened and analysed. :type filename_: str :return:", "f: # Open file. test_reader = csv.DictReader(f) # Copy training statistics. for row", "train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy the training", "filename_: Name of the file to be opened and analysed. :type filename_: str", ":py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of the worker (DEFAULT: \"GridAnalyzer\"). :type name: str \"\"\"", "valid test experiments data: - A configuration (`testing_configuration.yaml`), - A csv file containing", "(`testing_set_agg_statistics.csv`) :param experiment_path_: Path to experiment (training) folder. :type experiment_path_: str :return: A", "statistics). self.experiments_list = [elem for elem in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem,", "into one `.csv` file. 
\"\"\" __author__ = \"<NAME> & <NAME>\" import os import", "True if the file exists in the directory, else False \"\"\" return os.path.isfile(os.path.join(dir_,", "storage, loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) #", "(excluding the header). - Collects statistics from training, validation (from model checkpoint) and", "Number of lines in the file. \"\"\" with open(filename_) as f: return sum(1", "if self.flags.expdir == '': print('Please pass the experiments directory as --expdir') exit(-1) #", "the checkpoint and add the 'valid_' prefix. for key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)]", "contains subfolders with test experiments data: - Loads and parses training configuration file,", "to be opened and analysed. :type filename_: str :return: True if the file", "one. \"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod def get_lines_number(filename_): \"\"\" Returns the", "name and random seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file: params = yaml.load(yaml_file)", "**(list_dicts[i])}) # Zip lists of dicts. final_dict = dict(zip(header, zip(*[d.values() for d in", "train_dict['training_{}'.format(key)] = value # Create \"empty\" equivalent. 
train_dict_empty = dict.fromkeys(train_dict.keys(), ' ') #", "from: {}'.format(experiment_test_path)) # Create test dict: test_dict = dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml')", "open(filename_) as f: return sum(1 for _ in f) def get_experiment_tests(self, experiment_path_): \"\"\"", "[] for i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists of dicts.", "statistics. for row in test_reader: for key, value in row.items(): test_dict['test_{}'.format(key)] = value", "elem in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self): \"\"\" Setups the", "exp_str += \" - {}\\n\".format(exp) exp_str += '='*80 + '\\n' self.logger.info(exp_str) # Ask", "validation statistics). self.experiments_list = [elem for elem in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and", "a list of folders containing valid test experiments data: - A configuration (`testing_configuration.yaml`),", "= \"Found the following valid experiments in directory: {} \\n\".format(self.experiment_rootdir) exp_str += '='*80", "return experiments_tests def setup_grid_experiment(self): \"\"\" Setups the overall experiment: - Parses arguments and", "the experiments one by one and collect data. list_statuses = [] list_trains =", "configuration file, - Loads checkpoint with model and training and validation statistics, -", "a grid of experiments and gather them in a csv file. This csv", "Check if experiments directory was indicated. if self.flags.expdir == '': print('Please pass the", "self.flags.log_level.upper(), None)) # Check if experiments directory was indicated. if self.flags.expdir == '':", "= self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) # Merge everything into one big dictionary.. 
exp_values", "(which contains aggregated validation statistics). self.experiments_list = [elem for elem in self.experiments_list if", "and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if the csv file contains at least one", "experiment path contained in ``self.experiments_lists``. Merges all them together and saves result to", "and get random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file: test_params = yaml.load(yaml_file)", "# Ask for confirmation - optional. if self.flags.user_confirm: try: input('Press <Enter> to confirm", "dicts from each experiment path contained in ``self.experiments_lists``. Merges all them together and", "for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of", "OF ANY KIND, either express or implied. # See the License for the", "copy test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f: # Open file. test_reader", "subdirectories). :param experiment_path: Path to an experiment folder containing a training statistics. :type", "seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file: test_params = yaml.load(yaml_file) # Get seeds.", "data. exp_str = \"Found the following valid experiments in directory: {} \\n\".format(self.experiment_rootdir) exp_str", "the file is > 1. :param dir_: Path to file. :type dir_: str", "experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if the csv file", "# Merge everything into one big dictionary.. 
exp_values = {**statuses, **trains, **valids, **tests}", "status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(),", "and sets logger level, - Checks the presence of experiments folder, - Recursively", "file. \"\"\" with open(filename_) as f: return sum(1 for _ in f) def", "with model and training and validation statistics, - Recursively traverses subdirectories looking for", "self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge lists. statuses = self.merge_list_dicts(list_statuses) trains =", "are valid, i.e. contain at least one line with \\ collected statistics (excluding", "experiment: - Parses arguments and sets logger level, - Checks the presence of", "lines in the file is > 1. :param dir_: Path to file. :type", "basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of the worker (DEFAULT: \"GridAnalyzer\"). :type", "f) def get_experiment_tests(self, experiment_path_): \"\"\" Returns a list of folders containing valid test", "folder contains subfolders with test experiments data: - Loads and parses training configuration", "statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f: # Open file. test_reader = csv.DictReader(f)", "data point for the aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path to experiment (training)", "dictionaries, potentially containing different headers, which will be merged. :type list_dicts: list :return:", "list_filled_dicts]))) # Return the result. return final_dict def run_grid_experiment(self): \"\"\" Collects four list", "folder containing a training statistics. 
:type experiment_path: str :return: Four dictionaries containing: -", "and analyzed. :type filename_: str :return: True if the number of lines in", "# Copy training statistics. for row in test_reader: for key, value in row.items():", "filling the missing fields with spaces into one dict. :param list_dicts: List of", "self.flags.expdir # Get all sub-directories paths in expdir. self.experiments_list = [] for root,", "checkpoint and add the 'valid_' prefix. for key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] =", "as --expdir') exit(-1) # Get experiment directory. self.experiment_rootdir = self.flags.expdir # Get all", "- Test statistics. \"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) # Create dictionaries. status_dict =", "dict. :param list_dicts: List of dictionaries, potentially containing different headers, which will be", "merged. :type list_dicts: list :return: dict, resulting of the merge. \"\"\" # Create", "training statistics. :type experiment_path: str :return: Four dictionaries containing: - Status info (model,", "if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self): \"\"\" Setups the overall experiment: -", "experiments_tests: self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path)) # Create test dict: test_dict =", "parse args, load configuration and create all required objects. grid_analyzer.setup_grid_experiment() # GO! grid_analyzer.run_grid_experiment()", "permissions and # limitations under the License. \"\"\" grid_analyzer.py: - This script post-processes", "[elem for elem in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check", "yaml_file: params = yaml.load(yaml_file) # Get problem and model names - from config.", "lists. 
statuses = self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests)", "expdir. self.experiments_list = [] for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True): for name", "= [] list_valids = [] list_tests = [] for exp in self.experiments_list: statuses,", "'\\n' for exp in self.experiments_list: exp_str += \" - {}\\n\".format(exp) exp_str += '='*80", "def run_grid_experiment(self): \"\"\" Collects four list of dicts from each experiment path contained", "experiment_test_path in experiments_tests: self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path)) # Create test dict:", "# -*- coding: utf-8 -*- # # Copyright (C) IBM Corporation 2018 #", "= [] list_trains = [] list_valids = [] list_tests = [] for exp", "= {**statuses, **trains, **valids, **tests} # create results file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now()))", "result to a single csv file. \"\"\" try: # Go through the experiments", "of valid test experiment folders. \"\"\" experiments_tests = [] for root, dirs, _", "test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load csv file and copy test", "for elem in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if", "all lists with empty gaps. 
list_filled_dicts = [] for i, _ in enumerate(list_dicts):", "or agreed to in writing, software # distributed under the License is distributed", "os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as outfile: writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values()))", "run_experiment(self, experiment_path: str): \"\"\" Collects the training / validation / test statistics for", "csv file. This csv file will gather the training statistics (seeds, accuracies, terminal", "'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if there are some valid folders. if", "Create dictionaries. status_dict = dict() train_dict = dict() valid_dict = dict() # Load", "header = set(k for d in list_dicts for k in d) # Create", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "containing: - Status info (model, problem etc.), - Training statistics, - Validation statistics,", "name in dirs: self.experiments_list.append(os.path.join(root, name)) # Keep only the folders that contain training_configuration.yaml,", "License. # You may obtain a copy of the License at # #", "exp_str += '='*80 + '\\n' for exp in self.experiments_list: exp_str += \" -", "from model file. chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc: storage) status_dict['model_save_timestamp'] =", "model name, problem name and random seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file:", "directory as --expdir') exit(-1) # Get experiment directory. 
self.experiment_rootdir = self.flags.expdir # Get", "len(experiments_tests) > 0: self.logger.info(' - Found {} test(s)'.format(len(experiments_tests))) # \"Expand\" status, train and", "dict() valid_dict = dict() # Load yaml file, to get model name, problem", "of the best saved model). \"\"\" # Parse arguments. self.flags, self.unparsed = self.parser.parse_known_args()", "_ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists of dicts. final_dict = dict(zip(header,", "experiments directory was indicated. if self.flags.expdir == '': print('Please pass the experiments directory", "dir_, filename_): \"\"\" Checks if the number of lines in the file is", "model). \"\"\" # Parse arguments. self.flags, self.unparsed = self.parser.parse_known_args() # Set logger depending", "contain training_configuration.yaml, training_statistics.csv and # training.csv and model (which contains aggregated validation statistics).", "yaml.load(yaml_file) # Get seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load csv", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "True if the number of lines in the file is strictly greater than", "\"\"\" # Parse arguments. self.flags, self.unparsed = self.parser.parse_known_args() # Set logger depending on", "dicts by empty ones, prop. to the number of test folders. list_status_dicts =", "' for k in header} # \"Fill\" all lists with empty gaps. list_filled_dicts", "experiments and gather them in a csv file. This csv file will gather", "header. empty_dict = {k: ' ' for k in header} # \"Fill\" all", "License, Version 2.0 (the \"License\"); # you may not use this file except", "if the csv file contains at least one data point. experiments_tests = [elem", "\"empty\" equivalent. 
valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ') # Get all tests for a", "experiments_tests = [] for root, dirs, _ in os.walk(experiment_path_, topdown=True): for name in", "statuses = self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) #", "Collects four list of dicts from each experiment path contained in ``self.experiments_lists``. Merges", "valids, tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge lists. statuses =", "*[train_dict_empty for _ in range(len(experiments_tests) - 1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty for _", "Loads and parses training configuration file, - Loads checkpoint with model and training", "i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists of dicts. final_dict =", "self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if there are some valid folders. if len(self.experiments_list) ==", "in {} directory!\".format(self.experiment_rootdir)) exit(-2) # List folders with \"valid\" experiment data. exp_str =", "\"\"\" grid_analyzer.py: - This script post-processes the output of the ``GridTrainers`` and ``GridTesters``.", "self.flags, self.unparsed = self.parser.parse_known_args() # Set logger depending on the settings. 
self.logger.setLevel(getattr(logging, self.flags.log_level.upper(),", "= self.get_experiment_tests(experiment_path) list_test_dicts = [] if len(experiments_tests) > 0: self.logger.info(' - Found {}", "for root, dirs, _ in os.walk(experiment_path_, topdown=True): for name in dirs: experiments_tests.append(os.path.join(root, name))", "{k: ' ' for k in header} # \"Fill\" all lists with empty", "root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True): for name in dirs: self.experiments_list.append(os.path.join(root, name)) #", "analyzis\\n') except KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path: str): \"\"\" Collects the training /", "csv statistics file. experiments_tests = [elem for elem in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml')", "training, validation (from model checkpoint) and test experiments \\ (from test csv files", "0: self.logger.error(\"There are no valid experiment folders in {} directory!\".format(self.experiment_rootdir)) exit(-2) # List", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "\"\"\" Merges a list of dictionaries by filling the missing fields with spaces", "Copyright (C) IBM Corporation 2018 # # Licensed under the Apache License, Version", "Merges all them together and saves result to a single csv file. \"\"\"", "the 'valid_' prefix. for key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value # Create", "'models/model_best.pt'), map_location=lambda storage, loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] =", "value # Create \"empty\" equivalent. valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ') # Get all", "filename_: str :return: Number of lines in the file. 
\"\"\" with open(filename_) as", "# Create \"empty\" equivalent. valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ') # Get all tests", "with open(filename_) as f: return sum(1 for _ in f) def get_experiment_tests(self, experiment_path_):", "\"empty test entry\" list_test_dicts.append({}) # Return all dictionaries with lists return list_status_dicts, list_train_dicts,", "training configuration file, - Loads checkpoint with model and training and validation statistics,", "with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f: # Open file. test_reader = csv.DictReader(f) #", "some valid folders. if len(self.experiments_list) == 0: self.logger.error(\"There are no valid experiment folders", "a given experiment path. Analyzes whether the given training experiment folder contains subfolders", "limitations under the License. \"\"\" grid_analyzer.py: - This script post-processes the output of", "the file to be opened and analysed. :type filename_: str :return: True if", "value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value # Create \"empty\" equivalent. valid_dict_empty = dict.fromkeys(valid_dict.keys(),", "Create \"empty\" equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(), ' ') # Copy the validation statistics", "self.unparsed = self.parser.parse_known_args() # Set logger depending on the settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None))", "aggregated validation statistics). self.experiments_list = [elem for elem in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml')", "return sum(1 for _ in f) def get_experiment_tests(self, experiment_path_): \"\"\" Returns a list", "for a given experiment path. Analyzes whether the given training experiment folder contains", "dict() # Load yaml file, to get model name, problem name and random", "which will be merged. 
:type list_dicts: list :return: dict, resulting of the merge.", "def run_experiment(self, experiment_path: str): \"\"\" Collects the training / validation / test statistics", "looking for test experiments, .. note:: We require that the test statistics csv", "dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file and", "= GridAnalyzer() # parse args, load configuration and create all required objects. grid_analyzer.setup_grid_experiment()", "except KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path: str): \"\"\" Collects the training / validation", "Analyzing test from: {}'.format(experiment_test_path)) # Create test dict: test_dict = dict() test_dict['test_configuration_filepath'] =", "empty ones, prop. to the number of test folders. list_status_dicts = [status_dict, *[status_dict_empty", "for d in list_filled_dicts]))) # Return the result. return final_dict def run_grid_experiment(self): \"\"\"", "Setups the overall experiment: - Parses arguments and sets logger level, - Checks", "or implied. # See the License for the specific language governing permissions and", "experiment path. Analyzes whether the given training experiment folder contains subfolders with test", "filename_: str :return: True if the file exists in the directory, else False", "# Create an \"empty\" dict from the unified header. empty_dict = {k: '", "if the number of lines in the file is strictly greater than one.", "open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file: test_params = yaml.load(yaml_file) # Get seeds. test_dict['test_seed_torch'] =", "the result. return final_dict def run_grid_experiment(self): \"\"\" Collects four list of dicts from", "experiment_path_: Path to experiment (training) folder. 
:type experiment_path_: str :return: A list of", "= [status_dict, *[status_dict_empty for _ in range(len(experiments_tests) - 1)]] list_train_dicts = [train_dict, *[train_dict_empty", "the training statistics from the checkpoint and add the 'train_' prefix. for key,", "- Validation statistics, - Test statistics. \"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) # Create", "# # Copyright (C) IBM Corporation 2018 # # Licensed under the Apache", "= \"<NAME> & <NAME>\" import os import csv import yaml import torch import", "with \\ collected statistics (excluding the header). - Collects statistics from training, validation", "topdown=True): for name in dirs: self.experiments_list.append(os.path.join(root, name)) # Keep only the folders that", "exp in self.experiments_list: statuses, trains, valids, tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests)", "= self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge lists. statuses = self.merge_list_dicts(list_statuses) trains", "for name in dirs: self.experiments_list.append(os.path.join(root, name)) # Keep only the folders that contain", "in ``dir_``. :param dir_: Path to file. :type dir_: str :param filename_: Name", "number of test folders. list_status_dicts = [status_dict, *[status_dict_empty for _ in range(len(experiments_tests) -", "of lines in the file is strictly greater than one. \"\"\" return self.get_lines_number(os.path.join(dir_,", "test experiments data: - Loads and parses training configuration file, - Loads checkpoint", "by empty ones, prop. to the number of test folders. list_status_dicts = [status_dict,", "[valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) - 1)]] # Get tests statistics. 
for", ":return: True if the file exists in the directory, else False \"\"\" return", "use this file except in compliance with the License. # You may obtain", "file. \"\"\" try: # Go through the experiments one by one and collect", "training statistics. for row in test_reader: for key, value in row.items(): test_dict['test_{}'.format(key)] =", "= [] for i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])}) # Zip lists of", "results file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as outfile: writer =", "+= '='*80 + '\\n' self.logger.info(exp_str) # Ask for confirmation - optional. if self.flags.user_confirm:", "import os import csv import yaml import torch import logging from datetime import", "# parse args, load configuration and create all required objects. grid_analyzer.setup_grid_experiment() # GO!", "Found {} test(s)'.format(len(experiments_tests))) # \"Expand\" status, train and valid dicts by empty ones,", "prefix. for key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value # Create \"empty\" equivalent.", "the unified header. empty_dict = {k: ' ' for k in header} #", "csv file will gather the training statistics (seeds, accuracies, terminal conditions,...), \\ the", "experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts = [] if len(experiments_tests) > 0: self.logger.info(' - Found", "that contain training_configuration.yaml, training_statistics.csv and # training.csv and model (which contains aggregated validation", "statuses, trains, valids, tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains) list_valids.extend(valids) list_tests.extend(tests) # Merge lists.", ":return: dict, resulting of the merge. \"\"\" # Create a \"unified\" header. header", "lines in ``filename_``. 
:param filename_: Filepath to be opened and line-read. :type filename_:", "statistics, - Test statistics. \"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) # Create dictionaries. status_dict", "folders. if len(self.experiments_list) == 0: self.logger.error(\"There are no valid experiment folders in {}", "validation (from model checkpoint) and test experiments \\ (from test csv files found", "# Copy the validation statistics from the checkpoint and add the 'valid_' prefix.", "statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path to experiment (training) folder. :type experiment_path_: str :return:", "of test folders. list_status_dicts = [status_dict, *[status_dict_empty for _ in range(len(experiments_tests) - 1)]]", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "list_train_dicts = [train_dict] list_valid_dicts = [valid_dict] # Add \"empty test entry\" list_test_dicts.append({}) #", "# Create test dict: test_dict = dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] =", "with empty gaps. 
list_filled_dicts = [] for i, _ in enumerate(list_dicts): list_filled_dicts.append({**empty_dict, **(list_dicts[i])})", "in range(len(experiments_tests) - 1)]] list_train_dicts = [train_dict, *[train_dict_empty for _ in range(len(experiments_tests) -", "experiment_path_): \"\"\" Returns a list of folders containing valid test experiments data: -", "(`testing_configuration.yaml`), - A csv file containing a data point for the aggregated statistics", "= [elem for elem in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] #", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "> 1 @staticmethod def get_lines_number(filename_): \"\"\" Returns the number of lines in ``filename_``.", "Path to an experiment folder containing a training statistics. :type experiment_path: str :return:", "Go through the experiments one by one and collect data. list_statuses = []", "if ``filename_`` exists in ``dir_``. :param dir_: Path to file. :type dir_: str", "opened and line-read. :type filename_: str :return: Number of lines in the file.", "License. \"\"\" grid_analyzer.py: - This script post-processes the output of the ``GridTrainers`` and", "the number of lines in ``filename_``. :param filename_: Filepath to be opened and", "input('Press <Enter> to confirm and start the grid analyzis\\n') except KeyboardInterrupt: exit(0) def", "test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else: self.logger.info(' - Could not find any valid tests')", "experiment_path: str): \"\"\" Collects the training / validation / test statistics for a", "\"\"\" experiments_tests = [] for root, dirs, _ in os.walk(experiment_path_, topdown=True): for name", "one dict. 
:param list_dicts: List of dictionaries, potentially containing different headers, which will", "experiments_tests = [elem for elem in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def", "all them together and saves result to a single csv file. \"\"\" try:", "\"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod def get_lines_number(filename_): \"\"\" Returns the number", "# Go through the experiments one by one and collect data. list_statuses =", "exp_str += '='*80 + '\\n' self.logger.info(exp_str) # Ask for confirmation - optional. if", "line with \\ collected statistics (excluding the header). - Collects statistics from training,", "- Could not find any valid tests') list_status_dicts = [status_dict] list_train_dicts = [train_dict]", "with the License. # You may obtain a copy of the License at", "[] list_trains = [] list_valids = [] list_tests = [] for exp in", "file will gather the training statistics (seeds, accuracies, terminal conditions,...), \\ the validation", "Get all tests for a given training experiment. experiments_tests = self.get_experiment_tests(experiment_path) list_test_dicts =", "test statistics csv files are valid, i.e. contain at least one line with", "in d) # Create an \"empty\" dict from the unified header. empty_dict =", "in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self): \"\"\" Setups the overall", "[status_dict, *[status_dict_empty for _ in range(len(experiments_tests) - 1)]] list_train_dicts = [train_dict, *[train_dict_empty for", "files are valid, i.e. contain at least one line with \\ collected statistics", "statistics and the test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. 
\"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\"", "subfolders containing: - (a) 'training_configuration.yaml' (training configuration file), - (b) 'models/model_best.pt' (checkpoint of", "in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if the csv", "``self.experiments_lists``. Merges all them together and saves result to a single csv file.", "def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor of", "and ``GridTesters``. \\ It gathers the test results into one `.csv` file. \"\"\"", "with test experiments data: - Loads and parses training configuration file, - Loads", "law or agreed to in writing, software # distributed under the License is", "containing a data point for the aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path to", "in a csv file. This csv file will gather the training statistics (seeds,", "f: return sum(1 for _ in f) def get_experiment_tests(self, experiment_path_): \"\"\" Returns a", "for _ in f) def get_experiment_tests(self, experiment_path_): \"\"\" Returns a list of folders", "not find any valid tests') list_status_dicts = [status_dict] list_train_dicts = [train_dict] list_valid_dicts =", "1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) - 1)]] # Get", "experiment directory. self.experiment_rootdir = self.flags.expdir # Get all sub-directories paths in expdir. 
self.experiments_list", "- Collects statistics from training, validation (from model checkpoint) and test experiments \\", "\"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as outfile: writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis", "are some valid folders. if len(self.experiments_list) == 0: self.logger.error(\"There are no valid experiment", "list_status_dicts = [status_dict, *[status_dict_empty for _ in range(len(experiments_tests) - 1)]] list_train_dicts = [train_dict,", "language governing permissions and # limitations under the License. \"\"\" grid_analyzer.py: - This", "datetime from miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes", "line-read. :type filename_: str :return: Number of lines in the file. \"\"\" with", "contains at least one data point. experiments_tests = [elem for elem in experiments_tests", "one data point. experiments_tests = [elem for elem in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')]", "list_test_dicts = [] if len(experiments_tests) > 0: self.logger.info(' - Found {} test(s)'.format(len(experiments_tests))) #", "os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_, filename_): \"\"\" Checks if the number of lines", "else False \"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_, filename_): \"\"\" Checks if", "validation statistics, - Recursively traverses subdirectories looking for test experiments, .. note:: We", "in compliance with the License. 
# You may obtain a copy of the", "= set(k for d in list_dicts for k in d) # Create an", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(), ' ') #", "with \"valid\" experiment data. exp_str = \"Found the following valid experiments in directory:", "path. Analyzes whether the given training experiment folder contains subfolders with test experiments", "\"\"\" Returns the number of lines in ``filename_``. :param filename_: Filepath to be", "for d in list_dicts for k in d) # Create an \"empty\" dict", "test entry\" list_test_dicts.append({}) # Return all dictionaries with lists return list_status_dicts, list_train_dicts, list_valid_dicts,", "\" - {}\\n\".format(exp) exp_str += '='*80 + '\\n' self.logger.info(exp_str) # Ask for confirmation", "map_location=lambda storage, loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp'])", "os import csv import yaml import torch import logging from datetime import datetime", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "_ in os.walk(self.experiment_rootdir, topdown=True): for name in dirs: self.experiments_list.append(os.path.join(root, name)) # Keep only", "configuration file and a csv statistics file. experiments_tests = [elem for elem in", "dirs, _ in os.walk(experiment_path_, topdown=True): for name in dirs: experiments_tests.append(os.path.join(root, name)) # Keep", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Test statistics. 
\"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) # Create dictionaries. status_dict = dict()", ":type experiment_path: str :return: Four dictionaries containing: - Status info (model, problem etc.),", "= chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(), '", "arguments. self.flags, self.unparsed = self.parser.parse_known_args() # Set logger depending on the settings. self.logger.setLevel(getattr(logging,", "Create an \"empty\" dict from the unified header. empty_dict = {k: ' '", "checkpoint from model file. chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc: storage) status_dict['model_save_timestamp']", "results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as outfile: writer = csv.writer(outfile, delimiter=',')", "--expdir') exit(-1) # Get experiment directory. self.experiment_rootdir = self.flags.expdir # Get all sub-directories", "d) # Create an \"empty\" dict from the unified header. empty_dict = {k:", "of lines in the file is > 1. :param dir_: Path to file.", "level, - Checks the presence of experiments folder, - Recursively traverses the experiment", "self.experiment_rootdir = self.flags.expdir # Get all sub-directories paths in expdir. self.experiments_list = []", "= os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as outfile: writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys())", "params['model']['name'] # Load checkpoint from model file. 
chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage,", "= [] for exp in self.experiments_list: statuses, trains, valids, tests = self.run_experiment(exp) list_statuses.extend(statuses)", "= torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status']", "import GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the test results", "list_train_dicts = [train_dict, *[train_dict_empty for _ in range(len(experiments_tests) - 1)]] list_valid_dicts = [valid_dict,", "Add \"empty test entry\" list_test_dicts.append({}) # Return all dictionaries with lists return list_status_dicts,", "to an experiment folder containing a training statistics. :type experiment_path: str :return: Four", "Copy the training statistics from the checkpoint and add the 'train_' prefix. for", "four list of dicts from each experiment path contained in ``self.experiments_lists``. Merges all", "start the grid analyzis\\n') except KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path: str): \"\"\" Collects", "Analyzes whether the given training experiment folder contains subfolders with test experiments data:", "[] for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True): for name in dirs: self.experiments_list.append(os.path.join(root,", "greater than one. \"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod def get_lines_number(filename_): \"\"\"", "filename_: Name of the file to be opened and analyzed. :type filename_: str", "result. return final_dict def run_grid_experiment(self): \"\"\" Collects four list of dicts from each", "training.csv and model (which contains aggregated validation statistics). 
self.experiments_list = [elem for elem", "equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(), ' ') # Copy the validation statistics from the", "sum(1 for _ in f) def get_experiment_tests(self, experiment_path_): \"\"\" Returns a list of", "'r') as yaml_file: test_params = yaml.load(yaml_file) # Get seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy']", "and valid dicts by empty ones, prop. to the number of test folders.", "experiment folder containing a training statistics. :type experiment_path: str :return: Four dictionaries containing:", "and analysed. :type filename_: str :return: True if the file exists in the", "self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check if experiments directory was indicated. if self.flags.expdir ==", "os.walk(self.experiment_rootdir, topdown=True): for name in dirs: self.experiments_list.append(os.path.join(root, name)) # Keep only the folders", "fields with spaces into one dict. :param list_dicts: List of dictionaries, potentially containing", "the presence of experiments folder, - Recursively traverses the experiment folders, cherry-picking subfolders", "them together and saves result to a single csv file. \"\"\" try: #", "under the License. \"\"\" grid_analyzer.py: - This script post-processes the output of the", "loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create", "script post-processes the output of the ``GridTrainers`` and ``GridTesters``. \\ It gathers the", "them in a csv file. 
This csv file will gather the training statistics", "and training and validation statistics, - Recursively traverses subdirectories looking for test experiments,", "parses training configuration file, - Loads checkpoint with model and training and validation", "the given training experiment folder contains subfolders with test experiments data: - Loads", "confirmation - optional. if self.flags.user_confirm: try: input('Press <Enter> to confirm and start the", "setup_grid_experiment(self): \"\"\" Setups the overall experiment: - Parses arguments and sets logger level,", "training / validation / test statistics for a given experiment path. Analyzes whether", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "folders with \"valid\" experiment data. exp_str = \"Found the following valid experiments in", "experiments in directory: {} \\n\".format(self.experiment_rootdir) exp_str += '='*80 + '\\n' for exp in", ":param name: Name of the worker (DEFAULT: \"GridAnalyzer\"). :type name: str \"\"\" #", "if there are some valid folders. if len(self.experiments_list) == 0: self.logger.error(\"There are no", "file. This csv file will gather the training statistics (seeds, accuracies, terminal conditions,...),", "open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f: # Open file. test_reader = csv.DictReader(f) # Copy", "= dict(zip(header, zip(*[d.values() for d in list_filled_dicts]))) # Return the result. return final_dict", "file. experiments_tests = [elem for elem in experiments_tests if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem,", "the worker (DEFAULT: \"GridAnalyzer\"). :type name: str \"\"\" # call base constructor super(GridAnalyzer,", "note:: We require that the test statistics csv files are valid, i.e. contain", "problem etc.), - Training statistics, - Validation statistics, - Test statistics. 
\"\"\" self.logger.info('Analyzing", "tests') list_status_dicts = [status_dict] list_train_dicts = [train_dict] list_valid_dicts = [valid_dict] # Add \"empty", "') # Copy the validation statistics from the checkpoint and add the 'valid_'", "self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path)) # Create test dict: test_dict = dict()", "experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self): \"\"\" Setups the overall experiment:", "run_grid_experiment(self): \"\"\" Collects four list of dicts from each experiment path contained in", "Filepath to be opened and line-read. :type filename_: str :return: Number of lines", "of dicts. final_dict = dict(zip(header, zip(*[d.values() for d in list_filled_dicts]))) # Return the", "= params['training']['seed_torch'] train_dict['training_seed_numpy'] = params['training']['seed_numpy'] # Copy the training statistics from the checkpoint", "train and valid dicts by empty ones, prop. to the number of test", "this file except in compliance with the License. # You may obtain a", "depending on the settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) # Check if experiments directory was", "- Recursively traverses subdirectories looking for test experiments, .. note:: We require that", "self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self): \"\"\" Setups the overall experiment: - Parses", "[status_dict] list_train_dicts = [train_dict] list_valid_dicts = [valid_dict] # Add \"empty test entry\" list_test_dicts.append({})", "for confirmation - optional. if self.flags.user_confirm: try: input('Press <Enter> to confirm and start", "exists in ``dir_``. :param dir_: Path to file. 
:type dir_: str :param filename_:", "test from: {}'.format(experiment_test_path)) # Create test dict: test_dict = dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path,", "# Check if there are some valid folders. if len(self.experiments_list) == 0: self.logger.error(\"There", "of dictionaries, potentially containing different headers, which will be merged. :type list_dicts: list", "\"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the test results of a grid of", "validation statistics and the test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def __init__(self, name=\"GridAnalyzer\"):", "return self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod def get_lines_number(filename_): \"\"\" Returns the number of", "*[status_dict_empty for _ in range(len(experiments_tests) - 1)]] list_train_dicts = [train_dict, *[train_dict_empty for _", "+ '\\n' for exp in self.experiments_list: exp_str += \" - {}\\n\".format(exp) exp_str +=", "the output of the ``GridTrainers`` and ``GridTesters``. \\ It gathers the test results", "torch import logging from datetime import datetime from miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker):", "= yaml.load(yaml_file) # Get problem and model names - from config. status_dict['problem'] =", "valid folders. if len(self.experiments_list) == 0: self.logger.error(\"There are no valid experiment folders in", "Open file. test_reader = csv.DictReader(f) # Copy training statistics. for row in test_reader:", "' ') # Copy the validation statistics from the checkpoint and add the", "zip(*[d.values() for d in list_filled_dicts]))) # Return the result. return final_dict def run_grid_experiment(self):", "key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value # Create \"empty\" equivalent. train_dict_empty =", ".. 
note:: We require that the test statistics csv files are valid, i.e.", "the test statistics csv files are valid, i.e. contain at least one line", "from: {}'.format(experiment_path)) # Create dictionaries. status_dict = dict() train_dict = dict() valid_dict =", "use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_): \"\"\" Checks if ``filename_`` exists in ``dir_``. :param", "names - from config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] # Load checkpoint", "open(results_file, \"w\") as outfile: writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results", "= value # Create \"empty\" equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(), ' ') # Copy", "merge. \"\"\" # Create a \"unified\" header. header = set(k for d in", ":return: A list of valid test experiment folders. \"\"\" experiments_tests = [] for", "'valid_' prefix. for key, value in chkpt['validation_stats'].items(): valid_dict['validation_{}'.format(key)] = value # Create \"empty\"", "') # Get all tests for a given training experiment. experiments_tests = self.get_experiment_tests(experiment_path)", "statistics (seeds, accuracies, terminal conditions,...), \\ the validation statistics and the test statistics.", "yaml import torch import logging from datetime import datetime from miprometheus.grid_workers.grid_worker import GridWorker", "name in dirs: experiments_tests.append(os.path.join(root, name)) # Keep only the folders that contain a", "a test configuration file and a csv statistics file. experiments_tests = [elem for", "for exp in self.experiments_list: exp_str += \" - {}\\n\".format(exp) exp_str += '='*80 +", "experiment data. 
exp_str = \"Found the following valid experiments in directory: {} \\n\".format(self.experiment_rootdir)", "in experiments_tests: self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path)) # Create test dict: test_dict", "yaml file and get random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file: test_params", "\"empty\" dict from the unified header. empty_dict = {k: ' ' for k", "dicts. final_dict = dict(zip(header, zip(*[d.values() for d in list_filled_dicts]))) # Return the result.", "= os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml file and get random", "= self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) # Merge", "opened and analyzed. :type filename_: str :return: True if the number of lines", "\\n\".format(self.experiment_rootdir) exp_str += '='*80 + '\\n' for exp in self.experiments_list: exp_str += \"", "and # training.csv and model (which contains aggregated validation statistics). self.experiments_list = [elem", "name: Name of the worker (DEFAULT: \"GridAnalyzer\"). :type name: str \"\"\" # call", "**trains, **valids, **tests} # create results file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file,", "import csv import yaml import torch import logging from datetime import datetime from", "Load checkpoint from model file. 
chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc: storage)", "experiment folders in {} directory!\".format(self.experiment_rootdir)) exit(-2) # List folders with \"valid\" experiment data.", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "\"valid\" experiment data. exp_str = \"Found the following valid experiments in directory: {}", "as f: # Open file. test_reader = csv.DictReader(f) # Copy training statistics. for", "range(len(experiments_tests) - 1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) - 1)]]", "the file to be opened and analyzed. :type filename_: str :return: True if", "ones, prop. to the number of test folders. list_status_dicts = [status_dict, *[status_dict_empty for", "\"Fill\" all lists with empty gaps. list_filled_dicts = [] for i, _ in", "try: # Go through the experiments one by one and collect data. list_statuses", "from miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the", "the test results of a grid of experiments and gather them in a", "required by applicable law or agreed to in writing, software # distributed under", "[] list_tests = [] for exp in self.experiments_list: statuses, trains, valids, tests =", "status_dict = dict() train_dict = dict() valid_dict = dict() # Load yaml file,", "checkpoint with model and training and validation statistics, - Recursively traverses subdirectories looking", "Checks if ``filename_`` exists in ``dir_``. :param dir_: Path to file. :type dir_:", "with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file: params = yaml.load(yaml_file) # Get problem and", "indicated. 
if self.flags.expdir == '': print('Please pass the experiments directory as --expdir') exit(-1)", "train_dict = dict() valid_dict = dict() # Load yaml file, to get model", "tests = self.merge_list_dicts(list_tests) # Merge everything into one big dictionary.. exp_values = {**statuses,", "missing fields with spaces into one dict. :param list_dicts: List of dictionaries, potentially", "exp_values = {**statuses, **trains, **valids, **tests} # create results file results_file = os.path.join(self.experiment_rootdir,", "def main(): \"\"\" Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer()", "- Checks the presence of experiments folder, - Recursively traverses the experiment folders,", "as yaml_file: test_params = yaml.load(yaml_file) # Get seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] =", "We require that the test statistics csv files are valid, i.e. contain at", "Recursively traverses subdirectories looking for test experiments, .. note:: We require that the", "params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] # Load checkpoint from model file. chkpt = torch.load(os.path.join(experiment_path,", "= dict() valid_dict = dict() # Load yaml file, to get model name,", "test_dict = dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] # Load yaml", "d in list_dicts for k in d) # Create an \"empty\" dict from", "results into one `.csv` file. \"\"\" __author__ = \"<NAME> & <NAME>\" import os", "and add the 'train_' prefix. 
for key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value", "self.flags.expdir == '': print('Please pass the experiments directory as --expdir') exit(-1) # Get", "if len(experiments_tests) > 0: self.logger.info(' - Found {} test(s)'.format(len(experiments_tests))) # \"Expand\" status, train", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "test_reader: for key, value in row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else: self.logger.info(' -", "Post-processes the test results of a grid of experiments and gather them in", "(model, problem etc.), - Training statistics, - Validation statistics, - Test statistics. \"\"\"", "model checkpoint) and test experiments \\ (from test csv files found in subdirectories).", "= [elem for elem in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self):", "Get all sub-directories paths in expdir. self.experiments_list = [] for root, dirs, _", ":type filename_: str :return: Number of lines in the file. \"\"\" with open(filename_)", "len(self.experiments_list) == 0: self.logger.error(\"There are no valid experiment folders in {} directory!\".format(self.experiment_rootdir)) exit(-2)", ":type experiment_path_: str :return: A list of valid test experiment folders. \"\"\" experiments_tests", "statistics. for experiment_test_path in experiments_tests: self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path)) # Create", "= self.parser.parse_known_args() # Set logger depending on the settings. self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None)) #", "statistics. \"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) # Create dictionaries. status_dict = dict() train_dict", "is strictly greater than one. 
\"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod def", "for elem in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self): \"\"\" Setups", "validation statistics from the checkpoint and add the 'valid_' prefix. for key, value", "big dictionary.. exp_values = {**statuses, **trains, **valids, **tests} # create results file results_file", "str :return: True if the file exists in the directory, else False \"\"\"", "test_params = yaml.load(yaml_file) # Get seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] #", "<Enter> to confirm and start the grid analyzis\\n') except KeyboardInterrupt: exit(0) def run_experiment(self,", "= [] for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True): for name in dirs:", "-*- coding: utf-8 -*- # # Copyright (C) IBM Corporation 2018 # #", "of experiments folder, - Recursively traverses the experiment folders, cherry-picking subfolders containing: -", "storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\"", "_ in os.walk(experiment_path_, topdown=True): for name in dirs: experiments_tests.append(os.path.join(root, name)) # Keep only", "test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load csv file and copy test statistics with", "add the 'train_' prefix. for key, value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value #", "# Parse arguments. self.flags, self.unparsed = self.parser.parse_known_args() # Set logger depending on the", "governing permissions and # limitations under the License. 
\"\"\" grid_analyzer.py: - This script", "utf-8 -*- # # Copyright (C) IBM Corporation 2018 # # Licensed under", "no valid experiment folders in {} directory!\".format(self.experiment_rootdir)) exit(-2) # List folders with \"valid\"", "of the file to be opened and analysed. :type filename_: str :return: True", "file. test_reader = csv.DictReader(f) # Copy training statistics. for row in test_reader: for", "with spaces into one dict. :param list_dicts: List of dictionaries, potentially containing different", "pass the experiments directory as --expdir') exit(-1) # Get experiment directory. self.experiment_rootdir =", "# you may not use this file except in compliance with the License.", "the merge. \"\"\" # Create a \"unified\" header. header = set(k for d", "the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer() # parse args, load configuration and create", "and parses training configuration file, - Loads checkpoint with model and training and", "self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) # Create dictionaries. status_dict = dict() train_dict = dict()", "collect data. list_statuses = [] list_trains = [] list_valids = [] list_tests =", "else: self.logger.info(' - Could not find any valid tests') list_status_dicts = [status_dict] list_train_dicts", "(training) folder. :type experiment_path_: str :return: A list of valid test experiment folders.", "self.flags.user_confirm: try: input('Press <Enter> to confirm and start the grid analyzis\\n') except KeyboardInterrupt:", "in the file is > 1. :param dir_: Path to file. :type dir_:", "sub-directories paths in expdir. 
self.experiments_list = [] for root, dirs, _ in os.walk(self.experiment_rootdir,", "training and validation statistics, - Recursively traverses subdirectories looking for test experiments, ..", "dictionaries with lists return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges", "self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) # Merge everything into one big dictionary.. exp_values =", "Validation statistics, - Test statistics. \"\"\" self.logger.info('Analyzing experiments from: {}'.format(experiment_path)) # Create dictionaries.", "experiments one by one and collect data. list_statuses = [] list_trains = []", "\"\"\" with open(filename_) as f: return sum(1 for _ in f) def get_experiment_tests(self,", "A list of valid test experiment folders. \"\"\" experiments_tests = [] for root,", "'': print('Please pass the experiments directory as --expdir') exit(-1) # Get experiment directory.", "Load yaml file, to get model name, problem name and random seeds. with", "chkpt['status'] status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(), ' ')", "self.logger.info(exp_str) # Ask for confirmation - optional. if self.flags.user_confirm: try: input('Press <Enter> to", "torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] = chkpt['status'] status_dict['training_terminal_status_timestamp']", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (C) IBM Corporation", "lines in the file. 
\"\"\" with open(filename_) as f: return sum(1 for _", "**tests} # create results file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as", "in range(len(experiments_tests) - 1)]] # Get tests statistics. for experiment_test_path in experiments_tests: self.logger.info('", "any valid tests') list_status_dicts = [status_dict] list_train_dicts = [train_dict] list_valid_dicts = [valid_dict] #", "the validation statistics and the test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def __init__(self,", "GridAnalyzer(GridWorker): \"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the test results of a grid", "test statistics. Inherits from :py:class:`miprometheus.grid_workers.GridWorker`. \"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for the", "status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] = params['training']['seed_torch'] train_dict['training_seed_numpy']", "Copy the validation statistics from the checkpoint and add the 'valid_' prefix. for", "seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file: params = yaml.load(yaml_file) # Get problem", "set(k for d in list_dicts for k in d) # Create an \"empty\"", "for k in d) # Create an \"empty\" dict from the unified header.", "License for the specific language governing permissions and # limitations under the License.", "if experiments directory was indicated. 
if self.flags.expdir == '': print('Please pass the experiments", "test dict: test_dict = dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:] #", "to get model name, problem name and random seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r')", "get_lines_number(filename_): \"\"\" Returns the number of lines in ``filename_``. :param filename_: Filepath to", "dictionaries containing: - Status info (model, problem etc.), - Training statistics, - Validation", "\\ collected statistics (excluding the header). - Collects statistics from training, validation (from", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "header} # \"Fill\" all lists with empty gaps. list_filled_dicts = [] for i,", "\"w\") as outfile: writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored", ":py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of the worker", "Load yaml file and get random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file:", "dirs: experiments_tests.append(os.path.join(root, name)) # Keep only the folders that contain a test configuration", ":param filename_: Filepath to be opened and line-read. :type filename_: str :return: Number", "= value list_test_dicts.append(test_dict) else: self.logger.info(' - Could not find any valid tests') list_status_dicts", "List folders with \"valid\" experiment data. exp_str = \"Found the following valid experiments", "Get tests statistics. 
for experiment_test_path in experiments_tests: self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path))", "python3 # -*- coding: utf-8 -*- # # Copyright (C) IBM Corporation 2018", "in row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else: self.logger.info(' - Could not find any", "Path to file. :type dir_: str :param filename_: Name of the file to", "# Check if experiments directory was indicated. if self.flags.expdir == '': print('Please pass", "dirs, _ in os.walk(self.experiment_rootdir, topdown=True): for name in dirs: self.experiments_list.append(os.path.join(root, name)) # Keep", ":param filename_: Name of the file to be opened and analyzed. :type filename_:", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "logger level, - Checks the presence of experiments folder, - Recursively traverses the", "Name of the file to be opened and analyzed. :type filename_: str :return:", "path contained in ``self.experiments_lists``. Merges all them together and saves result to a", "point for the aggregated statistics (`testing_set_agg_statistics.csv`) :param experiment_path_: Path to experiment (training) folder.", "chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status'] =", "' ') # Copy training status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] =", "the csv file contains at least one data point. experiments_tests = [elem for", "list_tests = [] for exp in self.experiments_list: statuses, trains, valids, tests = self.run_experiment(exp)", "optional. 
if self.flags.user_confirm: try: input('Press <Enter> to confirm and start the grid analyzis\\n')", "data: - A configuration (`testing_configuration.yaml`), - A csv file containing a data point", "= csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored in {}.'.format(results_file)) except KeyboardInterrupt:", "one by one and collect data. list_statuses = [] list_trains = [] list_valids", "\"GridAnalyzer\"). :type name: str \"\"\" # call base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod", "directory, else False \"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_, filename_): \"\"\" Checks", "together and saves result to a single csv file. \"\"\" try: # Go", "through the experiments one by one and collect data. list_statuses = [] list_trains", "yaml.load(yaml_file) # Get problem and model names - from config. status_dict['problem'] = params['testing']['problem']['name']", "# Copy training status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch']", "folders in {} directory!\".format(self.experiment_rootdir)) exit(-2) # List folders with \"valid\" experiment data. exp_str", "experiments data: - Loads and parses training configuration file, - Loads checkpoint with", "header). - Collects statistics from training, validation (from model checkpoint) and test experiments", "Checks the presence of experiments folder, - Recursively traverses the experiment folders, cherry-picking", "least one data point. experiments_tests = [elem for elem in experiments_tests if self.check_file_content(elem,", "statistics from the checkpoint and add the 'train_' prefix. 
for key, value in", "Recursively traverses the experiment folders, cherry-picking subfolders containing: - (a) 'training_configuration.yaml' (training configuration", "k in header} # \"Fill\" all lists with empty gaps. list_filled_dicts = []", "# Add \"empty test entry\" list_test_dicts.append({}) # Return all dictionaries with lists return", "by filling the missing fields with spaces into one dict. :param list_dicts: List", "for experiment_test_path in experiments_tests: self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path)) # Create test", "range(len(experiments_tests) - 1)]] list_train_dicts = [train_dict, *[train_dict_empty for _ in range(len(experiments_tests) - 1)]]", "checkpoint) and test experiments \\ (from test csv files found in subdirectories). :param", "an \"empty\" dict from the unified header. empty_dict = {k: ' ' for", "'\\n' self.logger.info(exp_str) # Ask for confirmation - optional. if self.flags.user_confirm: try: input('Press <Enter>", "are no valid experiment folders in {} directory!\".format(self.experiment_rootdir)) exit(-2) # List folders with", "file results_file = os.path.join(self.experiment_rootdir, \"{0:%Y%m%d_%H%M%S}_grid_analysis.csv\".format(datetime.now())) with open(results_file, \"w\") as outfile: writer = csv.writer(outfile,", "Loads checkpoint with model and training and validation statistics, - Recursively traverses subdirectories", "experiment_path: str :return: Four dictionaries containing: - Status info (model, problem etc.), -", "> 0: self.logger.info(' - Found {} test(s)'.format(len(experiments_tests))) # \"Expand\" status, train and valid", "prop. to the number of test folders. list_status_dicts = [status_dict, *[status_dict_empty for _", "class GridAnalyzer(GridWorker): \"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. 
Post-processes the test results of a", "with lists return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges a", "Create \"empty\" equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(), ' ') # Copy training status stats.", "the validation statistics from the checkpoint and add the 'valid_' prefix. for key,", "merge_list_dicts(list_dicts): \"\"\" Merges a list of dictionaries by filling the missing fields with", "point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer() # parse args, load", "# Copyright (C) IBM Corporation 2018 # # Licensed under the Apache License,", "Get seeds. test_dict['test_seed_torch'] = test_params['testing']['seed_torch'] test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy'] # Load csv file and", "[valid_dict] # Add \"empty test entry\" list_test_dicts.append({}) # Return all dictionaries with lists", "open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file: params = yaml.load(yaml_file) # Get problem and model", "2.0 (the \"License\"); # you may not use this file except in compliance", "the specific language governing permissions and # limitations under the License. \"\"\" grid_analyzer.py:", "in directory: {} \\n\".format(self.experiment_rootdir) exp_str += '='*80 + '\\n' for exp in self.experiments_list:", "file, - Loads checkpoint with model and training and validation statistics, - Recursively", "gathers the test results into one `.csv` file. \"\"\" __author__ = \"<NAME> &", "super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_): \"\"\" Checks if ``filename_`` exists in", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "the directory, else False \"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_, filename_): \"\"\"", "file. :type dir_: str :param filename_: Name of the file to be opened", "- Loads checkpoint with model and training and validation statistics, - Recursively traverses", "test(s)'.format(len(experiments_tests))) # \"Expand\" status, train and valid dicts by empty ones, prop. to", "# # Unless required by applicable law or agreed to in writing, software", "= [train_dict] list_valid_dicts = [valid_dict] # Add \"empty test entry\" list_test_dicts.append({}) # Return", "# Open file. test_reader = csv.DictReader(f) # Copy training statistics. for row in", "express or implied. # See the License for the specific language governing permissions", "= dict.fromkeys(status_dict.keys(), ' ') # Copy training status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml')", "Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name of the worker (DEFAULT: \"GridAnalyzer\").", "in self.experiments_list if self.check_if_file_exists(elem, 'training_configuration.yaml') and self.check_if_file_exists(elem, 'models/model_best.pt')] # Check if there are", "either express or implied. # See the License for the specific language governing", "None)) # Check if experiments directory was indicated. if self.flags.expdir == '': print('Please", "'testing_set_agg_statistics.csv')] # Check if the csv file contains at least one data point.", "str :return: Number of lines in the file. 
\"\"\" with open(filename_) as f:", "\"\"\" Collects the training / validation / test statistics for a given experiment", "[] for exp in self.experiments_list: statuses, trains, valids, tests = self.run_experiment(exp) list_statuses.extend(statuses) list_trains.extend(trains)", "Keep only the folders that contain a test configuration file and a csv", "'='*80 + '\\n' for exp in self.experiments_list: exp_str += \" - {}\\n\".format(exp) exp_str", "k in d) # Create an \"empty\" dict from the unified header. empty_dict", "# \"Fill\" all lists with empty gaps. list_filled_dicts = [] for i, _", "\"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor", "*[valid_dict_empty for _ in range(len(experiments_tests) - 1)]] # Get tests statistics. for experiment_test_path", "the License. # You may obtain a copy of the License at #", "to be opened and line-read. :type filename_: str :return: Number of lines in", "header. header = set(k for d in list_dicts for k in d) #", "list_trains = [] list_valids = [] list_tests = [] for exp in self.experiments_list:", "+= '='*80 + '\\n' for exp in self.experiments_list: exp_str += \" - {}\\n\".format(exp)", "[elem for elem in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests def setup_grid_experiment(self): \"\"\"", "\"\"\" Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`. \"\"\" grid_analyzer = GridAnalyzer() # parse", "csv import yaml import torch import logging from datetime import datetime from miprometheus.grid_workers.grid_worker", "best saved model). \"\"\" # Parse arguments. 
self.flags, self.unparsed = self.parser.parse_known_args() # Set", "exists in the directory, else False \"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_,", "Merges a list of dictionaries by filling the missing fields with spaces into", "and line-read. :type filename_: str :return: Number of lines in the file. \"\"\"", "least one line with \\ collected statistics (excluding the header). - Collects statistics", ":type dir_: str :param filename_: Name of the file to be opened and", "to be opened and analyzed. :type filename_: str :return: True if the number", "get random seeds. with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file: test_params = yaml.load(yaml_file) #", "key, value in row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else: self.logger.info(' - Could not", "in ``filename_``. :param filename_: Filepath to be opened and line-read. :type filename_: str", "args, load configuration and create all required objects. grid_analyzer.setup_grid_experiment() # GO! grid_analyzer.run_grid_experiment() if", "in the directory, else False \"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_, filename_):", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", ":return: Four dictionaries containing: - Status info (model, problem etc.), - Training statistics,", "import logging from datetime import datetime from miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker): \"\"\"", "# training.csv and model (which contains aggregated validation statistics). self.experiments_list = [elem for", "list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) - 1)]] # Get tests", "point. 
experiments_tests = [elem for elem in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return experiments_tests", "in ``self.experiments_lists``. Merges all them together and saves result to a single csv", "list_tests.extend(tests) # Merge lists. statuses = self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids)", "with open(results_file, \"w\") as outfile: writer = csv.writer(outfile, delimiter=',') writer.writerow(exp_values.keys()) writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished')", "{} test(s)'.format(len(experiments_tests))) # \"Expand\" status, train and valid dicts by empty ones, prop.", "miprometheus.grid_workers.grid_worker import GridWorker class GridAnalyzer(GridWorker): \"\"\" Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`. Post-processes the test", "the overall experiment: - Parses arguments and sets logger level, - Checks the", "file. chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'), map_location=lambda storage, loc: storage) status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp']) status_dict['training_terminal_status']", "exit(-2) # List folders with \"valid\" experiment data. exp_str = \"Found the following", "folders that contain a test configuration file and a csv statistics file. experiments_tests", "\"\"\" return os.path.isfile(os.path.join(dir_, filename_)) def check_file_content(self, dir_, filename_): \"\"\" Checks if the number", "find any valid tests') list_status_dicts = [status_dict] list_train_dicts = [train_dict] list_valid_dicts = [valid_dict]", "experiments \\ (from test csv files found in subdirectories). :param experiment_path: Path to", "# Create \"empty\" equivalent. 
status_dict_empty = dict.fromkeys(status_dict.keys(), ' ') # Copy training status", "= '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp']) # Create \"empty\" equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(), ' ') # Copy", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "range(len(experiments_tests) - 1)]] # Get tests statistics. for experiment_test_path in experiments_tests: self.logger.info(' -", "file is strictly greater than one. \"\"\" return self.get_lines_number(os.path.join(dir_, filename_)) > 1 @staticmethod", "self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) # Merge everything into one big", "each experiment path contained in ``self.experiments_lists``. Merges all them together and saves result", "\"\"\" Collects four list of dicts from each experiment path contained in ``self.experiments_lists``.", "random seeds. with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file: params = yaml.load(yaml_file) # Get", "data point. experiments_tests = [elem for elem in experiments_tests if self.check_file_content(elem, 'testing_set_agg_statistics.csv')] return", "constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_): \"\"\" Checks if ``filename_`` exists", "file and a csv statistics file. experiments_tests = [elem for elem in experiments_tests", "valid dicts by empty ones, prop. to the number of test folders. list_status_dicts", "experiments, .. 
note:: We require that the test statistics csv files are valid,", "the following valid experiments in directory: {} \\n\".format(self.experiment_rootdir) exp_str += '='*80 + '\\n'", "{}'.format(experiment_test_path)) # Create test dict: test_dict = dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp']", "test_params['testing']['seed_numpy'] # Load csv file and copy test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r')", "model (which contains aggregated validation statistics). self.experiments_list = [elem for elem in self.experiments_list", "csv file contains at least one data point. experiments_tests = [elem for elem", "overall experiment: - Parses arguments and sets logger level, - Checks the presence", "# call base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def check_if_file_exists(dir_, filename_): \"\"\" Checks", "accuracies, terminal conditions,...), \\ the validation statistics and the test statistics. Inherits from", "__author__ = \"<NAME> & <NAME>\" import os import csv import yaml import torch", "writer.writerows(zip(*exp_values.values())) self.logger.info('Analysis finished') self.logger.info('Results stored in {}.'.format(results_file)) except KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def", "\"empty\" equivalent. status_dict_empty = dict.fromkeys(status_dict.keys(), ' ') # Copy training status stats. train_dict['training_configuration_filepath']", ":py:class:`miprometheus.grid_workers.GridWorker`. 
\"\"\" def __init__(self, name=\"GridAnalyzer\"): \"\"\" Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic", "Create test dict: test_dict = dict() test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml') test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:]", "in test_reader: for key, value in row.items(): test_dict['test_{}'.format(key)] = value list_test_dicts.append(test_dict) else: self.logger.info('", "model names - from config. status_dict['problem'] = params['testing']['problem']['name'] status_dict['model'] = params['model']['name'] # Load", "if self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if the csv file contains", "except in compliance with the License. # You may obtain a copy of", "= dict() # Load yaml file, to get model name, problem name and", "Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`: - Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker` :param name: Name", "- Status info (model, problem etc.), - Training statistics, - Validation statistics, -", "be opened and analysed. :type filename_: str :return: True if the file exists", "statistics csv files are valid, i.e. contain at least one line with \\", "\"Found the following valid experiments in directory: {} \\n\".format(self.experiment_rootdir) exp_str += '='*80 +", "# Merge lists. statuses = self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests", "the grid analyzis\\n') except KeyboardInterrupt: exit(0) def run_experiment(self, experiment_path: str): \"\"\" Collects the", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "value in chkpt['training_stats'].items(): train_dict['training_{}'.format(key)] = value # Create \"empty\" equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(),", "in header} # \"Fill\" all lists with empty gaps. list_filled_dicts = [] for", "contain a test configuration file and a csv statistics file. experiments_tests = [elem", "KeyboardInterrupt: self.logger.info('Grid analysis interrupted!') def main(): \"\"\" Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`.", "- Analyzing test from: {}'.format(experiment_test_path)) # Create test dict: test_dict = dict() test_dict['test_configuration_filepath']", "return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges a list of", "list_test_dicts @staticmethod def merge_list_dicts(list_dicts): \"\"\" Merges a list of dictionaries by filling the", "def check_if_file_exists(dir_, filename_): \"\"\" Checks if ``filename_`` exists in ``dir_``. :param dir_: Path", "self.check_if_file_exists(elem, 'testing_configuration.yaml') and self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')] # Check if the csv file contains at", "Copy training status stats. train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml') train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path)) train_dict['training_seed_torch'] =", "for test experiments, .. 
note:: We require that the test statistics csv files", "= [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) - 1)]] # Get tests statistics.", "list_status_dicts = [status_dict] list_train_dicts = [train_dict] list_valid_dicts = [valid_dict] # Add \"empty test", "root, dirs, _ in os.walk(experiment_path_, topdown=True): for name in dirs: experiments_tests.append(os.path.join(root, name)) #", "configuration file), - (b) 'models/model_best.pt' (checkpoint of the best saved model). \"\"\" #", "tests statistics. for experiment_test_path in experiments_tests: self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path)) #", ":type name: str \"\"\" # call base constructor super(GridAnalyzer, self).__init__(name=name, use_gpu=False) @staticmethod def", "# Load csv file and copy test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as", "containing different headers, which will be merged. :type list_dicts: list :return: dict, resulting", "is > 1. :param dir_: Path to file. :type dir_: str :param filename_:", "[train_dict, *[train_dict_empty for _ in range(len(experiments_tests) - 1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty for", "str): \"\"\" Collects the training / validation / test statistics for a given", "trains = self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests = self.merge_list_dicts(list_tests) # Merge everything into", "csv files found in subdirectories). :param experiment_path: Path to an experiment folder containing", "Load csv file and copy test statistics with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f:", "of lines in ``filename_``. :param filename_: Filepath to be opened and line-read. :type", "value # Create \"empty\" equivalent. train_dict_empty = dict.fromkeys(train_dict.keys(), ' ') # Copy the", "Merge lists. 
statuses = self.merge_list_dicts(list_statuses) trains = self.merge_list_dicts(list_trains) valids = self.merge_list_dicts(list_valids) tests =", "_ in range(len(experiments_tests) - 1)]] list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests)" ]
[ "img_info in enumerate(self.img_infos): if self.img_ids[i] not in ids_with_ann: continue if min(img_info['width'], img_info['height']) >=", "flip) if data is None: ori_shape = (img_info['height'], img_info['width'], 3) img_meta = dict(", "not in ids_with_ann: continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) return valid_inds def", "self.bbox_transform(gt_bboxes_ignore, img_shape, scale_factor, flip) if self.with_mask: gt_masks = self.mask_transform(gt_masks, pad_shape, scale_factor, flip) if", "4), dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else:", "'png')), flag='unchanged') gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor, interpolation='nearest')", "load proposals if necessary if self.proposals is not None: proposals = self.proposals[idx][:self.num_max_proposals] #", "= [] # Two formats are provided. # 1. 
mask: a binary map", "import to_tensor, random_scale from mmcv.parallel import DataContainer as DC import mmcv from .custom", "len(proposals) == 0: return None if not (proposals.shape[1] == 4 or proposals.shape[1] ==", "CLASSES = ('microbleed', 'full_bounding_box') def load_annotations(self, ann_file): self.coco = COCO(ann_file) self.cat_ids = self.coco.getCatIds()", "have >= 3 points (6 coordinates) poly_lens = [len(p) for p in mask_polys]", "= self.bbox_transform(proposals, img_shape, scale_factor, flip) proposals = np.hstack( [proposals, scores]) if scores is", "cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) gt_bboxes.append(cur_slice_bboxes) gt_labels.append(cur_slice_labels) gt_bboxes_ignore.append(cur_slice_bboxes_ignore) gt_masks.append(cur_masks) gt_mask_polys.append(cur_mask_polys) gt_poly_lens.append(cur_poly_lens) ann =", "self.get_ann_info(idx) gt_bboxes_list = ann['bboxes'] gt_labels_list = ann['labels'] # if self.with_crowd: gt_bboxes_ignore_list = ann['bboxes_ignore']", "cat_id: i + 1 for i, cat_id in enumerate(self.cat_ids) } self.img_ids = self.coco.getImgIds()", "= [] cur_masks = [] cur_mask_polys = [] cur_poly_lens = [] for i,", "= self.bbox_transform(gt_bboxes, img_shape, scale_factor, flip) if self.with_crowd: gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape, scale_factor, flip)", "as DC import mmcv from .custom import CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES = ('microbleed',", "_filter_imgs(self, min_size=32): \"\"\"Filter images too small or without ground truths.\"\"\" valid_inds = []", "= ann['masks'] # apply transforms flip = True if np.random.rand() < self.flip_ratio else", "== 0: return None # extra augmentation if self.extra_aug is not None: img,", "ids_with_ann: continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) return valid_inds def _parse_ann_info(self, ann_info,", "osp.join(self.seg_prefix, img_info['file_name'].replace( 'jpg', 'png')), 
flag='unchanged') gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg = mmcv.imrescale(", ">= 3 points (6 coordinates) poly_lens = [len(p) for p in mask_polys] cur_mask_polys.append(mask_polys)", "= gt_masks # poly format is not used in the current implementation ann['mask_polys']", "return valid_inds def _parse_ann_info(self, ann_info, with_mask=True): \"\"\"Parse bbox and mask annotation. Args: ann_info", ":4] else: scores = None ann = self.get_ann_info(idx) gt_bboxes_list = ann['bboxes'] gt_labels_list =", "key in slices_ann_info: cur_ann_info = slices_ann_info[key] cur_slice_bboxes = [] cur_slice_labels = [] cur_slice_bboxes_ignore", "= mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals if necessary if self.proposals is not None:", "p in ann['segmentation'] if len(p) >= 6 ] # valid polygons have >=", "y1, x1 + w - 1, y1 + h - 1] if ann['iscrowd']:", "img_id = self.img_infos[idx]['id'] ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info = self.coco.loadAnns(ann_ids) return self._parse_ann_info(ann_info, self.with_mask) def", "= self.coco.getCatIds() self.cat2label = { cat_id: i + 1 for i, cat_id in", "consists of one or several polys, each poly is a # list of", "p for p in ann['segmentation'] if len(p) >= 6 ] # valid polygons", "one or several polys, each poly is a # list of float. 
if", "a scale img_scale = random_scale(self.img_scales, self.multiscale_mode) img, img_shape, pad_shape, scale_factor = self.img_transform( img,", "if ann['iscrowd']: cur_slice_bboxes_ignore.append(bbox) else: cur_slice_bboxes.append(bbox) cur_slice_labels.append(self.cat2label[ann['category_id']]) if with_mask: cur_masks.append(self.coco.annToMask(ann)) mask_polys = [ p", "flip=flip, image_id=img_info['id']) data = dict( img=DC(to_tensor(img), stack=True), img_meta=DC(img_meta, cpu_only=True)) self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes))) if", "= proposals[:, :4] else: scores = None ann = self.get_ann_info(idx) gt_bboxes_list = ann['bboxes']", "6 ] # valid polygons have >= 3 points (6 coordinates) poly_lens =", "can be used for # training in concept. if len(proposals) == 0: return", "Annotation info of an image. with_mask (bool): Whether to parse mask annotations. Returns:", "info['slice_label'] == 'r': slices_ann_info['r'].append(info) elif info['slice_label'] == 'g': slices_ann_info['g'].append(info) elif info['slice_label'] == 'b':", "w - 1, y1 + h - 1] if ann['iscrowd']: cur_slice_bboxes_ignore.append(bbox) else: cur_slice_bboxes.append(bbox)", "but they can be used for # training in concept. 
if len(proposals) ==", "else: cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore", "cpu_only=True)) self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes))) if self.proposals is not None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals))) if", "for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list): # skip the", "None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals))) if self.with_label: self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels))) if self.with_crowd: self.insert_to_dict(data, 'gt_bboxes_ignore',", "info = self.coco.loadImgs([i])[0] info['filename'] = info['file_name'] img_infos.append(info) return img_infos def get_ann_info(self, idx): img_id", "= dict( ori_shape=ori_shape, img_shape=img_shape, pad_shape=pad_shape, scale_factor=scale_factor, flip=flip, image_id=img_info['id']) data = dict( img=DC(to_tensor(img), stack=True),", "= np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64)", "= [] gt_mask_polys = [] gt_poly_lens = [] for key in slices_ann_info: cur_ann_info", "for info in ann_info: if info['slice_label'] == 'r': slices_ann_info['r'].append(info) elif info['slice_label'] == 'g':", "img_infos = [] for i in self.img_ids: info = self.coco.loadImgs([i])[0] info['filename'] = info['file_name']", "flip = True if np.random.rand() < self.flip_ratio else False data = None for", "i, img_info in enumerate(self.img_infos): if self.img_ids[i] not in ids_with_ann: continue if min(img_info['width'], img_info['height'])", "mask_polys = [ p for p in ann['segmentation'] if len(p) >= 6 ]", "= [] for i, ann in enumerate(cur_ann_info): if ann.get('ignore', False): continue x1, y1,", "img, img_scale, flip, keep_ratio=self.resize_keep_ratio) img = 
img.copy() if self.with_seg: gt_seg = mmcv.imread( osp.join(self.seg_prefix,", "of the same size of the image. # 2. polys: each mask consists", "self.coco.getImgIds() img_infos = [] for i in self.img_ids: info = self.coco.loadImgs([i])[0] info['filename'] =", "from pycocotools_local.coco import * import os.path as osp from .utils import to_tensor, random_scale", "data: data[key].append(tensors) else: data[key] = [tensors] def prepare_train_img(self, idx): img_info = self.img_infos[idx] #", "proposals are just ignored, but they can be used for # training in", "self.coco.loadAnns(ann_ids) return self._parse_ann_info(ann_info, self.with_mask) def _filter_imgs(self, min_size=32): \"\"\"Filter images too small or without", "else: cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) gt_bboxes.append(cur_slice_bboxes) gt_labels.append(cur_slice_labels) gt_bboxes_ignore.append(cur_slice_bboxes_ignore) gt_masks.append(cur_masks) gt_mask_polys.append(cur_mask_polys) gt_poly_lens.append(cur_poly_lens) ann", "or without ground truths.\"\"\" valid_inds = [] ids_with_ann = set(_['image_id'] for _ in", "== 'r': slices_ann_info['r'].append(info) elif info['slice_label'] == 'g': slices_ann_info['g'].append(info) elif info['slice_label'] == 'b': slices_ann_info['b'].append(info)", "= [] cur_mask_polys = [] cur_poly_lens = [] for i, ann in enumerate(cur_ann_info):", "# load image orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals if necessary if", "= self.coco.getImgIds() img_infos = [] for i in self.img_ids: info = self.coco.loadImgs([i])[0] info['filename']", "p in mask_polys] cur_mask_polys.append(mask_polys) cur_poly_lens.extend(poly_lens) if cur_slice_bboxes: cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32) cur_slice_labels =", "h - 1] if ann['iscrowd']: cur_slice_bboxes_ignore.append(bbox) else: cur_slice_bboxes.append(bbox) cur_slice_labels.append(self.cat2label[ann['category_id']]) if with_mask: 
cur_masks.append(self.coco.annToMask(ann)) mask_polys", "is not None: proposals = self.bbox_transform(proposals, img_shape, scale_factor, flip) proposals = np.hstack( [proposals,", "formats are provided. # 1. mask: a binary map of the same size", "if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) return valid_inds def _parse_ann_info(self, ann_info, with_mask=True): \"\"\"Parse", "[] ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) for i, img_info in enumerate(self.img_infos):", "keys: bboxes, bboxes_ignore, labels, masks, mask_polys, poly_lens. \"\"\" slices_ann_info = {'r': [], 'g':", "is not None else proposals gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor, flip) if self.with_crowd:", "return ann def insert_to_dict(self, data, key, tensors): if key in data: data[key].append(tensors) else:", "class CocoDatasetRGB2(CustomDataset): CLASSES = ('microbleed', 'full_bounding_box') def load_annotations(self, ann_file): self.coco = COCO(ann_file) self.cat_ids", "ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) for i, img_info in enumerate(self.img_infos): if", "[] gt_mask_polys = [] gt_poly_lens = [] for key in slices_ann_info: cur_ann_info =", "= [x1, y1, x1 + w - 1, y1 + h - 1]", "ori_shape=ori_shape, img_shape=img_shape, pad_shape=pad_shape, scale_factor=scale_factor, flip=flip, image_id=img_info['id']) data = dict( img=DC(to_tensor(img), stack=True), img_meta=DC(img_meta, cpu_only=True))", "list of float. 
if with_mask: gt_masks = [] gt_mask_polys = [] gt_poly_lens =", "scale_factor, flip) if data is None: ori_shape = (img_info['height'], img_info['width'], 3) img_meta =", "img_meta = dict( ori_shape=ori_shape, img_shape=img_shape, pad_shape=pad_shape, scale_factor=scale_factor, flip=flip, image_id=img_info['id']) data = dict( img=DC(to_tensor(img),", "[] cur_slice_bboxes_ignore = [] cur_masks = [] cur_mask_polys = [] cur_poly_lens = []", "zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list): # skip the image if there is no valid", "return None # extra augmentation if self.extra_aug is not None: img, gt_bboxes, gt_labels", "[proposals, scores]) if scores is not None else proposals gt_bboxes = self.bbox_transform(gt_bboxes, img_shape,", "4, None] proposals = proposals[:, :4] else: scores = None ann = self.get_ann_info(idx)", "info['filename'] = info['file_name'] img_infos.append(info) return img_infos def get_ann_info(self, idx): img_id = self.img_infos[idx]['id'] ann_ids", "[tensors] def prepare_train_img(self, idx): img_info = self.img_infos[idx] # load image orig_img = mmcv.imread(osp.join(self.img_prefix,", "ann['area'] <= 0 or w < 1 or h < 1: continue bbox", "h = ann['bbox'] if ann['area'] <= 0 or w < 1 or h", "= self.img_transform( img, img_scale, flip, keep_ratio=self.resize_keep_ratio) img = img.copy() if self.with_seg: gt_seg =", "= [ p for p in ann['segmentation'] if len(p) >= 6 ] #", "np from pycocotools_local.coco import * import os.path as osp from .utils import to_tensor,", "[] # Two formats are provided. # 1. mask: a binary map of", "scores is not None else proposals gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor, flip) if", "gt_poly_lens = [] for key in slices_ann_info: cur_ann_info = slices_ann_info[key] cur_slice_bboxes = []", "# 1. 
mask: a binary map of the same size of the image.", "np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore = np.zeros((0, 4),", "import numpy as np from pycocotools_local.coco import * import os.path as osp from", "return self._parse_ann_info(ann_info, self.with_mask) def _filter_imgs(self, min_size=32): \"\"\"Filter images too small or without ground", "continue bbox = [x1, y1, x1 + w - 1, y1 + h", "= None for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list): #", "data[key].append(tensors) else: data[key] = [tensors] def prepare_train_img(self, idx): img_info = self.img_infos[idx] # load", "if ann.get('ignore', False): continue x1, y1, w, h = ann['bbox'] if ann['area'] <=", "proposals gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor, flip) if self.with_crowd: gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,", "parse mask annotations. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore,", "proposals properly. Currently images with # no proposals are just ignored, but they", "proposals.shape[1] == 5: scores = proposals[:, 4, None] proposals = proposals[:, :4] else:", "mask annotation. Args: ann_info (list[dict]): Annotation info of an image. 
with_mask (bool): Whether", "gt_masks = [] gt_mask_polys = [] gt_poly_lens = [] for key in slices_ann_info:", "self.with_crowd: gt_bboxes_ignore_list = ann['bboxes_ignore'] gt_masks_list = ann['masks'] # apply transforms flip = True", "gt_labels_list, gt_bboxes_ignore_list, gt_masks_list): # skip the image if there is no valid gt", "self.extra_aug is not None: img, gt_bboxes, gt_labels = self.extra_aug(orig_img, gt_bboxes, gt_labels) else: img", "os.path as osp from .utils import to_tensor, random_scale from mmcv.parallel import DataContainer as", "= gt_poly_lens return ann def insert_to_dict(self, data, key, tensors): if key in data:", "# 2. polys: each mask consists of one or several polys, each poly", "osp from .utils import to_tensor, random_scale from mmcv.parallel import DataContainer as DC import", "def prepare_train_img(self, idx): img_info = self.img_infos[idx] # load image orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))", "' 'but found {}'.format(proposals.shape)) if proposals.shape[1] == 5: scores = proposals[:, 4, None]", "no valid gt bbox if len(gt_bboxes) == 0: return None # extra augmentation", "i in self.img_ids: info = self.coco.loadImgs([i])[0] info['filename'] = info['file_name'] img_infos.append(info) return img_infos def", "[], 'g': [], 'b': []} for info in ann_info: if info['slice_label'] == 'r':", "ann_file): self.coco = COCO(ann_file) self.cat_ids = self.coco.getCatIds() self.cat2label = { cat_id: i +", "continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) return valid_inds def _parse_ann_info(self, ann_info, with_mask=True):", "valid_inds = [] ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) for i, img_info", "in ann['segmentation'] if len(p) >= 6 ] # valid polygons have >= 3", "# valid polygons have >= 3 points (6 coordinates) poly_lens = [len(p) for", "# load proposals if necessary if self.proposals is not None: proposals = 
self.proposals[idx][:self.num_max_proposals]", "None] proposals = proposals[:, :4] else: scores = None ann = self.get_ann_info(idx) gt_bboxes_list", "+ w - 1, y1 + h - 1] if ann['iscrowd']: cur_slice_bboxes_ignore.append(bbox) else:", "to_tensor, random_scale from mmcv.parallel import DataContainer as DC import mmcv from .custom import", "img = img.copy() if self.with_seg: gt_seg = mmcv.imread( osp.join(self.seg_prefix, img_info['file_name'].replace( 'jpg', 'png')), flag='unchanged')", "keep_ratio=self.resize_keep_ratio) img = img.copy() if self.with_seg: gt_seg = mmcv.imread( osp.join(self.seg_prefix, img_info['file_name'].replace( 'jpg', 'png')),", "self.with_mask: gt_masks = self.mask_transform(gt_masks, pad_shape, scale_factor, flip) if data is None: ori_shape =", "in enumerate(self.img_infos): if self.img_ids[i] not in ids_with_ann: continue if min(img_info['width'], img_info['height']) >= min_size:", "orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals if necessary if self.proposals is not", "def get_ann_info(self, idx): img_id = self.img_infos[idx]['id'] ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info = self.coco.loadAnns(ann_ids) return", "ann['masks'] # apply transforms flip = True if np.random.rand() < self.flip_ratio else False", "(list[dict]): Annotation info of an image. with_mask (bool): Whether to parse mask annotations.", "== 0: return None if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):", "= [] gt_labels = [] gt_bboxes_ignore = [] # Two formats are provided.", "flag='unchanged') gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor, interpolation='nearest') gt_seg", "1. mask: a binary map of the same size of the image. #", "= ('microbleed', 'full_bounding_box') def load_annotations(self, ann_file): self.coco = COCO(ann_file) self.cat_ids = self.coco.getCatIds() self.cat2label", "to parse mask annotations. 
Returns: dict: A dict containing the following keys: bboxes,", "np.array(cur_slice_bboxes, dtype=np.float32) cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32) cur_slice_labels", "5): raise AssertionError( 'proposals should have shapes (n, 4) or (n, 5), '", "0: return None # extra augmentation if self.extra_aug is not None: img, gt_bboxes,", "cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) gt_bboxes.append(cur_slice_bboxes) gt_labels.append(cur_slice_labels) gt_bboxes_ignore.append(cur_slice_bboxes_ignore)", "flip) gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor, interpolation='nearest') gt_seg = gt_seg[None, ...] if self.proposals", "flip) proposals = np.hstack( [proposals, scores]) if scores is not None else proposals", "binary map of the same size of the image. # 2. polys: each", "('microbleed', 'full_bounding_box') def load_annotations(self, ann_file): self.coco = COCO(ann_file) self.cat_ids = self.coco.getCatIds() self.cat2label =", "cur_slice_labels.append(self.cat2label[ann['category_id']]) if with_mask: cur_masks.append(self.coco.annToMask(ann)) mask_polys = [ p for p in ann['segmentation'] if", "dtype=np.float32) cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32) cur_slice_labels =", "valid_inds def _parse_ann_info(self, ann_info, with_mask=True): \"\"\"Parse bbox and mask annotation. 
Args: ann_info (list[dict]):", "if info['slice_label'] == 'r': slices_ann_info['r'].append(info) elif info['slice_label'] == 'g': slices_ann_info['g'].append(info) elif info['slice_label'] ==", "images with # no proposals are just ignored, but they can be used", "image orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals if necessary if self.proposals is", "apply transforms flip = True if np.random.rand() < self.flip_ratio else False data =", "mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals if necessary if self.proposals is not None: proposals", "if ann['area'] <= 0 or w < 1 or h < 1: continue", "self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes))) if self.proposals is not None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals))) if self.with_label:", "gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape, scale_factor, flip) if self.with_mask: gt_masks = self.mask_transform(gt_masks, pad_shape, scale_factor,", "y1, w, h = ann['bbox'] if ann['area'] <= 0 or w < 1", "= [] for i in self.img_ids: info = self.coco.loadImgs([i])[0] info['filename'] = info['file_name'] img_infos.append(info)", "ann_info: if info['slice_label'] == 'r': slices_ann_info['r'].append(info) elif info['slice_label'] == 'g': slices_ann_info['g'].append(info) elif info['slice_label']", "ann['mask_polys'] = gt_mask_polys ann['poly_lens'] = gt_poly_lens return ann def insert_to_dict(self, data, key, tensors):", "concept. 
if len(proposals) == 0: return None if not (proposals.shape[1] == 4 or", "shapes (n, 4) or (n, 5), ' 'but found {}'.format(proposals.shape)) if proposals.shape[1] ==", "* import os.path as osp from .utils import to_tensor, random_scale from mmcv.parallel import", "ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info = self.coco.loadAnns(ann_ids) return self._parse_ann_info(ann_info, self.with_mask) def _filter_imgs(self, min_size=32): \"\"\"Filter", "proposals if necessary if self.proposals is not None: proposals = self.proposals[idx][:self.num_max_proposals] # TODO:", "(n, 4) or (n, 5), ' 'but found {}'.format(proposals.shape)) if proposals.shape[1] == 5:", "{}'.format(proposals.shape)) if proposals.shape[1] == 5: scores = proposals[:, 4, None] proposals = proposals[:,", "= np.zeros((0, 4), dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore,", "= np.hstack( [proposals, scores]) if scores is not None else proposals gt_bboxes =", "'jpg', 'png')), flag='unchanged') gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor,", "= [] for key in slices_ann_info: cur_ann_info = slices_ann_info[key] cur_slice_bboxes = [] cur_slice_labels", "= COCO(ann_file) self.cat_ids = self.coco.getCatIds() self.cat2label = { cat_id: i + 1 for", "with_mask=True): \"\"\"Parse bbox and mask annotation. Args: ann_info (list[dict]): Annotation info of an", "and mask annotation. Args: ann_info (list[dict]): Annotation info of an image. 
with_mask (bool):", "random_scale(self.img_scales, self.multiscale_mode) img, img_shape, pad_shape, scale_factor = self.img_transform( img, img_scale, flip, keep_ratio=self.resize_keep_ratio) img", "gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list): # skip the image", "5), ' 'but found {}'.format(proposals.shape)) if proposals.shape[1] == 5: scores = proposals[:, 4,", "gt bbox if len(gt_bboxes) == 0: return None # extra augmentation if self.extra_aug", "dict containing the following keys: bboxes, bboxes_ignore, labels, masks, mask_polys, poly_lens. \"\"\" slices_ann_info", "= self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor, interpolation='nearest') gt_seg = gt_seg[None,", "sample a scale img_scale = random_scale(self.img_scales, self.multiscale_mode) img, img_shape, pad_shape, scale_factor = self.img_transform(", "ann = self.get_ann_info(idx) gt_bboxes_list = ann['bboxes'] gt_labels_list = ann['labels'] # if self.with_crowd: gt_bboxes_ignore_list", "= slices_ann_info[key] cur_slice_bboxes = [] cur_slice_labels = [] cur_slice_bboxes_ignore = [] cur_masks =", "= np.array(cur_slice_bboxes, dtype=np.float32) cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32)", "poly_lens = [len(p) for p in mask_polys] cur_mask_polys.append(mask_polys) cur_poly_lens.extend(poly_lens) if cur_slice_bboxes: cur_slice_bboxes =", "'r': slices_ann_info['r'].append(info) elif info['slice_label'] == 'g': slices_ann_info['g'].append(info) elif info['slice_label'] == 'b': slices_ann_info['b'].append(info) gt_bboxes", "np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) gt_bboxes.append(cur_slice_bboxes) gt_labels.append(cur_slice_labels) gt_bboxes_ignore.append(cur_slice_bboxes_ignore) gt_masks.append(cur_masks) 
gt_mask_polys.append(cur_mask_polys)", "or several polys, each poly is a # list of float. if with_mask:", "cur_poly_lens.extend(poly_lens) if cur_slice_bboxes: cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32) cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes", "= [] gt_poly_lens = [] for key in slices_ann_info: cur_ann_info = slices_ann_info[key] cur_slice_bboxes", "else: data[key] = [tensors] def prepare_train_img(self, idx): img_info = self.img_infos[idx] # load image", "Whether to parse mask annotations. Returns: dict: A dict containing the following keys:", "there is no valid gt bbox if len(gt_bboxes) == 0: return None #", "gt_labels = self.extra_aug(orig_img, gt_bboxes, gt_labels) else: img = orig_img # randomly sample a", "DC(to_tensor(proposals))) if self.with_label: self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels))) if self.with_crowd: self.insert_to_dict(data, 'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore))) if self.with_mask:", "for i in self.img_ids: info = self.coco.loadImgs([i])[0] info['filename'] = info['file_name'] img_infos.append(info) return img_infos", "img_scale, flip, keep_ratio=self.resize_keep_ratio) img = img.copy() if self.with_seg: gt_seg = mmcv.imread( osp.join(self.seg_prefix, img_info['file_name'].replace(", "not None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals))) if self.with_label: self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels))) if self.with_crowd: self.insert_to_dict(data,", "self.with_mask: self.insert_to_dict(data, 'gt_masks', DC(gt_masks, cpu_only=True)) if self.with_seg: self.insert_to_dict(data, 'gt_semantic_seg', DC(to_tensor(gt_seg), stack=True)) return data", "too small or without ground truths.\"\"\" valid_inds = [] ids_with_ann = set(_['image_id'] for", "def _filter_imgs(self, min_size=32): \"\"\"Filter images too small or without ground truths.\"\"\" valid_inds =", "np.hstack( [proposals, scores]) if scores is not None 
else proposals gt_bboxes = self.bbox_transform(gt_bboxes,", "img_shape, pad_shape, scale_factor = self.img_transform( img, img_scale, flip, keep_ratio=self.resize_keep_ratio) img = img.copy() if", "DataContainer as DC import mmcv from .custom import CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES =", "Two formats are provided. # 1. mask: a binary map of the same", "gt_bboxes.append(cur_slice_bboxes) gt_labels.append(cur_slice_labels) gt_bboxes_ignore.append(cur_slice_bboxes_ignore) gt_masks.append(cur_masks) gt_mask_polys.append(cur_mask_polys) gt_poly_lens.append(cur_poly_lens) ann = dict( bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore) if", "Args: ann_info (list[dict]): Annotation info of an image. with_mask (bool): Whether to parse", "gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list): # skip the image if", "enumerate(self.cat_ids) } self.img_ids = self.coco.getImgIds() img_infos = [] for i in self.img_ids: info", "gt_bboxes_ignore = [] # Two formats are provided. # 1. mask: a binary", "be used for # training in concept. 
if len(proposals) == 0: return None", "proposals[:, 4, None] proposals = proposals[:, :4] else: scores = None ann =", "info in ann_info: if info['slice_label'] == 'r': slices_ann_info['r'].append(info) elif info['slice_label'] == 'g': slices_ann_info['g'].append(info)", "gt_masks_list): # skip the image if there is no valid gt bbox if", "cur_masks.append(self.coco.annToMask(ann)) mask_polys = [ p for p in ann['segmentation'] if len(p) >= 6", "gt_labels_list = ann['labels'] # if self.with_crowd: gt_bboxes_ignore_list = ann['bboxes_ignore'] gt_masks_list = ann['masks'] #", "if key in data: data[key].append(tensors) else: data[key] = [tensors] def prepare_train_img(self, idx): img_info", "or proposals.shape[1] == 5): raise AssertionError( 'proposals should have shapes (n, 4) or", "idx): img_info = self.img_infos[idx] # load image orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load", "self.with_mask) def _filter_imgs(self, min_size=32): \"\"\"Filter images too small or without ground truths.\"\"\" valid_inds", "are provided. # 1. mask: a binary map of the same size of", "cur_poly_lens = [] for i, ann in enumerate(cur_ann_info): if ann.get('ignore', False): continue x1,", "[ p for p in ann['segmentation'] if len(p) >= 6 ] # valid", "A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, mask_polys, poly_lens. \"\"\"", "a binary map of the same size of the image. # 2. polys:", "in concept. 
if len(proposals) == 0: return None if not (proposals.shape[1] == 4", "continue x1, y1, w, h = ann['bbox'] if ann['area'] <= 0 or w", "not None: img, gt_bboxes, gt_labels = self.extra_aug(orig_img, gt_bboxes, gt_labels) else: img = orig_img", "if self.with_crowd: gt_bboxes_ignore_list = ann['bboxes_ignore'] gt_masks_list = ann['masks'] # apply transforms flip =", "[] for i in self.img_ids: info = self.coco.loadImgs([i])[0] info['filename'] = info['file_name'] img_infos.append(info) return", "'g': slices_ann_info['g'].append(info) elif info['slice_label'] == 'b': slices_ann_info['b'].append(info) gt_bboxes = [] gt_labels = []", "np.zeros((0, 4), dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32)", "'but found {}'.format(proposals.shape)) if proposals.shape[1] == 5: scores = proposals[:, 4, None] proposals", "self.coco.getAnnIds(imgIds=[img_id]) ann_info = self.coco.loadAnns(ann_ids) return self._parse_ann_info(ann_info, self.with_mask) def _filter_imgs(self, min_size=32): \"\"\"Filter images too", "= (img_info['height'], img_info['width'], 3) img_meta = dict( ori_shape=ori_shape, img_shape=img_shape, pad_shape=pad_shape, scale_factor=scale_factor, flip=flip, image_id=img_info['id'])", "= np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore = np.zeros((0,", "mask consists of one or several polys, each poly is a # list", "self.img_ids: info = self.coco.loadImgs([i])[0] info['filename'] = info['file_name'] img_infos.append(info) return img_infos def get_ann_info(self, idx):", "if with_mask: gt_masks = [] gt_mask_polys = [] gt_poly_lens = [] for key", "orig_img # randomly sample a scale img_scale = random_scale(self.img_scales, self.multiscale_mode) img, img_shape, pad_shape,", "of float. 
if with_mask: gt_masks = [] gt_mask_polys = [] gt_poly_lens = []", "<= 0 or w < 1 or h < 1: continue bbox =", "bboxes_ignore=gt_bboxes_ignore) if with_mask: ann['masks'] = gt_masks # poly format is not used in", "annotations. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks,", "[] gt_poly_lens = [] for key in slices_ann_info: cur_ann_info = slices_ann_info[key] cur_slice_bboxes =", "cur_mask_polys.append(mask_polys) cur_poly_lens.extend(poly_lens) if cur_slice_bboxes: cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32) cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64) else:", "cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32) cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes = np.zeros((0, 4),", "data, key, tensors): if key in data: data[key].append(tensors) else: data[key] = [tensors] def", "cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) gt_bboxes.append(cur_slice_bboxes) gt_labels.append(cur_slice_labels)", "raise AssertionError( 'proposals should have shapes (n, 4) or (n, 5), ' 'but", "len(gt_bboxes) == 0: return None # extra augmentation if self.extra_aug is not None:", "img_infos.append(info) return img_infos def get_ann_info(self, idx): img_id = self.img_infos[idx]['id'] ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info", "img_shape, scale_factor, flip) if self.with_mask: gt_masks = self.mask_transform(gt_masks, pad_shape, scale_factor, flip) if data", "None: proposals = self.bbox_transform(proposals, img_shape, scale_factor, flip) proposals = np.hstack( [proposals, scores]) if", "Currently images with # no proposals are just ignored, but they can be", "import mmcv from .custom import CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES = ('microbleed', 'full_bounding_box') def", "mmcv from .custom import 
CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES = ('microbleed', 'full_bounding_box') def load_annotations(self,", "gt_seg = gt_seg[None, ...] if self.proposals is not None: proposals = self.bbox_transform(proposals, img_shape,", "just ignored, but they can be used for # training in concept. if", "0 or w < 1 or h < 1: continue bbox = [x1,", "DC(to_tensor(gt_bboxes))) if self.proposals is not None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals))) if self.with_label: self.insert_to_dict(data, 'gt_labels',", "slices_ann_info['b'].append(info) gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] # Two formats", "np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64) if", "import os.path as osp from .utils import to_tensor, random_scale from mmcv.parallel import DataContainer", "bbox if len(gt_bboxes) == 0: return None # extra augmentation if self.extra_aug is", ".custom import CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES = ('microbleed', 'full_bounding_box') def load_annotations(self, ann_file): self.coco", "cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore =", "img_scale = random_scale(self.img_scales, self.multiscale_mode) img, img_shape, pad_shape, scale_factor = self.img_transform( img, img_scale, flip,", "same size of the image. # 2. 
polys: each mask consists of one", "dtype=np.float32) cur_slice_labels = np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore", "\"\"\"Filter images too small or without ground truths.\"\"\" valid_inds = [] ids_with_ann =", "else proposals gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor, flip) if self.with_crowd: gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore,", "if self.with_mask: gt_masks = self.mask_transform(gt_masks, pad_shape, scale_factor, flip) if data is None: ori_shape", "...] if self.proposals is not None: proposals = self.bbox_transform(proposals, img_shape, scale_factor, flip) proposals", "valid gt bbox if len(gt_bboxes) == 0: return None # extra augmentation if", "self.img_transform( img, img_scale, flip, keep_ratio=self.resize_keep_ratio) img = img.copy() if self.with_seg: gt_seg = mmcv.imread(", "the following keys: bboxes, bboxes_ignore, labels, masks, mask_polys, poly_lens. \"\"\" slices_ann_info = {'r':", "ann['segmentation'] if len(p) >= 6 ] # valid polygons have >= 3 points", "scale_factor, flip) proposals = np.hstack( [proposals, scores]) if scores is not None else", "valid_inds.append(i) return valid_inds def _parse_ann_info(self, ann_info, with_mask=True): \"\"\"Parse bbox and mask annotation. 
Args:", "bbox = [x1, y1, x1 + w - 1, y1 + h -", "x1 + w - 1, y1 + h - 1] if ann['iscrowd']: cur_slice_bboxes_ignore.append(bbox)", "extra augmentation if self.extra_aug is not None: img, gt_bboxes, gt_labels = self.extra_aug(orig_img, gt_bboxes,", "scale_factor, flip) if self.with_crowd: gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape, scale_factor, flip) if self.with_mask: gt_masks", "False): continue x1, y1, w, h = ann['bbox'] if ann['area'] <= 0 or", "[] for key in slices_ann_info: cur_ann_info = slices_ann_info[key] cur_slice_bboxes = [] cur_slice_labels =", "if self.proposals is not None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals))) if self.with_label: self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels)))", "cur_mask_polys = [] cur_poly_lens = [] for i, ann in enumerate(cur_ann_info): if ann.get('ignore',", "float. if with_mask: gt_masks = [] gt_mask_polys = [] gt_poly_lens = [] for", "randomly sample a scale img_scale = random_scale(self.img_scales, self.multiscale_mode) img, img_shape, pad_shape, scale_factor =", "img_info['filename'])) # load proposals if necessary if self.proposals is not None: proposals =", "self.img_infos[idx]['id'] ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info = self.coco.loadAnns(ann_ids) return self._parse_ann_info(ann_info, self.with_mask) def _filter_imgs(self, min_size=32):", "gt_masks_list = ann['masks'] # apply transforms flip = True if np.random.rand() < self.flip_ratio", "interpolation='nearest') gt_seg = gt_seg[None, ...] 
if self.proposals is not None: proposals = self.bbox_transform(proposals,", "'full_bounding_box') def load_annotations(self, ann_file): self.coco = COCO(ann_file) self.cat_ids = self.coco.getCatIds() self.cat2label = {", "if self.with_crowd: self.insert_to_dict(data, 'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore))) if self.with_mask: self.insert_to_dict(data, 'gt_masks', DC(gt_masks, cpu_only=True)) if self.with_seg:", "[] cur_mask_polys = [] cur_poly_lens = [] for i, ann in enumerate(cur_ann_info): if", "enumerate(cur_ann_info): if ann.get('ignore', False): continue x1, y1, w, h = ann['bbox'] if ann['area']", "self.proposals is not None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals))) if self.with_label: self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels))) if", "the same size of the image. # 2. polys: each mask consists of", "== 5): raise AssertionError( 'proposals should have shapes (n, 4) or (n, 5),", "self.bbox_transform(proposals, img_shape, scale_factor, flip) proposals = np.hstack( [proposals, scores]) if scores is not", "= ann['bbox'] if ann['area'] <= 0 or w < 1 or h <", "in enumerate(cur_ann_info): if ann.get('ignore', False): continue x1, y1, w, h = ann['bbox'] if", "cur_slice_bboxes_ignore.append(bbox) else: cur_slice_bboxes.append(bbox) cur_slice_labels.append(self.cat2label[ann['category_id']]) if with_mask: cur_masks.append(self.coco.annToMask(ann)) mask_polys = [ p for p", "ann_info = self.coco.loadAnns(ann_ids) return self._parse_ann_info(ann_info, self.with_mask) def _filter_imgs(self, min_size=32): \"\"\"Filter images too small", "+ 1 for i, cat_id in enumerate(self.cat_ids) } self.img_ids = self.coco.getImgIds() img_infos =", "annotation. Args: ann_info (list[dict]): Annotation info of an image. with_mask (bool): Whether to", "an image. with_mask (bool): Whether to parse mask annotations. 
Returns: dict: A dict", "load_annotations(self, ann_file): self.coco = COCO(ann_file) self.cat_ids = self.coco.getCatIds() self.cat2label = { cat_id: i", "None: proposals = self.proposals[idx][:self.num_max_proposals] # TODO: Handle empty proposals properly. Currently images with", "gt_labels) else: img = orig_img # randomly sample a scale img_scale = random_scale(self.img_scales,", "image_id=img_info['id']) data = dict( img=DC(to_tensor(img), stack=True), img_meta=DC(img_meta, cpu_only=True)) self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes))) if self.proposals", "gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor, interpolation='nearest') gt_seg =", "for i, ann in enumerate(cur_ann_info): if ann.get('ignore', False): continue x1, y1, w, h", "ann['labels'] # if self.with_crowd: gt_bboxes_ignore_list = ann['bboxes_ignore'] gt_masks_list = ann['masks'] # apply transforms", "for i, img_info in enumerate(self.img_infos): if self.img_ids[i] not in ids_with_ann: continue if min(img_info['width'],", "if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) gt_bboxes.append(cur_slice_bboxes)", "gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list): # skip the image if there", "img_info = self.img_infos[idx] # load image orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals", "= np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) gt_bboxes.append(cur_slice_bboxes) gt_labels.append(cur_slice_labels) gt_bboxes_ignore.append(cur_slice_bboxes_ignore) gt_masks.append(cur_masks)", "# randomly sample a scale img_scale = random_scale(self.img_scales, self.multiscale_mode) img, img_shape, pad_shape, scale_factor", "pad_shape=pad_shape, 
scale_factor=scale_factor, flip=flip, image_id=img_info['id']) data = dict( img=DC(to_tensor(img), stack=True), img_meta=DC(img_meta, cpu_only=True)) self.insert_to_dict(data, 'gt_bboxes',", "return None if not (proposals.shape[1] == 4 or proposals.shape[1] == 5): raise AssertionError(", "ann['bboxes_ignore'] gt_masks_list = ann['masks'] # apply transforms flip = True if np.random.rand() <", "labels, masks, mask_polys, poly_lens. \"\"\" slices_ann_info = {'r': [], 'g': [], 'b': []}", "training in concept. if len(proposals) == 0: return None if not (proposals.shape[1] ==", "[]} for info in ann_info: if info['slice_label'] == 'r': slices_ann_info['r'].append(info) elif info['slice_label'] ==", "if self.with_label: self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels))) if self.with_crowd: self.insert_to_dict(data, 'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore))) if self.with_mask: self.insert_to_dict(data,", "mmcv.imread( osp.join(self.seg_prefix, img_info['file_name'].replace( 'jpg', 'png')), flag='unchanged') gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg =", "self.multiscale_mode) img, img_shape, pad_shape, scale_factor = self.img_transform( img, img_scale, flip, keep_ratio=self.resize_keep_ratio) img =", "pycocotools_local.coco import * import os.path as osp from .utils import to_tensor, random_scale from", "else: img = orig_img # randomly sample a scale img_scale = random_scale(self.img_scales, self.multiscale_mode)", "5: scores = proposals[:, 4, None] proposals = proposals[:, :4] else: scores =", "import DataContainer as DC import mmcv from .custom import CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES", "Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, mask_polys,", "CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES = ('microbleed', 'full_bounding_box') def load_annotations(self, ann_file): self.coco = COCO(ann_file)", "slices_ann_info['g'].append(info) elif 
info['slice_label'] == 'b': slices_ann_info['b'].append(info) gt_bboxes = [] gt_labels = [] gt_bboxes_ignore", "= [len(p) for p in mask_polys] cur_mask_polys.append(mask_polys) cur_poly_lens.extend(poly_lens) if cur_slice_bboxes: cur_slice_bboxes = np.array(cur_slice_bboxes,", "h < 1: continue bbox = [x1, y1, x1 + w - 1,", "skip the image if there is no valid gt bbox if len(gt_bboxes) ==", "= gt_seg[None, ...] if self.proposals is not None: proposals = self.bbox_transform(proposals, img_shape, scale_factor,", "img.copy() if self.with_seg: gt_seg = mmcv.imread( osp.join(self.seg_prefix, img_info['file_name'].replace( 'jpg', 'png')), flag='unchanged') gt_seg =", "if self.proposals is not None: proposals = self.bbox_transform(proposals, img_shape, scale_factor, flip) proposals =", "if self.with_crowd: gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape, scale_factor, flip) if self.with_mask: gt_masks = self.mask_transform(gt_masks,", "self.cat_ids = self.coco.getCatIds() self.cat2label = { cat_id: i + 1 for i, cat_id", "img_meta=DC(img_meta, cpu_only=True)) self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes))) if self.proposals is not None: self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals)))", "info['slice_label'] == 'b': slices_ann_info['b'].append(info) gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = []", "[] gt_labels = [] gt_bboxes_ignore = [] # Two formats are provided. #", "= None ann = self.get_ann_info(idx) gt_bboxes_list = ann['bboxes'] gt_labels_list = ann['labels'] # if", "several polys, each poly is a # list of float. if with_mask: gt_masks", "gt_bboxes, gt_labels = self.extra_aug(orig_img, gt_bboxes, gt_labels) else: img = orig_img # randomly sample", "return img_infos def get_ann_info(self, idx): img_id = self.img_infos[idx]['id'] ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info =", "] # valid polygons have >= 3 points (6 coordinates) poly_lens = [len(p)", "each poly is a # list of float. 
if with_mask: gt_masks = []", "slices_ann_info: cur_ann_info = slices_ann_info[key] cur_slice_bboxes = [] cur_slice_labels = [] cur_slice_bboxes_ignore = []", "load image orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals if necessary if self.proposals", "self.with_label: self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels))) if self.with_crowd: self.insert_to_dict(data, 'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore))) if self.with_mask: self.insert_to_dict(data, 'gt_masks',", "4) or (n, 5), ' 'but found {}'.format(proposals.shape)) if proposals.shape[1] == 5: scores", "ann_info (list[dict]): Annotation info of an image. with_mask (bool): Whether to parse mask", "following keys: bboxes, bboxes_ignore, labels, masks, mask_polys, poly_lens. \"\"\" slices_ann_info = {'r': [],", "with_mask: ann['masks'] = gt_masks # poly format is not used in the current", "used in the current implementation ann['mask_polys'] = gt_mask_polys ann['poly_lens'] = gt_poly_lens return ann", "'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore))) if self.with_mask: self.insert_to_dict(data, 'gt_masks', DC(gt_masks, cpu_only=True)) if self.with_seg: self.insert_to_dict(data, 'gt_semantic_seg', DC(to_tensor(gt_seg),", "'b': slices_ann_info['b'].append(info) gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] # Two", "x1, y1, w, h = ann['bbox'] if ann['area'] <= 0 or w <", "= self.extra_aug(orig_img, gt_bboxes, gt_labels) else: img = orig_img # randomly sample a scale", "img_info['height']) >= min_size: valid_inds.append(i) return valid_inds def _parse_ann_info(self, ann_info, with_mask=True): \"\"\"Parse bbox and", "with_mask: cur_masks.append(self.coco.annToMask(ann)) mask_polys = [ p for p in ann['segmentation'] if len(p) >=", "img_shape, scale_factor, flip) if self.with_crowd: gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape, scale_factor, flip) if self.with_mask:", "scores = proposals[:, 4, None] proposals = proposals[:, 
:4] else: scores = None", "i + 1 for i, cat_id in enumerate(self.cat_ids) } self.img_ids = self.coco.getImgIds() img_infos", "poly is a # list of float. if with_mask: gt_masks = [] gt_mask_polys", "= mmcv.imread( osp.join(self.seg_prefix, img_info['file_name'].replace( 'jpg', 'png')), flag='unchanged') gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg", "DC(to_tensor(gt_labels))) if self.with_crowd: self.insert_to_dict(data, 'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore))) if self.with_mask: self.insert_to_dict(data, 'gt_masks', DC(gt_masks, cpu_only=True)) if", "self.flip_ratio else False data = None for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list,", "pad_shape, scale_factor = self.img_transform( img, img_scale, flip, keep_ratio=self.resize_keep_ratio) img = img.copy() if self.with_seg:", "valid polygons have >= 3 points (6 coordinates) poly_lens = [len(p) for p", "if cur_slice_bboxes: cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32) cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64) else: cur_slice_bboxes =", "each mask consists of one or several polys, each poly is a #", "in data: data[key].append(tensors) else: data[key] = [tensors] def prepare_train_img(self, idx): img_info = self.img_infos[idx]", "False data = None for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list,", "dict( ori_shape=ori_shape, img_shape=img_shape, pad_shape=pad_shape, scale_factor=scale_factor, flip=flip, image_id=img_info['id']) data = dict( img=DC(to_tensor(img), stack=True), img_meta=DC(img_meta,", "if len(proposals) == 0: return None if not (proposals.shape[1] == 4 or proposals.shape[1]", "properly. Currently images with # no proposals are just ignored, but they can", "ann['masks'] = gt_masks # poly format is not used in the current implementation", "map of the same size of the image. # 2. 
polys: each mask", "cat_id in enumerate(self.cat_ids) } self.img_ids = self.coco.getImgIds() img_infos = [] for i in", "\"\"\"Parse bbox and mask annotation. Args: ann_info (list[dict]): Annotation info of an image.", "= set(_['image_id'] for _ in self.coco.anns.values()) for i, img_info in enumerate(self.img_infos): if self.img_ids[i]", "# apply transforms flip = True if np.random.rand() < self.flip_ratio else False data", "is no valid gt bbox if len(gt_bboxes) == 0: return None # extra", "self.img_infos[idx] # load image orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename'])) # load proposals if necessary", "gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor, interpolation='nearest') gt_seg = gt_seg[None, ...] if self.proposals is", "if scores is not None else proposals gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor, flip)", "bbox and mask annotation. Args: ann_info (list[dict]): Annotation info of an image. with_mask", "if with_mask: ann['masks'] = gt_masks # poly format is not used in the", "= [tensors] def prepare_train_img(self, idx): img_info = self.img_infos[idx] # load image orig_img =", "ann['iscrowd']: cur_slice_bboxes_ignore.append(bbox) else: cur_slice_bboxes.append(bbox) cur_slice_labels.append(self.cat2label[ann['category_id']]) if with_mask: cur_masks.append(self.coco.annToMask(ann)) mask_polys = [ p for", "1: continue bbox = [x1, y1, x1 + w - 1, y1 +", "= [] cur_slice_bboxes_ignore = [] cur_masks = [] cur_mask_polys = [] cur_poly_lens =", "cur_slice_labels = np.array([], dtype=np.int64) if cur_slice_bboxes_ignore: cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32) else: cur_slice_bboxes_ignore =", "[] cur_slice_labels = [] cur_slice_bboxes_ignore = [] cur_masks = [] cur_mask_polys = []", "ground truths.\"\"\" valid_inds = [] ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) for", "None # extra augmentation if self.extra_aug is not None: img, gt_bboxes, gt_labels =", 
"from .utils import to_tensor, random_scale from mmcv.parallel import DataContainer as DC import mmcv", "= ann['bboxes'] gt_labels_list = ann['labels'] # if self.with_crowd: gt_bboxes_ignore_list = ann['bboxes_ignore'] gt_masks_list =", "elif info['slice_label'] == 'g': slices_ann_info['g'].append(info) elif info['slice_label'] == 'b': slices_ann_info['b'].append(info) gt_bboxes = []", "stack=True), img_meta=DC(img_meta, cpu_only=True)) self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes))) if self.proposals is not None: self.insert_to_dict(data, 'proposals',", "TODO: Handle empty proposals properly. Currently images with # no proposals are just", "ann['poly_lens'] = gt_poly_lens return ann def insert_to_dict(self, data, key, tensors): if key in", "if self.proposals is not None: proposals = self.proposals[idx][:self.num_max_proposals] # TODO: Handle empty proposals", "gt_mask_polys ann['poly_lens'] = gt_poly_lens return ann def insert_to_dict(self, data, key, tensors): if key", "def insert_to_dict(self, data, key, tensors): if key in data: data[key].append(tensors) else: data[key] =", "self.seg_transform(gt_seg.squeeze(), img_scale, flip) gt_seg = mmcv.imrescale( gt_seg, self.seg_scale_factor, interpolation='nearest') gt_seg = gt_seg[None, ...]", "elif info['slice_label'] == 'b': slices_ann_info['b'].append(info) gt_bboxes = [] gt_labels = [] gt_bboxes_ignore =", "self.img_ids = self.coco.getImgIds() img_infos = [] for i in self.img_ids: info = self.coco.loadImgs([i])[0]", "= self.proposals[idx][:self.num_max_proposals] # TODO: Handle empty proposals properly. Currently images with # no", "AssertionError( 'proposals should have shapes (n, 4) or (n, 5), ' 'but found", "gt_masks = self.mask_transform(gt_masks, pad_shape, scale_factor, flip) if data is None: ori_shape = (img_info['height'],", "poly_lens. 
\"\"\" slices_ann_info = {'r': [], 'g': [], 'b': []} for info in", "+ h - 1] if ann['iscrowd']: cur_slice_bboxes_ignore.append(bbox) else: cur_slice_bboxes.append(bbox) cur_slice_labels.append(self.cat2label[ann['category_id']]) if with_mask: cur_masks.append(self.coco.annToMask(ann))", "data = None for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list):", "= dict( img=DC(to_tensor(img), stack=True), img_meta=DC(img_meta, cpu_only=True)) self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes))) if self.proposals is not", "self.cat2label = { cat_id: i + 1 for i, cat_id in enumerate(self.cat_ids) }", "bboxes_ignore, labels, masks, mask_polys, poly_lens. \"\"\" slices_ann_info = {'r': [], 'g': [], 'b':", "[] cur_masks = [] cur_mask_polys = [] cur_poly_lens = [] for i, ann", "= dict( bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore) if with_mask: ann['masks'] = gt_masks # poly format", "min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) return valid_inds def _parse_ann_info(self, ann_info, with_mask=True): \"\"\"Parse bbox", "current implementation ann['mask_polys'] = gt_mask_polys ann['poly_lens'] = gt_poly_lens return ann def insert_to_dict(self, data,", "import CustomDataset class CocoDatasetRGB2(CustomDataset): CLASSES = ('microbleed', 'full_bounding_box') def load_annotations(self, ann_file): self.coco =", "= self.img_infos[idx]['id'] ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info = self.coco.loadAnns(ann_ids) return self._parse_ann_info(ann_info, self.with_mask) def _filter_imgs(self,", "provided. # 1. 
class CocoDatasetRGB2(CustomDataset):
    """COCO-style dataset whose per-image annotations each carry a
    ``slice_label`` in {'r', 'g', 'b'}; ground truths are parsed into three
    parallel per-slice lists instead of one flat annotation set, and
    ``prepare_train_img`` emits one transformed ground-truth group per slice.
    """

    CLASSES = ('microbleed', 'full_bounding_box')

    def load_annotations(self, ann_file):
        """Load a COCO-format annotation file.

        Args:
            ann_file (str): Path to the COCO annotation json.

        Returns:
            list[dict]: One info dict per image, with ``filename`` copied
                from ``file_name`` for downstream loaders.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.getCatIds()
        # COCO category ids are arbitrary; map them to contiguous labels
        # starting at 1 (0 is conventionally reserved for background).
        self.cat2label = {
            cat_id: i + 1
            for i, cat_id in enumerate(self.cat_ids)
        }
        self.img_ids = self.coco.getImgIds()
        img_infos = []
        for img_id in self.img_ids:
            info = self.coco.loadImgs([img_id])[0]
            info['filename'] = info['file_name']
            img_infos.append(info)
        return img_infos

    def get_ann_info(self, idx):
        """Return the parsed annotation dict for the image at ``idx``."""
        img_id = self.img_infos[idx]['id']
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        ann_info = self.coco.loadAnns(ann_ids)
        return self._parse_ann_info(ann_info, self.with_mask)

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        ids_with_ann = {ann['image_id'] for ann in self.coco.anns.values()}
        for i, img_info in enumerate(self.img_infos):
            if self.img_ids[i] not in ids_with_ann:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _parse_ann_info(self, ann_info, with_mask=True):
        """Parse bbox and mask annotation, grouped by slice label.

        Args:
            ann_info (list[dict]): Annotation info of an image; every entry
                must carry a ``slice_label`` in {'r', 'g', 'b'} (entries with
                any other label are silently dropped, as before).
            with_mask (bool): Whether to parse mask annotations.

        Returns:
            dict: A dict with keys bboxes, labels, bboxes_ignore and, when
                ``with_mask`` is True, masks, mask_polys, poly_lens. Each
                value is a list with one entry per slice ('r', 'g', 'b').
        """
        slices_ann_info = {'r': [], 'g': [], 'b': []}
        for info in ann_info:
            if info['slice_label'] in slices_ann_info:
                slices_ann_info[info['slice_label']].append(info)
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        # Two formats are provided.
        # 1. mask: a binary map of the same size of the image.
        # 2. polys: each mask consists of one or several polys, each poly
        #    is a list of float.
        if with_mask:
            gt_masks = []
            gt_mask_polys = []
            gt_poly_lens = []
        for key in slices_ann_info:
            cur_ann_info = slices_ann_info[key]
            cur_slice_bboxes = []
            cur_slice_labels = []
            cur_slice_bboxes_ignore = []
            cur_masks = []
            cur_mask_polys = []
            cur_poly_lens = []
            for ann in cur_ann_info:
                if ann.get('ignore', False):
                    continue
                x1, y1, w, h = ann['bbox']
                # Drop degenerate boxes (empty area or sub-pixel extent).
                if ann['area'] <= 0 or w < 1 or h < 1:
                    continue
                bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
                if ann['iscrowd']:
                    cur_slice_bboxes_ignore.append(bbox)
                else:
                    cur_slice_bboxes.append(bbox)
                    cur_slice_labels.append(self.cat2label[ann['category_id']])
                if with_mask:
                    cur_masks.append(self.coco.annToMask(ann))
                    # Valid polygons have >= 3 points (6 coordinates).
                    mask_polys = [
                        p for p in ann['segmentation'] if len(p) >= 6
                    ]
                    poly_lens = [len(p) for p in mask_polys]
                    cur_mask_polys.append(mask_polys)
                    cur_poly_lens.extend(poly_lens)
            if cur_slice_bboxes:
                cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32)
                cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64)
            else:
                cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32)
                cur_slice_labels = np.array([], dtype=np.int64)
            if cur_slice_bboxes_ignore:
                cur_slice_bboxes_ignore = np.array(
                    cur_slice_bboxes_ignore, dtype=np.float32)
            else:
                cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
            gt_bboxes.append(cur_slice_bboxes)
            gt_labels.append(cur_slice_labels)
            gt_bboxes_ignore.append(cur_slice_bboxes_ignore)
            # BUG FIX: the mask accumulators only exist when with_mask is
            # True; appending unconditionally raised NameError otherwise.
            if with_mask:
                gt_masks.append(cur_masks)
                gt_mask_polys.append(cur_mask_polys)
                gt_poly_lens.append(cur_poly_lens)
        ann = dict(
            bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
        if with_mask:
            ann['masks'] = gt_masks
            # poly format is not used in the current implementation
            ann['mask_polys'] = gt_mask_polys
            ann['poly_lens'] = gt_poly_lens
        return ann

    def insert_to_dict(self, data, key, tensors):
        """Append ``tensors`` to ``data[key]``, creating the list if absent."""
        data.setdefault(key, []).append(tensors)

    def prepare_train_img(self, idx):
        """Build one training sample containing per-slice ground truths.

        Args:
            idx (int): Index into ``self.img_infos``.

        Returns:
            dict | None: Pipeline output dict, or None when the sample must
                be skipped (no proposals, or any slice without a valid box).
        """
        img_info = self.img_infos[idx]
        # load image
        orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
        # load proposals if necessary
        scores = None
        if self.proposals is not None:
            proposals = self.proposals[idx][:self.num_max_proposals]
            # TODO: Handle empty proposals properly. Currently images with
            # no proposals are just ignored, but they can be used for
            # training in concept.
            if len(proposals) == 0:
                return None
            if proposals.shape[1] not in (4, 5):
                raise AssertionError(
                    'proposals should have shapes (n, 4) or (n, 5), '
                    'but found {}'.format(proposals.shape))
            if proposals.shape[1] == 5:
                scores = proposals[:, 4, None]
                proposals = proposals[:, :4]

        ann = self.get_ann_info(idx)
        gt_bboxes_list = ann['bboxes']
        gt_labels_list = ann['labels']
        # if self.with_crowd:
        gt_bboxes_ignore_list = ann['bboxes_ignore']
        # BUG FIX: 'masks' is only present when with_mask is True; avoid a
        # KeyError by substituting per-slice placeholders otherwise.
        gt_masks_list = ann.get('masks', [None] * len(gt_bboxes_list))

        # apply transforms
        flip = bool(np.random.rand() < self.flip_ratio)
        data = None
        for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(
                gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list,
                gt_masks_list):
            # skip the image if there is no valid gt bbox
            if len(gt_bboxes) == 0:
                return None
            # extra augmentation
            if self.extra_aug is not None:
                img, gt_bboxes, gt_labels = self.extra_aug(
                    orig_img, gt_bboxes, gt_labels)
            else:
                img = orig_img
            # randomly sample a scale
            # NOTE(review): the scale is re-sampled per slice, yet img_meta
            # keeps only the first slice's shape/scale_factor -- confirm
            # this is intended before relying on img_meta downstream.
            img_scale = random_scale(self.img_scales, self.multiscale_mode)
            img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
            img = img.copy()
            if self.with_seg:
                gt_seg = mmcv.imread(
                    osp.join(self.seg_prefix,
                             img_info['file_name'].replace('jpg', 'png')),
                    flag='unchanged')
                gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
                gt_seg = mmcv.imrescale(
                    gt_seg, self.seg_scale_factor, interpolation='nearest')
                gt_seg = gt_seg[None, ...]
            if self.proposals is not None:
                # BUG FIX: transform into a per-iteration local instead of
                # rebinding ``proposals``; the original re-transformed the
                # already-scaled (and score-augmented) boxes on every slice
                # after the first one.
                cur_proposals = self.bbox_transform(
                    proposals, img_shape, scale_factor, flip)
                if scores is not None:
                    cur_proposals = np.hstack([cur_proposals, scores])
            gt_bboxes = self.bbox_transform(
                gt_bboxes, img_shape, scale_factor, flip)
            if self.with_crowd:
                gt_bboxes_ignore = self.bbox_transform(
                    gt_bboxes_ignore, img_shape, scale_factor, flip)
            if self.with_mask:
                gt_masks = self.mask_transform(
                    gt_masks, pad_shape, scale_factor, flip)

            if data is None:
                ori_shape = (img_info['height'], img_info['width'], 3)
                img_meta = dict(
                    ori_shape=ori_shape,
                    img_shape=img_shape,
                    pad_shape=pad_shape,
                    scale_factor=scale_factor,
                    flip=flip,
                    image_id=img_info['id'])
                data = dict(
                    img=DC(to_tensor(img), stack=True),
                    img_meta=DC(img_meta, cpu_only=True))
            self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes)))
            if self.proposals is not None:
                self.insert_to_dict(data, 'proposals',
                                    DC(to_tensor(cur_proposals)))
            if self.with_label:
                self.insert_to_dict(data, 'gt_labels',
                                    DC(to_tensor(gt_labels)))
            if self.with_crowd:
                self.insert_to_dict(data, 'gt_bboxes_ignore',
                                    DC(to_tensor(gt_bboxes_ignore)))
            if self.with_mask:
                self.insert_to_dict(data, 'gt_masks',
                                    DC(gt_masks, cpu_only=True))
            if self.with_seg:
                self.insert_to_dict(data, 'gt_semantic_seg',
                                    DC(to_tensor(gt_seg), stack=True))
        return data
[ "mass1,mass2 def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0)", "sep_time(horizon1, horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2]", "from bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider from bokeh.io import curdoc from bokeh.layouts", "= None q_sliderFD = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_sliderFD =", "pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}')", "plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo", "update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR data\") tab2 = Panel(child=layoutan,title=\"Analytic FD\") #tab3 =", "= h22[:,1] + 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20 def 
get_data(which_data):", "= sourcep42, color='green',line_width=3) pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source = sourcep42,", "return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source =", "h20[:,1] + 1.j * h20[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] h22", "mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return", "+ sep_xy[1,:]**2. ) return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen, nseg): dt = t[1]", "t2 in zip(times, h2m2[:,0]): assert t1 == t2 h22 = h22[:,1] + 1.j", "# command to run at your command prompt. 
# Then navigate to the", "pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source = sourcep2, color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500,", "value=0, step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin2z\") model_select = Select(title=\"TD", "means def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h22 =", "pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source", "model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1", "tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout = row(column(p,data_table),column(k,s),r)", "* h22[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] return times,h22, h2m2 def", "= h22[:,0] for t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2 h22", "mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat, 
eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f))", "'r') as f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return", "plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3) pn24.toolbar.logo = None q_sliderFD = Slider(start=1,", "new): # Get the current slider values q = q_slider.value e = e_slider.value", "Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin2z\") model_select", "Get the current slider values q = q_slider.value e = e_slider.value s1z =", "h20[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] h22 = h22[:,1] + 1.j", "= sourcep42, color='red',line_width=3) pn44.toolbar.logo = None q_slider = Slider(start=1, end=10, value=1, step=.5, title=\"Mass", "horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep =", "source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass # ============================================================================= # Fourth panel", "#!/usr/bin/env python # coding: utf-8 # Copyright (C) 2021 <NAME> <<EMAIL>> # Visualization", "int(st + seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break st = st + 
seglen", "plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}')", "times,h22, h2m2 def get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h21", "in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR data\") tab2 = Panel(child=layoutan,title=\"Analytic", "end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider", "values q = q_sliderFD.value e = e_sliderFD.value approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data =", "def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2", "s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) 
return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD}", "at your command prompt. # Then navigate to the URL http://localhost:5006/main in your", "y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo = None q_slider = Slider(start=1, end=10, value=1, step=.5,", "AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1,", "Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\")", "np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. 
) return hor_times,sep,dx[:2000],dy[:2000]", "Visualization of SXS and analytic waveform model # Use by executing: bokeh serve", "for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third panel #", "f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0]", "color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42,", "to the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError:", "* h2m2[:,2] return times,h22, h2m2 def get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r')", "output_file, show, output_notebook from bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider from bokeh.io import", "Models\", options=td_approximants()) def update_slider2(attrname, old, new): # Get the current slider values q", "old, new): # Get the current slider values q = q_sliderFD.value e =", "pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3)", "as f: AhA = 
f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC", "# Use by executing: bokeh serve main.py # command to run at your", "return norm_time,h21, h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep,", "old, new): try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval # changed this", "horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. ) return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x,", "B #AhC=common apparent horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as f: AhA", "def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j", "h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:]", "eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data 
freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq}", "e = e_sliderFD.value approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in", "= f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1, t2 in zip(times,", "Select, PreText,Panel, Tabs, Slider from bokeh.io import curdoc from bokeh.layouts import column, row,", "selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval # changed this to the dict", "pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42,", "{'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third panel", "break st = st + seglen return times, means def get_h22(sxs_data): with h5py.File(", "1.j * h2m2[:,2] return times,h22, h2m2 def get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ),", "= e_slider.value s1z = s1z_slider.value s2z = s2z_slider.value approximant = model_select.value 
timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data", "navigate to the URL http://localhost:5006/main in your browser. import numpy as np import", "* h2m2[:,2] h22 = h22[:,1] + 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2,", "h2m2[:,1] + 1.j * h2m2[:,2] return times,h22, h2m2 def get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data,", "# Third panel # ============================================================================= def update_table2(attrname, old, new): try: selected_index = source31.selected.indices[0]", "Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_slider = Slider(start=0., end=0.9, value=0, step=.05,", "end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity", "f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1]", "approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider)", "options=fd_approximants()) def update_slider(attrname, old, new): # Get the current slider values q =", "import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc import types from bokeh.models import TableColumn,ColumnDataSource,DataTable from", "sep_xy[1,:]**2. 
) return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen, nseg): dt = t[1] -", "= {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR", "bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider from bokeh.io import curdoc from bokeh.layouts import", "f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1, t2 in zip(times, h2m2[:,0]): assert t1 ==", "plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3) pn24.toolbar.logo = None q_sliderFD = Slider(start=1, end=10,", "in zip(times, h2m2[:,0]): assert t1 == t2 h21 = h21[:,1] + 1.j *", "= Panel(child=layoutan,title=\"Analytic FD\") #tab3 = Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs =", "zip(times, h2m2[:,0]): assert t1 == t2 h21 = h21[:,1] + 1.j * h21[:,2]", "TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout = row(column(p,data_table),column(k,s),r) curdoc().add_root(tabs) curdoc().title =", "update_slider2(attrname, old, new): # Get the current slider values q = q_slider.value e", "# Copyright (C) 2021 <NAME> <<EMAIL>> # Visualization of SXS and analytic waveform", "seglen return times, means def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as", "def get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:]", "lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', 
plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source =", "SXS and analytic waveform model # Use by executing: bokeh serve main.py #", "en = int(st + seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break st = st", "if approximant in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return", "- t[0] means = [] times = [] st = 0 for i", "+ 1.j * h2m1[:,2] h20 = h20[:,1] + 1.j * h20[:,2] h2m2 =", "InterpolatedUnivariateSpline as unispline # ============================================================================= # First panel # ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html", "horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2. 
+ sep_xy[1,:]**2.", "title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname, old, new): # Get", "pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}')", "assert t1 == t2 h22 = h22[:,1] + 1.j * h22[:,2] h2m2 =", "q = q_slider.value e = e_slider.value s1z = s1z_slider.value s2z = s2z_slider.value approximant", "h22[:,0] for t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2 h21 =", "pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source = sourcep2, color='green',line_width=3) pn23.toolbar.logo = None", "= data_path+sval # changed this to the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal':", "with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB =", "try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval # changed this to the", "your command prompt. 
# Then navigate to the URL http://localhost:5006/main in your browser.", "h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data)", "model # Use by executing: bokeh serve main.py # command to run at", "times = h22[:,0] for t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2", "time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass # =============================================================================", "0 for i in range(int(len(x)/nseg)): en = int(st + seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en]))", "grid, layout from scipy.interpolate import InterpolatedUnivariateSpline as unispline # ============================================================================= # First panel", "h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for", "= q_slider.value e = e_slider.value s1z = s1z_slider.value s2z = s2z_slider.value approximant =", "import h5py import json import glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from", "============================================================================= # First panel # ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent", "except: break st = st + seglen return times, means def 
get_h22(sxs_data): with", "step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin2z\") model_select = Select(title=\"TD Models\",", "= None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo", "column, row, grid, layout from scipy.interpolate import InterpolatedUnivariateSpline as unispline # ============================================================================= #", "1.j * h2m2[:,2] h22 = h22[:,1] + 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21,", "sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval # changed this to the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data)", "sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42]", "============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity)", "h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:]", "def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:]", 
"f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times", "f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1, t2", "bokeh.layouts import column, row, grid, layout from scipy.interpolate import InterpolatedUnivariateSpline as unispline #", "- horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2. +", "FD\") #tab3 = Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs", "lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source =", "(e)\") s1z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1, end=1, value=0,", "= None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3) pn24.toolbar.logo = None", "e_slider = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1, end=1, value=0,", "(C) 2021 <NAME> <<EMAIL>> # Visualization of SXS and analytic waveform model #", "mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase 
timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42)", "means = [] times = [] st = 0 for i in range(int(len(x)/nseg)):", "nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) dic_p42", "y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None", "values q = q_slider.value e = e_slider.value s1z = s1z_slider.value s2z = s2z_slider.value", "= s2z_slider.value approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]:", "serve main.py # command to run at your command prompt. 
# Then navigate", "sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source =", "in range(int(len(x)/nseg)): en = int(st + seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break st", "== t2 h22 = h22[:,1] + 1.j * h22[:,2] h2m2 = h2m2[:,1] +", "# ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent horizon B #AhC=common apparent", "* h21[:,2] h2m1 = h2m1[:,1] + 1.j * h2m1[:,2] h20 = h20[:,1] +", "scipy.interpolate import InterpolatedUnivariateSpline as unispline # ============================================================================= # First panel # ============================================================================= def", "options=td_approximants()) def update_slider2(attrname, old, new): # Get the current slider values q =", "URL http://localhost:5006/main in your browser. 
import numpy as np import os import h5py", "None q_sliderFD = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_sliderFD = Slider(start=0.,", "y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400)", "n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400)", "def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity)", "s1z_slider.value s2z = s2z_slider.value approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w", "e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD Models\", options=fd_approximants())", "pn44.toolbar.logo = None q_slider = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") 
e_slider", "lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad',", "get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2", "s1z = s1z_slider.value s2z = s2z_slider.value approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD}", "horizon1[:,2] - horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. ) return hor_times,sep,dx[:2000],dy[:2000] def", "analytic waveform model # Use by executing: bokeh serve main.py # command to", "except IndexError: pass # ============================================================================= # Fourth panel # ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024):", "color='red',line_width=3) pn44.toolbar.logo = None q_slider = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\")", "== t2 h21 = h21[:,1] + 1.j * h21[:,2] h2m1 = h2m1[:,1] +", "dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq',", "return mass1,mass2 
def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data", "executing: bokeh serve main.py # command to run at your command prompt. #", "# First panel # ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent horizon", "= sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source", "plot_height=400) pn23.line(x='freq', y='amp',source = sourcep2, color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq',", "Second panel # ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1):", "f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1, t2 in zip(times, h2m2[:,0]):", "h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB = 
f['AhB.dir/CoordCenterInertial.dat'][:]", "Panel(child=layoutNR, title=\"NR data\") tab2 = Panel(child=layoutan,title=\"Analytic FD\") #tab3 = Panel(child=layout3,title=\"NR l=2\") tab4 =", "pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo = None q_slider = Slider(start=1, end=10, value=1,", "pycbc import types from bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure, output_file, show,", "= h2m2[:,1] + 1.j * h2m2[:,2] return times,h22, h2m2 def get_hlm(sxs_data): with h5py.File(", "# ============================================================================= # First panel # ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A", "= e_sliderFD.value approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]:", "generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j", "seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break st = st + seglen return times,", 
"sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42,", "def update_slider(attrname, old, new): # Get the current slider values q = q_sliderFD.value", "= f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1,", "this to the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except", "= q_sliderFD.value e = e_sliderFD.value approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for", "h2m1 = h2m1[:,1] + 1.j * h2m1[:,2] h20 = h20[:,1] + 1.j *", "= sourcep2, color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2,", "your browser. 
import numpy as np import os import h5py import json import", "n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo =", "1.j * h21[:,2] h2m1 = h2m1[:,1] + 1.j * h2m1[:,2] h20 = h20[:,1]", "+ 1.j * h22[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] return times,h22,", "to the URL http://localhost:5006/main in your browser. import numpy as np import os", "panel # ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent horizon B #AhC=common", "# ============================================================================= def update_table2(attrname, old, new): try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data =", "w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third panel # ============================================================================= def update_table2(attrname, old,", "return times, means def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f:", "1.j * h20[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] h22 = h22[:,1]", "q = q_sliderFD.value e = e_sliderFD.value approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq}", "http://localhost:5006/main in your browser. 
import numpy as np import os import h5py import", "pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source = sourcep2, color='green',line_width=3)", "data_path+sval # changed this to the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag,", "update_table2(attrname, old, new): try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval # changed", "w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR data\") tab2 =", "pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo = None q_slider =", "= Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width')", "and analytic waveform model # Use by executing: bokeh serve main.py # command", "get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1", "t[1] - t[0] means = [] times = [] st = 0 for", "hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return 
hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400)", "= sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\"", "return times,h22, h2m2 def get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f:", "command to run at your command prompt. # Then navigate to the URL", "sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR,", "color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source = sourcep2,", "h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass # ============================================================================= #", "n22=pn22.line(x='freq', 
y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500,", "Third panel # ============================================================================= def update_table2(attrname, old, new): try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index]", "y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None", "w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR data\") tab2 = Panel(child=layoutan,title=\"Analytic FD\") #tab3", "s2z_slider.value approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value',", "def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22", "freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) 
layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # =============================================================================", "times = [] st = 0 for i in range(int(len(x)/nseg)): en = int(st", "st = 0 for i in range(int(len(x)/nseg)): en = int(st + seglen/dt) try:", "unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # =============================================================================", "Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname, old, new): # Get the current slider values", "title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1, end=1,", "from bokeh.io import curdoc from bokeh.layouts import column, row, grid, layout from scipy.interpolate", "= h21[:,1] + 1.j * h21[:,2] h2m1 = h2m1[:,1] + 1.j * h2m1[:,2]", "\"Horizons.h5\" ), 'r') as f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC =", "Get the current slider values q = q_sliderFD.value e = e_sliderFD.value approximant =", "pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500,", "end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_slider = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity", "bokeh serve main.py # command to run at your command prompt. 
# Then", "curdoc from bokeh.layouts import column, row, grid, layout from scipy.interpolate import InterpolatedUnivariateSpline as", "+ 1.j * h2m2[:,2] h22 = h22[:,1] + 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return", "q_sliderFD = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_sliderFD = Slider(start=0., end=0.9,", "'r') as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for", "n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500,", "sep = np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. ) return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen,", "color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3) pn24.toolbar.logo", "horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB", "sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= #", "h22[:,0] for t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2 h22 =", "as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 =", "pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, 
plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}')", "step=.05, title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname, old, new): #", "t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2 h21 = h21[:,1] +", "hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second panel # ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m", "plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo = None q_slider = Slider(start=1,", "pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source", "def sep_time(horizon1, horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1],", "eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs)", "# Get the current slider values q = 
q_slider.value e = e_slider.value s1z", "Slider(start=-1, end=1, value=0, step=.05, title=\"Spin2z\") model_select = Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname, old,", "prompt. # Then navigate to the URL http://localhost:5006/main in your browser. import numpy", "the current slider values q = q_slider.value e = e_slider.value s1z = s1z_slider.value", "Use by executing: bokeh serve main.py # command to run at your command", "moving_average(t, x, seglen, nseg): dt = t[1] - t[0] means = [] times", "= h20[:,1] + 1.j * h20[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2]", "moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)]", "= h22[:,0] for t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2 h21", "= Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_slider = Slider(start=0., end=0.9, value=0,", "return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) 
dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source =", "plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400)", "= {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source", "ratio (q)\") e_slider = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1,", "pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source = sourcep2,", "y='phase',source = sourcep2, color='red',line_width=3) pn24.toolbar.logo = None q_sliderFD = Slider(start=1, end=10, value=1, step=.5,", "= t[1] - t[0] means = [] times = [] st = 0", "pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3) pn24.toolbar.logo =", "= f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1, t2 in zip(times, h2m2[:,0]): assert t1", "y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, 
color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\"", "= source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval # changed this to the dict time_hlm,h21,", "Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout = row(column(p,data_table),column(k,s),r) curdoc().add_root(tabs) curdoc().title", "horizon B #AhC=common apparent horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as f:", "plot_height=400) pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo = None q_slider = Slider(start=1, end=10,", "h2m1[:,1] + 1.j * h2m1[:,2] h20 = h20[:,1] + 1.j * h20[:,2] h2m2", "run at your command prompt. # Then navigate to the URL http://localhost:5006/main in", "f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def", "h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] h22 = h22[:,1] + 1.j *", "<reponame>Yoshinta/GWaviz<filename>bokeh-app/main.py<gh_stars>0 #!/usr/bin/env python # coding: utf-8 # Copyright (C) 2021 <NAME> <<EMAIL>> #", "+ 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB)", "# ============================================================================= # Second panel # ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2", "= {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) 
pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source", "IndexError: pass # ============================================================================= # Fourth panel # ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2']", "np import os import h5py import json import glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants,", "y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq',", "Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05,", "color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain',", "plot_height=400) n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo", "lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', 
plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source = sourcep2, color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad',", "TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure, output_file, show, output_notebook from bokeh.models.widgets import Select, PreText,Panel,", "y='amp',source = sourcep2, color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source =", "hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42,", "# ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m)", "sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') 
n12=pn21.line(x='freq', y='hp_imag',source = sourcep2,", "generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 =", "= f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 =", "for t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2 h21 = h21[:,1]", "# ============================================================================= # Fourth panel # ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant", "h2m2[:,0]): assert t1 == t2 h21 = h21[:,1] + 1.j * h21[:,2] h2m1", "apparent horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:]", "pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', 
y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo =", "sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second panel # ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1", "plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source = sourcep2, color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400)", "= Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout = row(column(p,data_table),column(k,s),r) curdoc().add_root(tabs) curdoc().title = \"Eccentric Waveforms", "= f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] -", "h22 = h22[:,1] + 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20 def", "<<EMAIL>> # Visualization of SXS and analytic waveform model # Use by executing:", "np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep,", "changed this to the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag}", "sourcep42, color='red',line_width=3) pn44.toolbar.logo = None q_slider = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio", "sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" 
lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source =", "# Fourth panel # ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models:", "td_approximants from pycbc import types from bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure,", "* h20[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] h22 = h22[:,1] +", "from scipy.interpolate import InterpolatedUnivariateSpline as unispline # ============================================================================= # First panel # =============================================================================", "os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times", "h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1, t2 in", "def moving_average(t, x, seglen, nseg): dt = t[1] - t[0] means = []", "times, means def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h22", "np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. 
) return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen, nseg): dt", "= 0 for i in range(int(len(x)/nseg)): en = int(st + seglen/dt) try: times.append(t[st])", "1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t,", "# Get the current slider values q = q_sliderFD.value e = e_sliderFD.value approximant", "sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return", "============================================================================= # Fourth panel # ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in", "plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo =", "times.append(t[st]) means.append(np.mean(x[st:en])) except: break st = st + seglen return times, means def", "norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times,", "def update_table2(attrname, old, new): try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = 
data_path+sval #", "timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}')", "= Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_sliderFD = Slider(start=0., end=0.9, value=0,", "Slider from bokeh.io import curdoc from bokeh.layouts import column, row, grid, layout from", "= None pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD',", "color='red',line_width=3) pn24.toolbar.logo = None q_sliderFD = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\")", "Models\", options=fd_approximants()) def update_slider(attrname, old, new): # Get the current slider values q", "types from bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure, output_file, show, output_notebook from", "freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy", "pn24.toolbar.logo = None q_sliderFD = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_sliderFD", "# ============================================================================= # Third panel # ============================================================================= def update_table2(attrname, old, new): try: selected_index", "pass # 
============================================================================= # Fourth panel # ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if", "amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD',", "= {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third", "h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass # ============================================================================= # Fourth panel # ============================================================================= def generate_TD_waveform(mass_rat,", "n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None 
pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400)", "#tab3 = Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs =", "in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.)", "as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1,", "range(int(len(x)/nseg)): en = int(st + seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break st =", "= [] st = 0 for i in range(int(len(x)/nseg)): en = int(st +", "in your browser. import numpy as np import os import h5py import json", "h2m1[:,2] h20 = h20[:,1] + 1.j * h20[:,2] h2m2 = h2m2[:,1] + 1.j", "approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) 
dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500,", "q_slider.value e = e_slider.value s1z = s1z_slider.value s2z = s2z_slider.value approximant = model_select.value", "model_selectFD = Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname, old, new): # Get the current", "title=\"Mass ratio (q)\") e_slider = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider =", "by executing: bokeh serve main.py # command to run at your command prompt.", "source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq',", "approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2)", "get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 =", "), 'r') as f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:]", "from pycbc import types from bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure, output_file,", "color='green',line_width=3) pn43.toolbar.logo = 
None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo", "step=.5, title=\"Mass ratio (q)\") e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD", "Panel(child=layoutan,title=\"Analytic FD\") #tab3 = Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width')", "import json import glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc import", "from bokeh.layouts import column, row, grid, layout from scipy.interpolate import InterpolatedUnivariateSpline as unispline", "= np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. ) return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen, nseg):", "# Visualization of SXS and analytic waveform model # Use by executing: bokeh", "source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval # changed this to the dict time_hlm,h21, h2m1,h2m2lm,", "Fourth panel # ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models: s1z=0", "), 'r') as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0]", "for t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2 
h22 = h22[:,1]", "of SXS and analytic waveform model # Use by executing: bokeh serve main.py", "norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second panel # ============================================================================= def", "data\") tab2 = Panel(child=layoutan,title=\"Analytic FD\") #tab3 = Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\")", "e_sliderFD.value approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value',", "zip(times, h2m2[:,0]): assert t1 == t2 h22 = h22[:,1] + 1.j * h22[:,2]", "sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source =", "#tab1 = Panel(child=layoutNR, title=\"NR data\") tab2 = Panel(child=layoutan,title=\"Analytic FD\") #tab3 = Panel(child=layout3,title=\"NR l=2\")", "l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout =", "sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. 
)", "json import glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc import types", "as np import os import h5py import json import glob from pycbc.waveform import", "phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source", "color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain',", "model_select = Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname, old, new): # Get the current", "norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second", "open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent horizon B #AhC=common apparent horizon with h5py.File(", "sourcep42, color='green',line_width=3) pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3)", "pn23.line(x='freq', y='amp',source = sourcep2, 
color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source", "s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) dic_p42 =", "= None q_slider = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_slider =", "h2m2 def get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h21 =", "# Second panel # ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat,", "slider values q = q_slider.value e = e_slider.value s1z = s1z_slider.value s2z =", "model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) #", "= None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo = None", "f: h22 = 
f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1, t2", "glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc import types from bokeh.models", "#AhB=apparent horizon B #AhC=common apparent horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as", "h21[:,1] + 1.j * h21[:,2] h2m1 = h2m1[:,1] + 1.j * h2m1[:,2] h20", "- horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. ) return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t,", "h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times = h22[:,0] for t1, t2 in zip(times, h2m2[:,0]): assert", "coding: utf-8 # Copyright (C) 2021 <NAME> <<EMAIL>> # Visualization of SXS and", "current slider values q = q_slider.value e = e_slider.value s1z = s1z_slider.value s2z", "import column, row, grid, layout from scipy.interpolate import InterpolatedUnivariateSpline as unispline # =============================================================================", "Copyright (C) 2021 <NAME> <<EMAIL>> # Visualization of SXS and analytic waveform model", "the URL http://localhost:5006/main in your browser. 
import numpy as np import os import", "1.j * h22[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] return times,h22, h2m2", "n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo = None", "dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD',", "the current slider values q = q_sliderFD.value e = e_sliderFD.value approximant = model_selectFD.value", "title=\"Spin2z\") model_select = Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname, old, new): # Get the", "None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source = sourcep2, color='green',line_width=3) pn23.toolbar.logo =", "hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) 
dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400)", "value=0, step=.05, title=\"Spin2z\") model_select = Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname, old, new): #", "= horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep", "Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname, old, new): # Get the current slider values", "hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen, nseg): dt = t[1] - t[0] means =", "try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break st = st + seglen return times, means", "h2m2[:,2] h22 = h22[:,1] + 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20", "= sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\" lines=[n41,n42]", "seglen, nseg): dt = t[1] - t[0] means = [] times = []", "+ seglen return times, means def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r')", "layout from scipy.interpolate import InterpolatedUnivariateSpline as unispline # ============================================================================= # First panel #", "h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10)", "Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout 
= row(column(p,data_table),column(k,s),r) curdoc().add_root(tabs) curdoc().title = \"Eccentric Waveforms Visualization\"", "end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin2z\") model_select =", "= int(st + seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break st = st +", "h21 = h21[:,1] + 1.j * h21[:,2] h2m1 = h2m1[:,1] + 1.j *", "= Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout = row(column(p,data_table),column(k,s),r) curdoc().add_root(tabs)", "h20 = h20[:,1] + 1.j * h20[:,2] h2m2 = h2m2[:,1] + 1.j *", "approximant in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase", "h21[:,2] h2m1 = h2m1[:,1] + 1.j * h2m1[:,2] h20 = h20[:,1] + 1.j", "hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2,", "title=\"Spin1z\") s2z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin2z\") model_select = Select(title=\"TD Models\", options=td_approximants())", "= sourcep2, color='red',line_width=3) pn24.toolbar.logo = None q_sliderFD = 
Slider(start=1, end=10, value=1, step=.5, title=\"Mass", "= h22[:,1] + 1.j * h22[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2]", "as unispline # ============================================================================= # First panel # ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent", "), 'r') as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:]", "sourcep2, color='red',line_width=3) pn24.toolbar.logo = None q_sliderFD = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio", "nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB,", "AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second panel #", "figure, output_file, show, output_notebook from bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider from bokeh.io", "'r') as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22", "= sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\"", "h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass # ============================================================================= # Fourth panel # =============================================================================", "PreText,Panel, Tabs, Slider from bokeh.io import curdoc from 
bokeh.layouts import column, row, grid,", "= h2m2[:,1] + 1.j * h2m2[:,2] h22 = h22[:,1] + 1.j * h22[:,2]", "panel # ============================================================================= def update_table2(attrname, old, new): try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data", "title=\"NR data\") tab2 = Panel(child=layoutan,title=\"Analytic FD\") #tab3 = Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic", "panel # ============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models: s1z=0 s2z=0", "= model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select))", "AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2]", "sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22]", "browser. 
import numpy as np import os import h5py import json import glob", "q_sliderFD.value e = e_sliderFD.value approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w", "= Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname, old, new): # Get the current slider", "import os import h5py import json import glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform,", "waveform model # Use by executing: bokeh serve main.py # command to run", "import curdoc from bokeh.layouts import column, row, grid, layout from scipy.interpolate import InterpolatedUnivariateSpline", "y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD',", "main.py # command to run at your command prompt. 
# Then navigate to", "t1 == t2 h22 = h22[:,1] + 1.j * h22[:,2] h2m2 = h2m2[:,1]", "return AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy", "phase22 = np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time, h22,phase22,freq22,", "horizon A #AhB=apparent horizon B #AhC=common apparent horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ),", "hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]])", "approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500,", "[] st = 0 for i in range(int(len(x)/nseg)): en = int(st + seglen/dt)", "x, seglen, nseg): dt = t[1] - t[0] means = [] times =", "os import h5py import json import glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants", "python # coding: utf-8 # Copyright (C) 2021 <NAME> <<EMAIL>> # Visualization of", "= None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq', 
y='amp',source = sourcep2, color='green',line_width=3) pn23.toolbar.logo", "import numpy as np import os import h5py import json import glob from", "* h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep", "value=1, step=.5, title=\"Mass ratio (q)\") e_slider = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\")", "color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500,", "None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source = sourcep42, color='red',line_width=3) pn44.toolbar.logo = None q_slider", "output_notebook from bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider from bokeh.io import curdoc from", "hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) 
dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain',", "h22[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] return times,h22, h2m2 def get_hlm(sxs_data):", "new): try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval # changed this to", "A #AhB=apparent horizon B #AhC=common apparent horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r')", "+ 1.j * h21[:,2] h2m1 = h2m1[:,1] + 1.j * h2m1[:,2] h20 =", "h2m2[:,2] return times,h22, h2m2 def get_hlm(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as", "to run at your command prompt. # Then navigate to the URL http://localhost:5006/main", "= h2m1[:,1] + 1.j * h2m1[:,2] h20 = h20[:,1] + 1.j * h20[:,2]", "new): # Get the current slider values q = q_sliderFD.value e = e_sliderFD.value", "None pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD',", "utf-8 # Copyright (C) 2021 <NAME> <<EMAIL>> # Visualization of SXS and analytic", "old, new): # Get the current slider values q = q_slider.value e =", "AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22))", "AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy =", "+ 1.j * h2m2[:,2] return times,h22, h2m2 def get_hlm(sxs_data): with h5py.File( 
os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\"", "{'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source =", "+ seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break st = st + seglen return", "None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3) pn24.toolbar.logo = None q_sliderFD", "= s1z_slider.value s2z = s2z_slider.value approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for", "step=.05, title=\"Spin2z\") model_select = Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname, old, new): # Get", "panel # ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m)", "s1z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1, end=1, value=0, step=.05,", "value=1, step=.5, title=\"Mass ratio (q)\") e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\")", "[] times = [] 
st = 0 for i in range(int(len(x)/nseg)): en =", "h2m2[:,1] + 1.j * h2m2[:,2] h22 = h22[:,1] + 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)]", "s2z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin2z\") model_select = Select(title=\"TD Models\", options=td_approximants()) def", "None q_slider = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_slider = Slider(start=0.,", "= model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} for w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD))", "= [] times = [] st = 0 for i in range(int(len(x)/nseg)): en", "source = sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD',", "(e)\") model_selectFD = Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname, old, new): # Get the", "which_data = data_path+sval # changed this to the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag,", "for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR data\") tab2", 
"nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=abs(hs) phase=np.unwrap(np.angle(hs))", "in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third panel # ============================================================================= def", "h22[:,1] + 1.j * h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data)", "y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400)", "= np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time, h22,phase22,freq22, hor_times,", "bokeh.io import curdoc from bokeh.layouts import column, row, grid, layout from scipy.interpolate import", "t2 h21 = h21[:,1] + 1.j * h21[:,2] h2m1 = h2m1[:,1] + 1.j", "bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure, output_file, show, output_notebook from bokeh.models.widgets import", "n41=pn41.line(x='timeTD', y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', 
y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None", "# coding: utf-8 # Copyright (C) 2021 <NAME> <<EMAIL>> # Visualization of SXS", "#AhA=apparent horizon A #AhB=apparent horizon B #AhC=common apparent horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\"", "color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source = sourcep2,", "h22 = h22[:,1] + 1.j * h22[:,2] h2m2 = h2m2[:,1] + 1.j *", "with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 =", "os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as f: AhA = f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC", "= sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source", "t2 h22 = h22[:,1] + 1.j * h22[:,2] h2m2 = h2m2[:,1] + 1.j", "y='hp_realTD',source = sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\"", "ratio (q)\") e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD", "s2z = s2z_slider.value approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w 
in", "plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo", "mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22 = unispline(times,", "(q)\") e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD Models\",", ") return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen, nseg): dt = t[1] - t[0]", "= f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times = horizon1[:,0]", "'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass # ============================================================================= # Fourth panel # ============================================================================= def", "============================================================================= def update_table2(attrname, old, new): try: selected_index = source31.selected.indices[0] sval=source31.data[\"Simulation\"][selected_index] which_data = data_path+sval", "norm_time,h21, h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000,", "Tabs, Slider from bokeh.io import curdoc from bokeh.layouts import column, row, grid, layout", "end=1, value=0, step=.05, title=\"Spin2z\") model_select = Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname, old, new):", "update_slider(attrname, old, new): # Get the current slider values q = q_sliderFD.value e", "= Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1, 
end=1, value=0, step=.05, title=\"Spin2z\")", "AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times =", "return AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second panel", "============================================================================= # Second panel # ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def", "layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR data\") tab2 = Panel(child=layoutan,title=\"Analytic FD\") #tab3 = Panel(child=layout3,title=\"NR", "sourcep42, color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain',", "h22[:,2] norm_time=times-times[np.argmax(h22)] return norm_time,h21, h2m1,h2m2, h22,h20 def get_data(which_data): AhA,AhB,AhC=open_sxs(which_data) hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep =", "hor_times,sep,dx,dy=sep_time(AhA,AhB) mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22", "# ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) 
hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower,", "bokeh.plotting import figure, output_file, show, output_notebook from bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider", "plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD',", "= Slider(start=-1, end=1, value=0, step=.05, title=\"Spin2z\") model_select = Select(title=\"TD Models\", options=td_approximants()) def update_slider2(attrname,", "h2m2[:,0]): assert t1 == t2 h22 = h22[:,1] + 1.j * h22[:,2] h2m2", "slider values q = q_sliderFD.value e = e_sliderFD.value approximant = model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant) sourcep2.data", "= st + seglen return times, means def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\"", "times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time,", "get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc import types from bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting", "plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source = sourcep42, color='orange',legend='Im{hx}')", "means.append(np.mean(x[st:en])) except: break st = st + seglen return times, means def get_h22(sxs_data):", "plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42, color='blue',legend='Re{hx}') n42=pn42.line(x='timeTD', y='hc_imagTD', source = 
sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo", "#tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout = row(column(p,data_table),column(k,s),r) curdoc().add_root(tabs) curdoc().title = \"Eccentric", "assert t1 == t2 h21 = h21[:,1] + 1.j * h21[:,2] h2m1 =", "h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2", "AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second panel # =============================================================================", "tab2 = Panel(child=layoutan,title=\"Analytic FD\") #tab3 = Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs", "dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass #", "{'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR data\")", "[q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 = Panel(child=layoutNR, title=\"NR data\") tab2 = 
Panel(child=layoutan,title=\"Analytic FD\")", "horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] -", "pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc import types from bokeh.models import TableColumn,ColumnDataSource,DataTable", "============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent horizon B #AhC=common apparent horizon", "row, grid, layout from scipy.interpolate import InterpolatedUnivariateSpline as unispline # ============================================================================= # First", "show, output_notebook from bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider from bokeh.io import curdoc", "= sourcep42, color='orange',legend='Im{hx}') pn42.toolbar.logo = None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source", "def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent horizon B #AhC=common apparent horizon with", "nseg): dt = t[1] - t[0] means = [] times = [] st", "dy=horizon1[:,2] - horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2.", "f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:]", "f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = 
f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:]", "h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] return times,h22, h2m2 def get_hlm(sxs_data): with", "t2 in zip(times, h2m2[:,0]): assert t1 == t2 h21 = h21[:,1] + 1.j", "h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second panel # ============================================================================= def q_to_masses(mass_rat,total_m):", "mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2)", "First panel # ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent horizon B", "(q)\") e_slider = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1, end=1,", "w in [q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third panel # 
=============================================================================", "q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return mass1,mass2 def generate_analytic_waveform(mass_rat, eccentricity,approximant='TaylorF2Ecc',total_m=50,f_lower=20.,delta_f=0.1): mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f))", "from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc import types from bokeh.models import", "h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass # ============================================================================= # Fourth", "= Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname, old, new): # Get the current slider", "pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo = None", "the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass", "current slider values q = q_sliderFD.value e = e_sliderFD.value approximant = 
model_selectFD.value freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=q,eccentricity=e,approximant=approximant)", "numpy as np import os import h5py import json import glob from pycbc.waveform", "'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal': h22lm.real,'h22lmimag':h22lm.imag,'h20real':h20.real,'h20imag':h20.imag} except IndexError: pass # ============================================================================= # Fourth panel #", "sep_xy[0,:]**2. + sep_xy[1,:]**2. ) return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen, nseg): dt =", "import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure, output_file, show, output_notebook from bokeh.models.widgets import Select,", "dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2] sep_xy = np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep = np.sqrt(", "color='blue',legend='Re{h+}') n42=pn41.line(x='timeTD', y='hp_imagTD',source = sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500,", "layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third panel # ============================================================================= def update_table2(attrname, old, new): try:", "in zip(times, h2m2[:,0]): assert t1 == t2 h22 = h22[:,1] + 1.j *", "import types from bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure, output_file, show, output_notebook", "pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) 
n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}')", "Panel(child=layout3,title=\"NR l=2\") tab4 = Panel(child=layoutTD,title=\"Analytic TD\") #tabs = Tabs(tabs=[tab1,tab3,tab2,tab4],sizing_mode='scale_width') tabs = Tabs(tabs=[tab2,tab4],sizing_mode='scale_width') #layout", "= Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD Models\", options=fd_approximants()) def", "from bokeh.plotting import figure, output_file, show, output_notebook from bokeh.models.widgets import Select, PreText,Panel, Tabs,", "phase=np.unwrap(np.angle(hs)) return hp.sample_times.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp,phase timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=1.,eccentricity=0,s1z=0.,s2z=0.) dic_p42 = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} sourcep42=ColumnDataSource(data=dic_p42) pn41=figure(title='h+',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn41.line(x='timeTD', y='hp_realTD',source", "def update_slider2(attrname, old, new): # Get the current slider values q = q_slider.value", "\"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times =", "t[0] means = [] times = [] st = 0 for i in", "Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname,", "\"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 =", "* h2m1[:,2] h20 = h20[:,1] + 1.j * h20[:,2] h2m2 = h2m2[:,1] +", "t1, t2 in zip(times, h2m2[:,0]): assert t1 == t2 h22 = h22[:,1] 
+", "freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}')", "unispline # ============================================================================= # First panel # ============================================================================= def open_sxs(sxs_data): #https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon", "= unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy #", "end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname, old,", "1.j * h2m1[:,2] h20 = h20[:,1] + 1.j * h20[:,2] h2m2 = h2m2[:,1]", "get_td_waveform, td_approximants from pycbc import types from bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import", "plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo =", "pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source", "pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3) 
pn24.toolbar.logo = None q_sliderFD =", "phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA, AhB, norm_time, h22,phase22,freq22, hor_times, sep, mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= #", "sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain',", "st = st + seglen return times, means def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data,", "h22[:,1] + 1.j * h22[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] return", "[q_sliderFD,e_sliderFD,model_selectFD]: w.on_change('value', update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third panel # ============================================================================= def update_table2(attrname,", "q_slider = Slider(start=1, end=10, value=1, step=.5, title=\"Mass ratio (q)\") e_slider = Slider(start=0., end=0.9,", "with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 =", "= np.array([horizon1[:,1]-horizon2[:,1], horizon1[:,2] - horizon2[:,2]]) sep = np.sqrt( sep_xy[0,:]**2. + sep_xy[1,:]**2. 
) return", "pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3) pn24.toolbar.logo = None q_sliderFD = Slider(start=1, end=10, value=1,", "# changed this to the dict time_hlm,h21, h2m1,h2m2lm, h22lm,h20=get_hlm(which_data) source32.data={'time_hlm':time_hlm,'h21real':h21.real,'h21imag':h21.imag, 'h2m1real':h2m1.real,'h2m1imag':h2m1.imag,'h2m2lmreal': h2m2lm.real,'h2m2lmimag':h2m2lm.imag, 'h22lmreal':", "sourcep2, color='green',line_width=3) pn23.toolbar.logo = None pn24=figure(title='Phase',x_axis_label='freq(Hz)',y_axis_label='rad', plot_width=500, plot_height=400) pn24.line(x='freq', y='phase',source = sourcep2, color='red',line_width=3)", "import Select, PreText,Panel, Tabs, Slider from bokeh.io import curdoc from bokeh.layouts import column,", "sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo = None pn22.legend.click_policy=\"hide\" lines=[n21,n22] pn23=figure(title='Amplitude',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) pn23.line(x='freq', y='amp',source =", "title=\"Mass ratio (q)\") e_sliderFD = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD =", "n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag', source = sourcep2, color='orange',legend='Im{hx}') pn22.toolbar.logo =", "#https://data.black-holes.org/waveforms/documentation.html #AhA=apparent horizon A #AhB=apparent horizon B #AhC=common apparent horizon with h5py.File( os.path.join(sxs_data,", "h5py import json import glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc", "step=.5, title=\"Mass ratio (q)\") e_slider = Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider", "= sourcep42, color='orange',legend='Im{h+}') pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\" 
lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source", "<NAME> <<EMAIL>> # Visualization of SXS and analytic waveform model # Use by", "= None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq',", "import figure, output_file, show, output_notebook from bokeh.models.widgets import Select, PreText,Panel, Tabs, Slider from", "2021 <NAME> <<EMAIL>> # Visualization of SXS and analytic waveform model # Use", "command prompt. # Then navigate to the URL http://localhost:5006/main in your browser. import", "hp,hc=hp,hc=get_fd_waveform(mass1=mass1,mass2=mass2,delta_f=delta_f,f_lower=f_lower, approximant=approximant,eccentricity=eccentricity) hs=hp+hc*1j amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain',", "e_slider.value s1z = s1z_slider.value s2z = s2z_slider.value approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data =", "amp=amplitude_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) 
phase=phase_from_frequencyseries(types.FrequencySeries(hs,delta_f=delta_f)) return hp.sample_frequencies.data,np.real(hp.data),np.real(hc.data),np.imag(hp.data),np.imag(hc.data),amp.data,phase.data freq,hp_real,hc_real,hp_imag,hc_imag,amp,phase=generate_analytic_waveform(mass_rat=1.,eccentricity=0) dic_p2 = {'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq',", "mov_avg_sep_t,mov_avg_sep,dx,dy # ============================================================================= # Second panel # ============================================================================= def q_to_masses(mass_rat,total_m): mass1=mass_rat/(mass_rat+1)*total_m mass2=total_m-mass1 return", "from bokeh.models import TableColumn,ColumnDataSource,DataTable from bokeh.plotting import figure, output_file, show, output_notebook from bokeh.models.widgets", "i in range(int(len(x)/nseg)): en = int(st + seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except: break", "dt = t[1] - t[0] means = [] times = [] st =", "h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times", "seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times) norm_time=times-times[np.argmax(h22)] return AhA,", "import InterpolatedUnivariateSpline as unispline # ============================================================================= # First panel # ============================================================================= def open_sxs(sxs_data):", "= Panel(child=layoutNR, title=\"NR data\") tab2 = Panel(child=layoutan,title=\"Analytic FD\") 
#tab3 = Panel(child=layout3,title=\"NR l=2\") tab4", "color='orange',legend='Im{h+}') pn41.toolbar.logo = None pn41.legend.click_policy=\"hide\" lines=[n41,n42] pn42=figure(title='hx',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) n41=pn42.line(x='timeTD', y='hc_realTD',source = sourcep42,", "f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1, horizon2): hor_times = horizon1[:,0] dx=horizon1[:,1]-horizon2[:,1] dy=horizon1[:,2] - horizon2[:,2]", "#AhC=common apparent horizon with h5py.File( os.path.join(sxs_data, \"Horizons.h5\" ), 'r') as f: AhA =", "value=0, step=.05, title=\"Eccentricity (e)\") model_selectFD = Select(title=\"FD Models\", options=fd_approximants()) def update_slider(attrname, old, new):", "e = e_slider.value s1z = s1z_slider.value s2z = s2z_slider.value approximant = model_select.value timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant)", "timeTD,hp_realTD,hc_realTD,hp_imagTD,hc_imagTD,ampTD,phaseTD=generate_TD_waveform(mass_rat=q,eccentricity=e,s1z=s1z,s2z=s2z,approximant=approximant) sourcep42.data = {'hp_realTD':hp_realTD,'hc_realTD':hc_realTD,'hp_imagTD':hp_imagTD,'hc_imagTD':hc_imagTD,'ampTD':ampTD,'phaseTD':phaseTD,'timeTD':timeTD} for w in [q_slider,e_slider,s1z_slider,s2z_slider,model_select]: w.on_change('value', update_slider2) layoutTD=row(column(pn41,pn42),column(pn43,pn44),column(q_slider,e_slider,s1z_slider,s2z_slider,model_select)) #tab1 =", "Then navigate to the URL http://localhost:5006/main in your browser. 
import numpy as np", "step=.05, title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider = Slider(start=-1,", "t1 == t2 h21 = h21[:,1] + 1.j * h21[:,2] h2m1 = h2m1[:,1]", "{'hp_real':hp_real,'hc_real':hc_real,'hp_imag':hp_imag,'hc_imag':hc_imag,'amp':amp,'phase':phase,'freq':freq} sourcep2=ColumnDataSource(data=dic_p2) pn21=figure(title='h+',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n11=pn21.line(x='freq', y='hp_real',source = sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source =", "update_slider) layoutan=row(column(pn21,pn22),column(pn23,pn24),column(q_sliderFD,e_sliderFD,model_selectFD)) # ============================================================================= # Third panel # ============================================================================= def update_table2(attrname, old, new):", "os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ), 'r') as f: h21 = f[\"OutermostExtraction.dir/Y_l2_m1.dat\"][:] h2m1 = f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20", "y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo = None pn44=figure(title='Phase',x_axis_label='time(sec)',y_axis_label='rad', plot_width=500, plot_height=400) pn44.line(x='timeTD', y='phaseTD',source =", "value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1, end=1, value=0, step=.05, title=\"Spin1z\") s2z_slider =", "None pn21.legend.click_policy=\"hide\" lines=[n11,n12] pn22=figure(title='hx',x_axis_label='freq(Hz)',y_axis_label='strain', plot_width=500, plot_height=400) n21=pn22.line(x='freq', y='hc_real',source = sourcep2, color='blue',legend='Re{hx}') n22=pn22.line(x='freq', y='hc_imag',", "============================================================================= def generate_TD_waveform(mass_rat, eccentricity,s1z,s2z,approximant='TaylorT1',total_m=50,f_lower=20.,delta_t=1./1024): 
nonspinning_models=['TaylorT1','TaylorT2','TaylorEt','TaylorT3','TaylorT4','EOBNRv2','EOBNRv2HM','EOBNRv2_ROM','EOBNRv2HM_ROM','SEOBNRv1','SEOBNRv1_ROM_DoubleSpin','SEOBNRv1_ROM_EffectiveSpin','TEOBResum_ROM','PhenSpinTaylor','PhenSpinTaylorRD','IMRPhenomA','EccentricTD','NRSur7dq2'] if approximant in nonspinning_models: s1z=0 s2z=0 mass1,mass2=q_to_masses(mass_rat,total_m) hp,hc=hp,hc=get_td_waveform(mass1=mass1,mass2=mass2,spin1z=s1z,spin2z=s2z,delta_t=delta_t,f_lower=f_lower,", "st + seglen return times, means def get_h22(sxs_data): with h5py.File( os.path.join(sxs_data, \"rhOverM_Asymptotic_GeometricUnits_CoM.h5\" ),", "= f[\"OutermostExtraction.dir/Y_l2_m-1.dat\"][:] h20 = f[\"OutermostExtraction.dir/Y_l2_m0.dat\"][:] h22 = f[\"OutermostExtraction.dir/Y_l2_m2.dat\"][:] h2m2 = f[\"OutermostExtraction.dir/Y_l2_m-2.dat\"][:] times =", "+ 1.j * h20[:,2] h2m2 = h2m2[:,1] + 1.j * h2m2[:,2] h22 =", "None pn42.legend.click_policy=\"hide\" lines=[n41,n42] pn43=figure(title='Amplitude',x_axis_label='time(sec)',y_axis_label='strain', plot_width=500, plot_height=400) pn43.line(x='timeTD', y='ampTD',source = sourcep42, color='green',line_width=3) pn43.toolbar.logo =", "= moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22 = unispline(times, phase22).derivative()(times)", "import glob from pycbc.waveform import get_fd_waveform,amplitude_from_frequencyseries,phase_from_frequencyseries,fd_approximants, get_td_waveform, td_approximants from pycbc import types from", "# Then navigate to the URL http://localhost:5006/main in your browser. 
import numpy as", "= sourcep2, color='blue',legend='Re{h+}') n12=pn21.line(x='freq', y='hp_imag',source = sourcep2, color='orange',legend='Im{h+}') pn21.toolbar.logo = None pn21.legend.click_policy=\"hide\" lines=[n11,n12]", "for i in range(int(len(x)/nseg)): en = int(st + seglen/dt) try: times.append(t[st]) means.append(np.mean(x[st:en])) except:", "return hor_times,sep,dx[:2000],dy[:2000] def moving_average(t, x, seglen, nseg): dt = t[1] - t[0] means", "mov_avg_sep_t, mov_avg_sep = moving_average(hor_times, sep, seglen=1000, nseg=10) times,h22,h2m2=get_h22(which_data) phase22 = np.unwrap(np.angle(h22)) freq22 =", "============================================================================= # Third panel # ============================================================================= def update_table2(attrname, old, new): try: selected_index =", "= Slider(start=0., end=0.9, value=0, step=.05, title=\"Eccentricity (e)\") s1z_slider = Slider(start=-1, end=1, value=0, step=.05,", "= f['AhA.dir/CoordCenterInertial.dat'][:] AhB = f['AhB.dir/CoordCenterInertial.dat'][:] AhC = f['AhC.dir/CoordCenterInertial.dat'][:] return AhA,AhB,AhC def sep_time(horizon1, horizon2):" ]
[ "params.getlist('player_name[]'): player_names.append(p) player_skills = [] for p in params.getlist('player_skill[]'): player_skills.append(int(p)) players = []", "in params.getlist('player_name[]'): player_names.append(p) player_skills = [] for p in params.getlist('player_skill[]'): player_skills.append(int(p)) players =", "<reponame>kyledemeule/firstpick from lib.picker import make_person def parse_params(params): player_names = [] for p in", "player_skills = [] for p in params.getlist('player_skill[]'): player_skills.append(int(p)) players = [] for name,", "import make_person def parse_params(params): player_names = [] for p in params.getlist('player_name[]'): player_names.append(p) player_skills", "player_skills.append(int(p)) players = [] for name, skill in zip(player_names, player_skills): players.append(make_person(name, skill)) return", "p in params.getlist('player_name[]'): player_names.append(p) player_skills = [] for p in params.getlist('player_skill[]'): player_skills.append(int(p)) players", "= [] for p in params.getlist('player_skill[]'): player_skills.append(int(p)) players = [] for name, skill", "lib.picker import make_person def parse_params(params): player_names = [] for p in params.getlist('player_name[]'): player_names.append(p)", "for p in params.getlist('player_skill[]'): player_skills.append(int(p)) players = [] for name, skill in zip(player_names,", "players = [] for name, skill in zip(player_names, player_skills): players.append(make_person(name, skill)) return players", "make_person def parse_params(params): player_names = [] for p in params.getlist('player_name[]'): player_names.append(p) player_skills =", "[] for p in params.getlist('player_name[]'): player_names.append(p) player_skills = [] for p in params.getlist('player_skill[]'):", "for p in params.getlist('player_name[]'): player_names.append(p) player_skills = [] for p in params.getlist('player_skill[]'): player_skills.append(int(p))", "def parse_params(params): player_names = [] 
for p in params.getlist('player_name[]'): player_names.append(p) player_skills = []", "params.getlist('player_skill[]'): player_skills.append(int(p)) players = [] for name, skill in zip(player_names, player_skills): players.append(make_person(name, skill))", "from lib.picker import make_person def parse_params(params): player_names = [] for p in params.getlist('player_name[]'):", "player_names = [] for p in params.getlist('player_name[]'): player_names.append(p) player_skills = [] for p", "p in params.getlist('player_skill[]'): player_skills.append(int(p)) players = [] for name, skill in zip(player_names, player_skills):", "player_names.append(p) player_skills = [] for p in params.getlist('player_skill[]'): player_skills.append(int(p)) players = [] for", "in params.getlist('player_skill[]'): player_skills.append(int(p)) players = [] for name, skill in zip(player_names, player_skills): players.append(make_person(name,", "= [] for p in params.getlist('player_name[]'): player_names.append(p) player_skills = [] for p in", "[] for p in params.getlist('player_skill[]'): player_skills.append(int(p)) players = [] for name, skill in", "parse_params(params): player_names = [] for p in params.getlist('player_name[]'): player_names.append(p) player_skills = [] for" ]
[ "file with fields: topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file. Six", "if args.metrics_file: metrics_file = args.metrics_file bib_file = None if args.bib_file: bib_file = args.bib_file", "if args.cost_file: cost_file = args.cost_file metrics_file = None if args.metrics_file: metrics_file = args.metrics_file", "None if args.bib_file: bib_file = args.bib_file colnames = False if args.colnames: colnames =", "associated with each element type specified in result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The", "and one. Four column tab/space sep file with fields: topic_id unused doc_id gain\")", "Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel File with relevance scores used as", "score, run_id) = line.split() doc_id = doc_id.strip() if (topic_id == curr_topic_id): # build", "\"--metrics_file\", help=\"The list of metrics that are to be reported. If not specified,", "#ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over all topics #Compute residuals? if bib_file: cwl_ruler.save_bibtex(bib_file)", "= TrecQrelHandler(qrel_file) costs = None # read in cost file - if cost", "on the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over all topics #Compute", "to be reported. If not specified, a set of default metrics will be", "= arg_parser.parse_args() gain_file = args.gain_file result_file = args.result_file cost_file = None if args.cost_file:", "File with relevance scores used as gains. 
Gain values should be between zero", "# build vectors ranking_maker.add(doc_id, element_type) else: if curr_topic_id is not None: #Perform the", "= False): qrh = TrecQrelHandler(qrel_file) costs = None # read in cost file", "- if cost file exists if cost_file: costs = read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file)", "while rf: line = rf.readline() if not line: break (topic_id, element_type, doc_id, rank,", "arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file. Six column tab/space sep file with fields: topic_id", "file - if cost file exists if cost_file: costs = read_in_cost_file(cost_file) cwl_ruler =", "formatted results file. Six column tab/space sep file with fields: topic_id element_type doc_id", "= float(cost) return costs def check_file_exists(filename): if filename and not os.path.exists(filename): print(\"{0} Not", "topic curr_topic_id = topic_id # reset seen list ranking_maker = RankingMaker(curr_topic_id, qrh, costs)", "cost file exists if cost_file: costs = read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id =", "fields: topic_id element_type doc_id rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with each", "as cf: while cf: line = cf.readline() if not line: break (element_type, cost)", "score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with each element type specified in result", "saved to the filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in the output\",", "in result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of metrics that are to", "be reported. 
Tab/space sep file with fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If", "build vectors ranking_maker.add(doc_id, element_type) else: if curr_topic_id is not None: #Perform the Measurements", "None ranking_maker = None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf: while rf:", "tab/space sep file with fields: topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results", "= None ranking_maker = None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf: while", "costs def check_file_exists(filename): if filename and not os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1) def", "read_in_cost_file(cost_file): costs = dict() with open(cost_file, \"r\") as cf: while cf: line =", "topic_id element_type doc_id rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with each element", "with fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then the BibTeX for", "not line: break (element_type, cost) = line.split() element_type = element_type.strip() costs[element_type] = float(cost)", "\"--colnames\", help=\"Includes headings in the output\", required=False, action=\"store_true\") args = arg_parser.parse_args() gain_file =", "import RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file): costs = dict() with open(cost_file, \"r\") as", "#Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic curr_topic_id = topic_id #", "import os import argparse from seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler import RankingMaker, Ranking,", "curr_topic_id): # build vectors ranking_maker.add(doc_id, element_type) else: if curr_topic_id is not None: #Perform", "used 
as gains. Gain values should be between zero and one. Four column", "with fields: topic_id element_type doc_id rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with", "= None # read in cost file - if cost file exists if", "if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\",", "False if args.colnames: colnames = True check_file_exists(result_file) check_file_exists(gain_file) check_file_exists(cost_file) check_file_exists(metrics_file) main(result_file, gain_file, cost_file,", "cost_file=None, metrics_file=None, bib_file=None, colnames = False): qrh = TrecQrelHandler(qrel_file) costs = None #", "if args.bib_file: bib_file = args.bib_file colnames = False if args.colnames: colnames = True", "\"r\") as cf: while cf: line = cf.readline() if not line: break (element_type,", "given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in the output\", required=False, action=\"store_true\") args =", "specified in result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of metrics that are", "will be saved to the filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in", "required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in the output\", required=False, action=\"store_true\") args = arg_parser.parse_args()", "arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with each element type specified in result file.\", required=False)", "element_type = element_type.strip() costs[element_type] = float(cost) return costs def check_file_exists(filename): if filename and", "colnames = False if args.colnames: colnames = True check_file_exists(result_file) check_file_exists(gain_file) check_file_exists(cost_file) 
check_file_exists(metrics_file) main(result_file,", "help=\"A TREC Formatted Qrel File with relevance scores used as gains. Gain values", "If not specified, a set of default metrics will be reported. Tab/space sep", "file with fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then the BibTeX", "== curr_topic_id): # build vectors ranking_maker.add(doc_id, element_type) else: if curr_topic_id is not None:", "and not os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1) def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None,", "bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A", "file with fields: topic_id element_type doc_id rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated", "if curr_topic_id is not None: #Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new", "#ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic curr_topic_id = topic_id # reset seen list", "float(cost) return costs def check_file_exists(filename): if filename and not os.path.exists(filename): print(\"{0} Not Found\".format(filename))", "reported. If not specified, a set of default metrics will be reported. 
Tab/space", "Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic curr_topic_id = topic_id # reset seen", "\"--cost_file\", help=\"Costs associated with each element type specified in result file.\", required=False) arg_parser.add_argument(\"-m\",", "then the BibTeX for the measures used will be saved to the filename", "file exists if cost_file: costs = read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id = None", "curr_topic_id = None ranking_maker = None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf:", "help=\"If specified, then the BibTeX for the measures used will be saved to", "None if args.cost_file: cost_file = args.cost_file metrics_file = None if args.metrics_file: metrics_file =", "default metrics will be reported. Tab/space sep file with fields: metric_name params\", required=False)", "help=\"Includes headings in the output\", required=False, action=\"store_true\") args = arg_parser.parse_args() gain_file = args.gain_file", "of default metrics will be reported. Tab/space sep file with fields: metric_name params\",", "reported. Tab/space sep file with fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified,", "gains. Gain values should be between zero and one. Four column tab/space sep", "with open(results_file,\"r\") as rf: while rf: line = rf.readline() if not line: break", "required=False, action=\"store_true\") args = arg_parser.parse_args() gain_file = args.gain_file result_file = args.result_file cost_file =", "with each element type specified in result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list", "topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file. 
Six column tab/space sep", "from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file): costs = dict() with open(cost_file,", "bib_file=None, colnames = False): qrh = TrecQrelHandler(qrel_file) costs = None # read in", "ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file): costs = dict() with open(cost_file, \"r\")", "dict() with open(cost_file, \"r\") as cf: while cf: line = cf.readline() if not", "new topic curr_topic_id = topic_id # reset seen list ranking_maker = RankingMaker(curr_topic_id, qrh,", "if args.colnames: colnames = True check_file_exists(result_file) check_file_exists(gain_file) check_file_exists(cost_file) check_file_exists(metrics_file) main(result_file, gain_file, cost_file, metrics_file,", "not None: #Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic curr_topic_id =", "measures used will be saved to the filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes", "action=\"store_true\") args = arg_parser.parse_args() gain_file = args.gain_file result_file = args.result_file cost_file = None", "element_type doc_id rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with each element type", "specified, then the BibTeX for the measures used will be saved to the", "= args.metrics_file bib_file = None if args.bib_file: bib_file = args.bib_file colnames = False", "CWLRuler(metrics_file) curr_topic_id = None ranking_maker = None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as", "cf.readline() if not line: break (element_type, cost) = line.split() element_type = element_type.strip() costs[element_type]", "should be between zero and one. 
Four column tab/space sep file with fields:", "cf: line = cf.readline() if not line: break (element_type, cost) = line.split() element_type", "doc_id.strip() if (topic_id == curr_topic_id): # build vectors ranking_maker.add(doc_id, element_type) else: if curr_topic_id", "= None if args.cost_file: cost_file = args.cost_file metrics_file = None if args.metrics_file: metrics_file", "args.cost_file metrics_file = None if args.metrics_file: metrics_file = args.metrics_file bib_file = None if", "os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1) def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames =", "import TrecQrelHandler from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file): costs = dict()", "= cf.readline() if not line: break (element_type, cost) = line.split() element_type = element_type.strip()", "argparse from seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file):", "= args.cost_file metrics_file = None if args.metrics_file: metrics_file = args.metrics_file bib_file = None", "curr_topic_id = topic_id # reset seen list ranking_maker = RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id,", "unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file. 
Six column tab/space sep file", "None: #Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic curr_topic_id = topic_id", "= read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id = None ranking_maker = None if colnames:", "result_file = args.result_file cost_file = None if args.cost_file: cost_file = args.cost_file metrics_file =", "tab/space sep file with fields: topic_id element_type doc_id rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\",", "each element type specified in result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of", "import argparse from seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler def", "a set of default metrics will be reported. Tab/space sep file with fields:", "filename and not os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1) def main(results_file, qrel_file, cost_file=None, metrics_file=None,", "#Perform aggregration over all topics #Compute residuals? 
if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ ==", "if filename and not os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1) def main(results_file, qrel_file, cost_file=None,", "metrics_file = args.metrics_file bib_file = None if args.bib_file: bib_file = args.bib_file colnames =", "= dict() with open(cost_file, \"r\") as cf: while cf: line = cf.readline() if", "def check_file_exists(filename): if filename and not os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1) def main(results_file,", "arg_parser.parse_args() gain_file = args.gain_file result_file = args.result_file cost_file = None if args.cost_file: cost_file", "headings in the output\", required=False, action=\"store_true\") args = arg_parser.parse_args() gain_file = args.gain_file result_file", "line.split() doc_id = doc_id.strip() if (topic_id == curr_topic_id): # build vectors ranking_maker.add(doc_id, element_type)", "cost_file = args.cost_file metrics_file = None if args.metrics_file: metrics_file = args.metrics_file bib_file =", "Four column tab/space sep file with fields: topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC", "line: break (element_type, cost) = line.split() element_type = element_type.strip() costs[element_type] = float(cost) return", "= CWLRuler(metrics_file) curr_topic_id = None ranking_maker = None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\")", "__name__ == \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel", "ranking_maker.add(doc_id, element_type) else: if curr_topic_id is not None: #Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking())", "topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over all topics #Compute residuals? 
if bib_file:", "with fields: topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file. Six column", "RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file): costs = dict() with open(cost_file, \"r\") as cf:", "Tab/space sep file with fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then", "ranking_maker = RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type) #Perform the Measurements on the last", "list ranking_maker = RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type) #Perform the Measurements on the", "element_type, doc_id, rank, score, run_id) = line.split() doc_id = doc_id.strip() if (topic_id ==", "colnames = False): qrh = TrecQrelHandler(qrel_file) costs = None # read in cost", "bib_file = args.bib_file colnames = False if args.colnames: colnames = True check_file_exists(result_file) check_file_exists(gain_file)", "arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then the BibTeX for the measures used will be", "fields: topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file. Six column tab/space", "metrics will be reported. Tab/space sep file with fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\",", "output\", required=False, action=\"store_true\") args = arg_parser.parse_args() gain_file = args.gain_file result_file = args.result_file cost_file", "help=\"TREC formatted results file. 
Six column tab/space sep file with fields: topic_id element_type", "not os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1) def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames", "TrecQrelHandler(qrel_file) costs = None # read in cost file - if cost file", "qrh, costs) ranking_maker.add(doc_id, element_type) #Perform the Measurements on the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking())", "= RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type) #Perform the Measurements on the last topic", "all topics #Compute residuals? if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\": arg_parser =", "from seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file): costs", "(topic_id, element_type, doc_id, rank, score, run_id) = line.split() doc_id = doc_id.strip() if (topic_id", "print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf: while rf: line = rf.readline() if not line:", "metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then the BibTeX for the measures", "open(results_file,\"r\") as rf: while rf: line = rf.readline() if not line: break (topic_id,", "# read in cost file - if cost file exists if cost_file: costs", "if not line: break (element_type, cost) = line.split() element_type = element_type.strip() costs[element_type] =", "arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel File with relevance", "of metrics that are to be reported. If not specified, a set of", "as gains. Gain values should be between zero and one. 
Four column tab/space", "topic_id # reset seen list ranking_maker = RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type) #Perform", "filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in the output\", required=False, action=\"store_true\") args", "file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of metrics that are to be reported.", "costs = read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id = None ranking_maker = None if", "= args.bib_file colnames = False if args.colnames: colnames = True check_file_exists(result_file) check_file_exists(gain_file) check_file_exists(cost_file)", "arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel File with relevance scores used as gains. Gain", "if not line: break (topic_id, element_type, doc_id, rank, score, run_id) = line.split() doc_id", "print(\"{0} Not Found\".format(filename)) quit(1) def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames = False):", "values should be between zero and one. Four column tab/space sep file with", "qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames = False): qrh = TrecQrelHandler(qrel_file) costs = None", "= \"<NAME>\" import os import argparse from seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler import", "one. Four column tab/space sep file with fields: topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\",", "are to be reported. 
If not specified, a set of default metrics will", "help=\"Costs associated with each element type specified in result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\",", "= doc_id.strip() if (topic_id == curr_topic_id): # build vectors ranking_maker.add(doc_id, element_type) else: if", "read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id = None ranking_maker = None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\")", "seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file): costs =", "Qrel File with relevance scores used as gains. Gain values should be between", "sep file with fields: topic_id element_type doc_id rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs", "\"--bib_file\", help=\"If specified, then the BibTeX for the measures used will be saved", "(element_type, cost) = line.split() element_type = element_type.strip() costs[element_type] = float(cost) return costs def", "in the output\", required=False, action=\"store_true\") args = arg_parser.parse_args() gain_file = args.gain_file result_file =", "element_type) else: if curr_topic_id is not None: #Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report()", "cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic curr_topic_id = topic_id # reset seen list ranking_maker", "os import argparse from seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler", "cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over all topics #Compute residuals? 
if bib_file: cwl_ruler.save_bibtex(bib_file) if", "quit(1) def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames = False): qrh = TrecQrelHandler(qrel_file)", "args.cost_file: cost_file = args.cost_file metrics_file = None if args.metrics_file: metrics_file = args.metrics_file bib_file", "doc_id, rank, score, run_id) = line.split() doc_id = doc_id.strip() if (topic_id == curr_topic_id):", "#Perform the Measurements on the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over", "check_file_exists(filename): if filename and not os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1) def main(results_file, qrel_file,", "= element_type.strip() costs[element_type] = float(cost) return costs def check_file_exists(filename): if filename and not", "metrics_file=None, bib_file=None, colnames = False): qrh = TrecQrelHandler(qrel_file) costs = None # read", "will be reported. Tab/space sep file with fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\",", "required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then the BibTeX for the measures used will", "help=\"The list of metrics that are to be reported. If not specified, a", "the output\", required=False, action=\"store_true\") args = arg_parser.parse_args() gain_file = args.gain_file result_file = args.result_file", "seen list ranking_maker = RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type) #Perform the Measurements on", "be reported. 
If not specified, a set of default metrics will be reported.", "= topic_id # reset seen list ranking_maker = RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type)", "cwl_ruler.report() # new topic curr_topic_id = topic_id # reset seen list ranking_maker =", "costs[element_type] = float(cost) return costs def check_file_exists(filename): if filename and not os.path.exists(filename): print(\"{0}", "set of default metrics will be reported. Tab/space sep file with fields: metric_name", "for the measures used will be saved to the filename given.\", required=False) arg_parser.add_argument(\"-n\",", "metrics that are to be reported. If not specified, a set of default", "result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of metrics that are to be", "required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of metrics that are to be reported. If", "args = arg_parser.parse_args() gain_file = args.gain_file result_file = args.result_file cost_file = None if", "exists if cost_file: costs = read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id = None ranking_maker", "fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then the BibTeX for the", "doc_id rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with each element type specified", "= rf.readline() if not line: break (topic_id, element_type, doc_id, rank, score, run_id) =", "main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames = False): qrh = TrecQrelHandler(qrel_file) costs =", "gain_file = args.gain_file result_file = args.result_file cost_file = None if args.cost_file: cost_file =", "if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf: while rf: line = rf.readline() if", "last topic #ranking_maker.report() 
cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over all topics #Compute residuals? if", "the measures used will be saved to the filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\",", "else: if curr_topic_id is not None: #Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #", "if cost_file: costs = read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id = None ranking_maker =", "= None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf: while rf: line =", "list of metrics that are to be reported. If not specified, a set", "scores used as gains. Gain values should be between zero and one. Four", "args.gain_file result_file = args.result_file cost_file = None if args.cost_file: cost_file = args.cost_file metrics_file", "cost_file = None if args.cost_file: cost_file = args.cost_file metrics_file = None if args.metrics_file:", "line = cf.readline() if not line: break (element_type, cost) = line.split() element_type =", "specified, a set of default metrics will be reported. 
Tab/space sep file with", "open(cost_file, \"r\") as cf: while cf: line = cf.readline() if not line: break", "used will be saved to the filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings", "bib_file = None if args.bib_file: bib_file = args.bib_file colnames = False if args.colnames:", "the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic curr_topic_id = topic_id # reset", "args.bib_file colnames = False if args.colnames: colnames = True check_file_exists(result_file) check_file_exists(gain_file) check_file_exists(cost_file) check_file_exists(metrics_file)", "def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames = False): qrh = TrecQrelHandler(qrel_file) costs", "the BibTeX for the measures used will be saved to the filename given.\",", "cf: while cf: line = cf.readline() if not line: break (element_type, cost) =", "args.colnames: colnames = True check_file_exists(result_file) check_file_exists(gain_file) check_file_exists(cost_file) check_file_exists(metrics_file) main(result_file, gain_file, cost_file, metrics_file, bib_file,", "column tab/space sep file with fields: topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted", "curr_topic_id is not None: #Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic", "rank, score, run_id) = line.split() doc_id = doc_id.strip() if (topic_id == curr_topic_id): #", "not specified, a set of default metrics will be reported. Tab/space sep file", "ranking_maker.add(doc_id, element_type) #Perform the Measurements on the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform", "cwl_ruler.report() #Perform aggregration over all topics #Compute residuals? 
if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__", "qrh = TrecQrelHandler(qrel_file) costs = None # read in cost file - if", "type specified in result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of metrics that", "# reset seen list ranking_maker = RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type) #Perform the", "while cf: line = cf.readline() if not line: break (element_type, cost) = line.split()", "not line: break (topic_id, element_type, doc_id, rank, score, run_id) = line.split() doc_id =", "= line.split() element_type = element_type.strip() costs[element_type] = float(cost) return costs def check_file_exists(filename): if", "= None if args.metrics_file: metrics_file = args.metrics_file bib_file = None if args.bib_file: bib_file", "aggregration over all topics #Compute residuals? if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\":", "that are to be reported. If not specified, a set of default metrics", "residuals? 
if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\")", "the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over all topics #Compute residuals?", "element_type) #Perform the Measurements on the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration", "read in cost file - if cost file exists if cost_file: costs =", "(topic_id == curr_topic_id): # build vectors ranking_maker.add(doc_id, element_type) else: if curr_topic_id is not", "be saved to the filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in the", "cost file - if cost file exists if cost_file: costs = read_in_cost_file(cost_file) cwl_ruler", "Formatted Qrel File with relevance scores used as gains. Gain values should be", "costs = dict() with open(cost_file, \"r\") as cf: while cf: line = cf.readline()", "False): qrh = TrecQrelHandler(qrel_file) costs = None # read in cost file -", "ranking_maker = None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf: while rf: line", "break (element_type, cost) = line.split() element_type = element_type.strip() costs[element_type] = float(cost) return costs", "results file. Six column tab/space sep file with fields: topic_id element_type doc_id rank", "colnames = True check_file_exists(result_file) check_file_exists(gain_file) check_file_exists(cost_file) check_file_exists(metrics_file) main(result_file, gain_file, cost_file, metrics_file, bib_file, colnames)", "= None if args.bib_file: bib_file = args.bib_file colnames = False if args.colnames: colnames", "gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file. 
Six column tab/space sep file with fields:", "cwl_ruler = CWLRuler(metrics_file) curr_topic_id = None ranking_maker = None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with", "rank score run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with each element type specified in", "zero and one. Four column tab/space sep file with fields: topic_id unused doc_id", "== \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel File", "colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf: while rf: line = rf.readline() if not", "argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel File with relevance scores used", "= args.gain_file result_file = args.result_file cost_file = None if args.cost_file: cost_file = args.cost_file", "element_type.strip() costs[element_type] = float(cost) return costs def check_file_exists(filename): if filename and not os.path.exists(filename):", "# new topic curr_topic_id = topic_id # reset seen list ranking_maker = RankingMaker(curr_topic_id,", "line = rf.readline() if not line: break (topic_id, element_type, doc_id, rank, score, run_id)", "if cost file exists if cost_file: costs = read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id", "\"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel File with", "args.metrics_file bib_file = None if args.bib_file: bib_file = args.bib_file colnames = False if", "column tab/space sep file with fields: topic_id element_type doc_id rank score run_id\") arg_parser.add_argument(\"-c\",", "in cost file - if cost file exists if cost_file: costs = read_in_cost_file(cost_file)", "file. 
Six column tab/space sep file with fields: topic_id element_type doc_id rank score", "sep file with fields: topic_id unused doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file.", "Not Found\".format(filename)) quit(1) def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames = False): qrh", "vectors ranking_maker.add(doc_id, element_type) else: if curr_topic_id is not None: #Perform the Measurements #ranking.report()", "Found\".format(filename)) quit(1) def main(results_file, qrel_file, cost_file=None, metrics_file=None, bib_file=None, colnames = False): qrh =", "Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel File with relevance scores used as gains.", "RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type) #Perform the Measurements on the last topic #ranking_maker.report()", "cost) = line.split() element_type = element_type.strip() costs[element_type] = float(cost) return costs def check_file_exists(filename):", "args.bib_file: bib_file = args.bib_file colnames = False if args.colnames: colnames = True check_file_exists(result_file)", "topics #Compute residuals? 
if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL", "the Measurements on the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over all", "sep file with fields: metric_name params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then the", "costs) ranking_maker.add(doc_id, element_type) #Perform the Measurements on the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report()", "CWLRuler def read_in_cost_file(cost_file): costs = dict() with open(cost_file, \"r\") as cf: while cf:", "break (topic_id, element_type, doc_id, rank, score, run_id) = line.split() doc_id = doc_id.strip() if", "rf: while rf: line = rf.readline() if not line: break (topic_id, element_type, doc_id,", "element type specified in result file.\", required=False) arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of metrics", "line.split() element_type = element_type.strip() costs[element_type] = float(cost) return costs def check_file_exists(filename): if filename", "is not None: #Perform the Measurements #ranking.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() # new topic curr_topic_id", "Ranking, CWLRuler def read_in_cost_file(cost_file): costs = dict() with open(cost_file, \"r\") as cf: while", "\"<NAME>\" import os import argparse from seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler import RankingMaker,", "as rf: while rf: line = rf.readline() if not line: break (topic_id, element_type,", "run_id\") arg_parser.add_argument(\"-c\", \"--cost_file\", help=\"Costs associated with each element type specified in result file.\",", "run_id) = line.split() doc_id = doc_id.strip() if (topic_id == curr_topic_id): # build vectors", "rf.readline() if not line: break (topic_id, element_type, doc_id, rank, score, 
run_id) = line.split()", "= args.result_file cost_file = None if args.cost_file: cost_file = args.cost_file metrics_file = None", "if (topic_id == curr_topic_id): # build vectors ranking_maker.add(doc_id, element_type) else: if curr_topic_id is", "#Compute residuals? if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation", "with relevance scores used as gains. Gain values should be between zero and", "= False if args.colnames: colnames = True check_file_exists(result_file) check_file_exists(gain_file) check_file_exists(cost_file) check_file_exists(metrics_file) main(result_file, gain_file,", "reset seen list ranking_maker = RankingMaker(curr_topic_id, qrh, costs) ranking_maker.add(doc_id, element_type) #Perform the Measurements", "arg_parser.add_argument(\"-m\", \"--metrics_file\", help=\"The list of metrics that are to be reported. If not", "arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in the output\", required=False, action=\"store_true\") args = arg_parser.parse_args() gain_file", "def read_in_cost_file(cost_file): costs = dict() with open(cost_file, \"r\") as cf: while cf: line", "between zero and one. Four column tab/space sep file with fields: topic_id unused", "to the filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in the output\", required=False,", "if __name__ == \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted", "TREC Formatted Qrel File with relevance scores used as gains. Gain values should", "None if args.metrics_file: metrics_file = args.metrics_file bib_file = None if args.bib_file: bib_file =", "BibTeX for the measures used will be saved to the filename given.\", required=False)", "costs = None # read in cost file - if cost file exists", "over all topics #Compute residuals? 
if bib_file: cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\": arg_parser", "params\", required=False) arg_parser.add_argument(\"-b\", \"--bib_file\", help=\"If specified, then the BibTeX for the measures used", "= line.split() doc_id = doc_id.strip() if (topic_id == curr_topic_id): # build vectors ranking_maker.add(doc_id,", "with open(cost_file, \"r\") as cf: while cf: line = cf.readline() if not line:", "None # read in cost file - if cost file exists if cost_file:", "args.metrics_file: metrics_file = args.metrics_file bib_file = None if args.bib_file: bib_file = args.bib_file colnames", "= argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC Formatted Qrel File with relevance scores", "TrecQrelHandler from ruler.cwl_ruler import RankingMaker, Ranking, CWLRuler def read_in_cost_file(cost_file): costs = dict() with", "doc_id gain\") arg_parser.add_argument(\"result_file\", help=\"TREC formatted results file. Six column tab/space sep file with", "the filename given.\", required=False) arg_parser.add_argument(\"-n\", \"--colnames\", help=\"Includes headings in the output\", required=False, action=\"store_true\")", "cwl_ruler.save_bibtex(bib_file) if __name__ == \"__main__\": arg_parser = argparse.ArgumentParser(description=\"CWL Evaluation Metrics\") arg_parser.add_argument(\"gain_file\", help=\"A TREC", "cost_file: costs = read_in_cost_file(cost_file) cwl_ruler = CWLRuler(metrics_file) curr_topic_id = None ranking_maker = None", "metrics_file = None if args.metrics_file: metrics_file = args.metrics_file bib_file = None if args.bib_file:", "None if colnames: print(\"Topic\\tMetric\\tEU/I\\tEU\\tEC/I\\tEC\\tI\") with open(results_file,\"r\") as rf: while rf: line = rf.readline()", "<reponame>leifos/cwl __author__ = \"<NAME>\" import os import argparse from seeker.trec_qrel_handler import TrecQrelHandler from", "Gain values should be between zero and one. 
Four column tab/space sep file", "Six column tab/space sep file with fields: topic_id element_type doc_id rank score run_id\")", "__author__ = \"<NAME>\" import os import argparse from seeker.trec_qrel_handler import TrecQrelHandler from ruler.cwl_ruler", "relevance scores used as gains. Gain values should be between zero and one.", "line: break (topic_id, element_type, doc_id, rank, score, run_id) = line.split() doc_id = doc_id.strip()", "rf: line = rf.readline() if not line: break (topic_id, element_type, doc_id, rank, score,", "args.result_file cost_file = None if args.cost_file: cost_file = args.cost_file metrics_file = None if", "be between zero and one. Four column tab/space sep file with fields: topic_id", "return costs def check_file_exists(filename): if filename and not os.path.exists(filename): print(\"{0} Not Found\".format(filename)) quit(1)", "Measurements on the last topic #ranking_maker.report() cwl_ruler.measure(ranking_maker.get_ranking()) cwl_ruler.report() #Perform aggregration over all topics", "doc_id = doc_id.strip() if (topic_id == curr_topic_id): # build vectors ranking_maker.add(doc_id, element_type) else:" ]
[ "\"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) == 1 assert record.foo == 1 assert record.bar ==", "record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field ): store.replace({field:", "@mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field ): store.replace({field: \"overwrite\", \"foo\": 1})", "== 2.3 assert record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store, record,", "import LoggingContextFilter from loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase class FilterTests(InitializedContextBase): @fixture()", "%d\", (1,), None ) def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\":", "def test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field ): store.replace({field: \"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert", "assert record.bar == 2.3 assert record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self,", "== 1 assert record.foo == 1 assert record.bar == 2.3 assert record.baz ==", ") def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record)", "logging import DEBUG, LogRecord from pytest import fixture, mark from loggingex.context import LoggingContextFilter", "store, record, field ): store.replace({field: \"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo == 1", "from loggingex.context import LoggingContextFilter from 
loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase class", "record.bar == 2.3 assert record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store,", "\"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field ): store.replace({field: \"overwrite\", \"foo\":", "None ) def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\": \"dummy\"}) assert", "): store.replace({field: \"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo == 1 assert getattr(record, field,", "loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase class FilterTests(InitializedContextBase): @fixture() def record(self): return", "return LogRecord( \"test\", DEBUG, \"test.py\", 1337, \"message %d\", (1,), None ) def test_log_record_is_injected_with_context_variables(self,", "from loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase class FilterTests(InitializedContextBase): @fixture() def record(self):", "<filename>tests/loggingex/context/test_logging_context_filter.py from logging import DEBUG, LogRecord from pytest import fixture, mark from loggingex.context", ".helpers import InitializedContextBase class FilterTests(InitializedContextBase): @fixture() def record(self): return LogRecord( \"test\", DEBUG, \"test.py\",", "store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) == 1 assert record.foo ==", "import fixture, mark from loggingex.context import LoggingContextFilter from loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers", "fixture, mark from loggingex.context import LoggingContextFilter from 
loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers import", "import InitializedContextBase class FilterTests(InitializedContextBase): @fixture() def record(self): return LogRecord( \"test\", DEBUG, \"test.py\", 1337,", "\"test.py\", 1337, \"message %d\", (1,), None ) def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1,", "\"test\", DEBUG, \"test.py\", 1337, \"message %d\", (1,), None ) def test_log_record_is_injected_with_context_variables(self, store, record):", "record): store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) == 1 assert record.foo", "record.foo == 1 assert record.bar == 2.3 assert record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES)", "test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) == 1", "\"dummy\"}) assert LoggingContextFilter().filter(record) == 1 assert record.foo == 1 assert record.bar == 2.3", "store, record): store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) == 1 assert", "import DEBUG, LogRecord from pytest import fixture, mark from loggingex.context import LoggingContextFilter from", "1 assert record.bar == 2.3 assert record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields(", "\"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo == 1 assert getattr(record, field, \"undefined\") != \"overwrite\"", "from pytest import fixture, mark from loggingex.context import LoggingContextFilter from loggingex.context.filter import IGNORED_VARIABLE_NAMES", "assert LoggingContextFilter().filter(record) == 1 assert record.foo == 1 assert record.bar == 2.3 assert", "pytest import fixture, mark from loggingex.context import 
LoggingContextFilter from loggingex.context.filter import IGNORED_VARIABLE_NAMES from", "record(self): return LogRecord( \"test\", DEBUG, \"test.py\", 1337, \"message %d\", (1,), None ) def", "store.replace({field: \"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo == 1 assert getattr(record, field, \"undefined\")", "2.3 assert record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field", "assert record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field ):", "DEBUG, LogRecord from pytest import fixture, mark from loggingex.context import LoggingContextFilter from loggingex.context.filter", "import IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase class FilterTests(InitializedContextBase): @fixture() def record(self): return LogRecord(", "1337, \"message %d\", (1,), None ) def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1, \"bar\":", "from logging import DEBUG, LogRecord from pytest import fixture, mark from loggingex.context import", "assert record.foo == 1 assert record.bar == 2.3 assert record.baz == \"dummy\" @mark.parametrize(\"field\",", "class FilterTests(InitializedContextBase): @fixture() def record(self): return LogRecord( \"test\", DEBUG, \"test.py\", 1337, \"message %d\",", "def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) ==", "\"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo == 1 assert getattr(record, field, \"undefined\") !=", "LoggingContextFilter from loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase class 
FilterTests(InitializedContextBase): @fixture() def", "DEBUG, \"test.py\", 1337, \"message %d\", (1,), None ) def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\":", "1 assert record.foo == 1 assert record.bar == 2.3 assert record.baz == \"dummy\"", "1, \"bar\": 2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) == 1 assert record.foo == 1", "IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field ): store.replace({field: \"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record)", "mark from loggingex.context import LoggingContextFilter from loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase", "InitializedContextBase class FilterTests(InitializedContextBase): @fixture() def record(self): return LogRecord( \"test\", DEBUG, \"test.py\", 1337, \"message", "== \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field ): store.replace({field: \"overwrite\",", "IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase class FilterTests(InitializedContextBase): @fixture() def record(self): return LogRecord( \"test\",", "\"bar\": 2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) == 1 assert record.foo == 1 assert", "LoggingContextFilter().filter(record) == 1 assert record.foo == 1 assert record.bar == 2.3 assert record.baz", "field ): store.replace({field: \"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo == 1 assert getattr(record,", "loggingex.context import LoggingContextFilter from loggingex.context.filter import IGNORED_VARIABLE_NAMES from .helpers import InitializedContextBase class FilterTests(InitializedContextBase):", "test_ignores_variables_that_would_overwrite_record_fields( self, store, record, field ): store.replace({field: 
\"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo", "self, store, record, field ): store.replace({field: \"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo ==", "@fixture() def record(self): return LogRecord( \"test\", DEBUG, \"test.py\", 1337, \"message %d\", (1,), None", "LogRecord( \"test\", DEBUG, \"test.py\", 1337, \"message %d\", (1,), None ) def test_log_record_is_injected_with_context_variables(self, store,", "LogRecord from pytest import fixture, mark from loggingex.context import LoggingContextFilter from loggingex.context.filter import", "== 1 assert record.bar == 2.3 assert record.baz == \"dummy\" @mark.parametrize(\"field\", IGNORED_VARIABLE_NAMES) def", "2.3, \"baz\": \"dummy\"}) assert LoggingContextFilter().filter(record) == 1 assert record.foo == 1 assert record.bar", "from .helpers import InitializedContextBase class FilterTests(InitializedContextBase): @fixture() def record(self): return LogRecord( \"test\", DEBUG,", "\"message %d\", (1,), None ) def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1, \"bar\": 2.3,", "def record(self): return LogRecord( \"test\", DEBUG, \"test.py\", 1337, \"message %d\", (1,), None )", "(1,), None ) def test_log_record_is_injected_with_context_variables(self, store, record): store.replace({\"foo\": 1, \"bar\": 2.3, \"baz\": \"dummy\"})", "FilterTests(InitializedContextBase): @fixture() def record(self): return LogRecord( \"test\", DEBUG, \"test.py\", 1337, \"message %d\", (1,),", "record, field ): store.replace({field: \"overwrite\", \"foo\": 1}) LoggingContextFilter().filter(record) assert record.foo == 1 assert" ]
[ "for y in yrange: g = grid[x + y * grid_width] if g", "return True p = width * random(), height * random() queue = [p]", "(px, py) grid_x, grid_y = grid_coords(p) if not fits(p, grid_x, grid_y): continue queue.append(p)", "x in range(max(gx - 2, 0), min(gx + 3, grid_width)): for y in", "_ in range(k): alpha = tau * random() d = r * sqrt(3", "/ cellsize)) def fits(p, gx, gy): yrange = list(range(max(gy - 2, 0), min(gy", "continue p = (px, py) grid_x, grid_y = grid_coords(p) if not fits(p, grid_x,", "import random from math import cos, sin, floor, sqrt, pi, ceil def euclidean_distance(a,", "min(gy + 3, grid_height))) for x in range(max(gx - 2, 0), min(gx +", "distance=euclidean_distance, random=random): tau = 2 * pi cellsize = r / sqrt(2) grid_width", "= grid_coords(p) if not fits(p, grid_x, grid_y): continue queue.append(p) grid[grid_x + grid_y *", "g) <= r: return False return True p = width * random(), height", "alpha = tau * random() d = r * sqrt(3 * random() +", "dy * dy) def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random): tau = 2", "if distance(p, g) <= r: return False return True p = width *", "grid_coords(p) grid[grid_x + grid_y * grid_width] = p while queue: qi = int(random()", "0), min(gy + 3, grid_height))) for x in range(max(gx - 2, 0), min(gx", "for _ in range(k): alpha = tau * random() d = r *", "= [p] grid_x, grid_y = grid_coords(p) grid[grid_x + grid_y * grid_width] = p", "= a[0] - b[0] dy = a[1] - b[1] return sqrt(dx * dx", "cellsize)), int(floor(p[1] / cellsize)) def fits(p, gx, gy): yrange = list(range(max(gy - 2,", "dy = a[1] - b[1] return sqrt(dx * dx + dy * dy)", "= a[1] - b[1] return sqrt(dx * dx + dy * dy) def", "y * grid_width] if g is None: continue if distance(p, g) <= r:", "random(), height * random() queue = [p] grid_x, grid_y = grid_coords(p) grid[grid_x +", "random from math import cos, sin, floor, sqrt, pi, ceil def euclidean_distance(a, b):", "def euclidean_distance(a, b): dx = a[0] - b[0] dy 
= a[1] - b[1]", "= width * random(), height * random() queue = [p] grid_x, grid_y =", "tau * random() d = r * sqrt(3 * random() + 1) px", "def grid_coords(p): return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize)) def fits(p, gx, gy):", "k=5, distance=euclidean_distance, random=random): tau = 2 * pi cellsize = r / sqrt(2)", "r / sqrt(2) grid_width = int(ceil(width / cellsize)) grid_height = int(ceil(height / cellsize))", "return sqrt(dx * dx + dy * dy) def poisson_disc_samples(width, height, r, k=5,", "= grid[x + y * grid_width] if g is None: continue if distance(p,", "width * random(), height * random() queue = [p] grid_x, grid_y = grid_coords(p)", "/ cellsize)), int(floor(p[1] / cellsize)) def fits(p, gx, gy): yrange = list(range(max(gy -", "queue[qi] = queue[-1] queue.pop() for _ in range(k): alpha = tau * random()", "int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize)) def fits(p, gx, gy): yrange = list(range(max(gy", "queue.pop() for _ in range(k): alpha = tau * random() d = r", "height): continue p = (px, py) grid_x, grid_y = grid_coords(p) if not fits(p,", "random=random): tau = 2 * pi cellsize = r / sqrt(2) grid_width =", "for x in range(max(gx - 2, 0), min(gx + 3, grid_width)): for y", "min(gx + 3, grid_width)): for y in yrange: g = grid[x + y", "grid_y * grid_width] = p while queue: qi = int(random() * len(queue)) qx,", "int(ceil(width / cellsize)) grid_height = int(ceil(height / cellsize)) grid = [None] * (grid_width", "grid = [None] * (grid_width * grid_height) def grid_coords(p): return int(floor(p[0] / cellsize)),", "py < height): continue p = (px, py) grid_x, grid_y = grid_coords(p) if", "return False return True p = width * random(), height * random() queue", "in range(k): alpha = tau * random() d = r * sqrt(3 *", "grid[grid_x + grid_y * grid_width] = p return [p for p in grid", "* (grid_width * grid_height) def grid_coords(p): return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize))", "d * cos(alpha) py = qy + d * sin(alpha) if 
not (0", "queue[qi] queue[qi] = queue[-1] queue.pop() for _ in range(k): alpha = tau *", "- b[0] dy = a[1] - b[1] return sqrt(dx * dx + dy", "qi = int(random() * len(queue)) qx, qy = queue[qi] queue[qi] = queue[-1] queue.pop()", "<= py < height): continue p = (px, py) grid_x, grid_y = grid_coords(p)", "height * random() queue = [p] grid_x, grid_y = grid_coords(p) grid[grid_x + grid_y", "<= px < width and 0 <= py < height): continue p =", "- b[1] return sqrt(dx * dx + dy * dy) def poisson_disc_samples(width, height,", "py = qy + d * sin(alpha) if not (0 <= px <", "/ cellsize)) grid = [None] * (grid_width * grid_height) def grid_coords(p): return int(floor(p[0]", "cos(alpha) py = qy + d * sin(alpha) if not (0 <= px", "= qy + d * sin(alpha) if not (0 <= px < width", "queue: qi = int(random() * len(queue)) qx, qy = queue[qi] queue[qi] = queue[-1]", "grid_width] if g is None: continue if distance(p, g) <= r: return False", "* random() + 1) px = qx + d * cos(alpha) py =", "d * sin(alpha) if not (0 <= px < width and 0 <=", "not fits(p, grid_x, grid_y): continue queue.append(p) grid[grid_x + grid_y * grid_width] = p", "* sin(alpha) if not (0 <= px < width and 0 <= py", "b[0] dy = a[1] - b[1] return sqrt(dx * dx + dy *", "pi, ceil def euclidean_distance(a, b): dx = a[0] - b[0] dy = a[1]", "- 2, 0), min(gy + 3, grid_height))) for x in range(max(gx - 2,", "= p while queue: qi = int(random() * len(queue)) qx, qy = queue[qi]", "<= r: return False return True p = width * random(), height *", "grid_height) def grid_coords(p): return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize)) def fits(p, gx,", "fits(p, gx, gy): yrange = list(range(max(gy - 2, 0), min(gy + 3, grid_height)))", "not (0 <= px < width and 0 <= py < height): continue", "False return True p = width * random(), height * random() queue =", "< width and 0 <= py < height): continue p = (px, py)", "* random(), height * random() queue = [p] grid_x, grid_y = grid_coords(p) grid[grid_x", "grid_width] = p return [p 
for p in grid if p is not", "range(max(gx - 2, 0), min(gx + 3, grid_width)): for y in yrange: g", "if not (0 <= px < width and 0 <= py < height):", "grid_y * grid_width] = p return [p for p in grid if p", "gy): yrange = list(range(max(gy - 2, 0), min(gy + 3, grid_height))) for x", "a[1] - b[1] return sqrt(dx * dx + dy * dy) def poisson_disc_samples(width,", "+ 3, grid_width)): for y in yrange: g = grid[x + y *", "tau = 2 * pi cellsize = r / sqrt(2) grid_width = int(ceil(width", "sqrt, pi, ceil def euclidean_distance(a, b): dx = a[0] - b[0] dy =", "+ 3, grid_height))) for x in range(max(gx - 2, 0), min(gx + 3,", "cellsize = r / sqrt(2) grid_width = int(ceil(width / cellsize)) grid_height = int(ceil(height", "+ grid_y * grid_width] = p return [p for p in grid if", "+ grid_y * grid_width] = p while queue: qi = int(random() * len(queue))", "b): dx = a[0] - b[0] dy = a[1] - b[1] return sqrt(dx", "= int(ceil(width / cellsize)) grid_height = int(ceil(height / cellsize)) grid = [None] *", "= queue[qi] queue[qi] = queue[-1] queue.pop() for _ in range(k): alpha = tau", "(grid_width * grid_height) def grid_coords(p): return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize)) def", "* sqrt(3 * random() + 1) px = qx + d * cos(alpha)", "/ sqrt(2) grid_width = int(ceil(width / cellsize)) grid_height = int(ceil(height / cellsize)) grid", "r, k=5, distance=euclidean_distance, random=random): tau = 2 * pi cellsize = r /", "def fits(p, gx, gy): yrange = list(range(max(gy - 2, 0), min(gy + 3,", "= 2 * pi cellsize = r / sqrt(2) grid_width = int(ceil(width /", "grid_coords(p): return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize)) def fits(p, gx, gy): yrange", "3, grid_height))) for x in range(max(gx - 2, 0), min(gx + 3, grid_width)):", "sqrt(3 * random() + 1) px = qx + d * cos(alpha) py", "/ cellsize)) grid_height = int(ceil(height / cellsize)) grid = [None] * (grid_width *", "grid_height))) for x in range(max(gx - 2, 0), min(gx + 3, grid_width)): for", "grid_y = 
grid_coords(p) if not fits(p, grid_x, grid_y): continue queue.append(p) grid[grid_x + grid_y", "yrange = list(range(max(gy - 2, 0), min(gy + 3, grid_height))) for x in", "while queue: qi = int(random() * len(queue)) qx, qy = queue[qi] queue[qi] =", "grid_width = int(ceil(width / cellsize)) grid_height = int(ceil(height / cellsize)) grid = [None]", "py) grid_x, grid_y = grid_coords(p) if not fits(p, grid_x, grid_y): continue queue.append(p) grid[grid_x", "in yrange: g = grid[x + y * grid_width] if g is None:", "p while queue: qi = int(random() * len(queue)) qx, qy = queue[qi] queue[qi]", "px = qx + d * cos(alpha) py = qy + d *", "- 2, 0), min(gx + 3, grid_width)): for y in yrange: g =", "3, grid_width)): for y in yrange: g = grid[x + y * grid_width]", "* random() d = r * sqrt(3 * random() + 1) px =", "< height): continue p = (px, py) grid_x, grid_y = grid_coords(p) if not", "queue.append(p) grid[grid_x + grid_y * grid_width] = p return [p for p in", "2, 0), min(gy + 3, grid_height))) for x in range(max(gx - 2, 0),", "distance(p, g) <= r: return False return True p = width * random(),", "poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random): tau = 2 * pi cellsize =", "grid_x, grid_y = grid_coords(p) if not fits(p, grid_x, grid_y): continue queue.append(p) grid[grid_x +", "* grid_width] = p while queue: qi = int(random() * len(queue)) qx, qy", "if g is None: continue if distance(p, g) <= r: return False return", "= tau * random() d = r * sqrt(3 * random() + 1)", "+ 1) px = qx + d * cos(alpha) py = qy +", "g is None: continue if distance(p, g) <= r: return False return True", "dy) def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random): tau = 2 * pi", "p = width * random(), height * random() queue = [p] grid_x, grid_y", "qy = queue[qi] queue[qi] = queue[-1] queue.pop() for _ in range(k): alpha =", "r * sqrt(3 * random() + 1) px = qx + d *", "a[0] - b[0] dy = a[1] - b[1] return sqrt(dx * dx +", 
"cellsize)) grid = [None] * (grid_width * grid_height) def grid_coords(p): return int(floor(p[0] /", "= list(range(max(gy - 2, 0), min(gy + 3, grid_height))) for x in range(max(gx", "len(queue)) qx, qy = queue[qi] queue[qi] = queue[-1] queue.pop() for _ in range(k):", "and 0 <= py < height): continue p = (px, py) grid_x, grid_y", "cos, sin, floor, sqrt, pi, ceil def euclidean_distance(a, b): dx = a[0] -", "random() queue = [p] grid_x, grid_y = grid_coords(p) grid[grid_x + grid_y * grid_width]", "px < width and 0 <= py < height): continue p = (px,", "2 * pi cellsize = r / sqrt(2) grid_width = int(ceil(width / cellsize))", "yrange: g = grid[x + y * grid_width] if g is None: continue", "= grid_coords(p) grid[grid_x + grid_y * grid_width] = p while queue: qi =", "grid_width)): for y in yrange: g = grid[x + y * grid_width] if", "from math import cos, sin, floor, sqrt, pi, ceil def euclidean_distance(a, b): dx", "r: return False return True p = width * random(), height * random()", "grid[grid_x + grid_y * grid_width] = p while queue: qi = int(random() *", "(0 <= px < width and 0 <= py < height): continue p", "floor, sqrt, pi, ceil def euclidean_distance(a, b): dx = a[0] - b[0] dy", "* dy) def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random): tau = 2 *", "gx, gy): yrange = list(range(max(gy - 2, 0), min(gy + 3, grid_height))) for", "int(random() * len(queue)) qx, qy = queue[qi] queue[qi] = queue[-1] queue.pop() for _", "True p = width * random(), height * random() queue = [p] grid_x,", "random import random from math import cos, sin, floor, sqrt, pi, ceil def", "continue if distance(p, g) <= r: return False return True p = width", "queue[-1] queue.pop() for _ in range(k): alpha = tau * random() d =", "random() + 1) px = qx + d * cos(alpha) py = qy", "int(floor(p[1] / cellsize)) def fits(p, gx, gy): yrange = list(range(max(gy - 2, 0),", "qx, qy = queue[qi] queue[qi] = queue[-1] queue.pop() for _ in range(k): alpha", "* grid_width] if 
g is None: continue if distance(p, g) <= r: return", "dx = a[0] - b[0] dy = a[1] - b[1] return sqrt(dx *", "* pi cellsize = r / sqrt(2) grid_width = int(ceil(width / cellsize)) grid_height", "list(range(max(gy - 2, 0), min(gy + 3, grid_height))) for x in range(max(gx -", "dx + dy * dy) def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random): tau", "* dx + dy * dy) def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random):", "if not fits(p, grid_x, grid_y): continue queue.append(p) grid[grid_x + grid_y * grid_width] =", "+ d * cos(alpha) py = qy + d * sin(alpha) if not", "= (px, py) grid_x, grid_y = grid_coords(p) if not fits(p, grid_x, grid_y): continue", "math import cos, sin, floor, sqrt, pi, ceil def euclidean_distance(a, b): dx =", "y in yrange: g = grid[x + y * grid_width] if g is", "is None: continue if distance(p, g) <= r: return False return True p", "= [None] * (grid_width * grid_height) def grid_coords(p): return int(floor(p[0] / cellsize)), int(floor(p[1]", "None: continue if distance(p, g) <= r: return False return True p =", "0), min(gx + 3, grid_width)): for y in yrange: g = grid[x +", "grid[x + y * grid_width] if g is None: continue if distance(p, g)", "= p return [p for p in grid if p is not None]", "* cos(alpha) py = qy + d * sin(alpha) if not (0 <=", "sqrt(2) grid_width = int(ceil(width / cellsize)) grid_height = int(ceil(height / cellsize)) grid =", "continue queue.append(p) grid[grid_x + grid_y * grid_width] = p return [p for p", "range(k): alpha = tau * random() d = r * sqrt(3 * random()", "qy + d * sin(alpha) if not (0 <= px < width and", "= int(ceil(height / cellsize)) grid = [None] * (grid_width * grid_height) def grid_coords(p):", "euclidean_distance(a, b): dx = a[0] - b[0] dy = a[1] - b[1] return", "ceil def euclidean_distance(a, b): dx = a[0] - b[0] dy = a[1] -", "= qx + d * cos(alpha) py = qy + d * sin(alpha)", "d = r * sqrt(3 * random() + 1) px = qx +", "* len(queue)) 
qx, qy = queue[qi] queue[qi] = queue[-1] queue.pop() for _ in", "qx + d * cos(alpha) py = qy + d * sin(alpha) if", "return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize)) def fits(p, gx, gy): yrange =", "grid_x, grid_y = grid_coords(p) grid[grid_x + grid_y * grid_width] = p while queue:", "= r * sqrt(3 * random() + 1) px = qx + d", "sin(alpha) if not (0 <= px < width and 0 <= py <", "+ d * sin(alpha) if not (0 <= px < width and 0", "* grid_height) def grid_coords(p): return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize)) def fits(p,", "cellsize)) def fits(p, gx, gy): yrange = list(range(max(gy - 2, 0), min(gy +", "int(ceil(height / cellsize)) grid = [None] * (grid_width * grid_height) def grid_coords(p): return", "[p] grid_x, grid_y = grid_coords(p) grid[grid_x + grid_y * grid_width] = p while", "grid_width] = p while queue: qi = int(random() * len(queue)) qx, qy =", "* grid_width] = p return [p for p in grid if p is", "height, r, k=5, distance=euclidean_distance, random=random): tau = 2 * pi cellsize = r", "b[1] return sqrt(dx * dx + dy * dy) def poisson_disc_samples(width, height, r,", "pi cellsize = r / sqrt(2) grid_width = int(ceil(width / cellsize)) grid_height =", "random() d = r * sqrt(3 * random() + 1) px = qx", "+ dy * dy) def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random): tau =", "sin, floor, sqrt, pi, ceil def euclidean_distance(a, b): dx = a[0] - b[0]", "fits(p, grid_x, grid_y): continue queue.append(p) grid[grid_x + grid_y * grid_width] = p return", "g = grid[x + y * grid_width] if g is None: continue if", "= queue[-1] queue.pop() for _ in range(k): alpha = tau * random() d", "2, 0), min(gx + 3, grid_width)): for y in yrange: g = grid[x", "sqrt(dx * dx + dy * dy) def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance,", "0 <= py < height): continue p = (px, py) grid_x, grid_y =", "1) px = qx + d * cos(alpha) py = qy + d", "= int(random() * len(queue)) qx, qy = queue[qi] 
queue[qi] = queue[-1] queue.pop() for", "def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random): tau = 2 * pi cellsize", "= r / sqrt(2) grid_width = int(ceil(width / cellsize)) grid_height = int(ceil(height /", "+ y * grid_width] if g is None: continue if distance(p, g) <=", "grid_x, grid_y): continue queue.append(p) grid[grid_x + grid_y * grid_width] = p return [p", "grid_y): continue queue.append(p) grid[grid_x + grid_y * grid_width] = p return [p for", "from random import random from math import cos, sin, floor, sqrt, pi, ceil", "import cos, sin, floor, sqrt, pi, ceil def euclidean_distance(a, b): dx = a[0]", "[None] * (grid_width * grid_height) def grid_coords(p): return int(floor(p[0] / cellsize)), int(floor(p[1] /", "cellsize)) grid_height = int(ceil(height / cellsize)) grid = [None] * (grid_width * grid_height)", "* random() queue = [p] grid_x, grid_y = grid_coords(p) grid[grid_x + grid_y *", "grid_coords(p) if not fits(p, grid_x, grid_y): continue queue.append(p) grid[grid_x + grid_y * grid_width]", "p = (px, py) grid_x, grid_y = grid_coords(p) if not fits(p, grid_x, grid_y):", "queue = [p] grid_x, grid_y = grid_coords(p) grid[grid_x + grid_y * grid_width] =", "in range(max(gx - 2, 0), min(gx + 3, grid_width)): for y in yrange:", "width and 0 <= py < height): continue p = (px, py) grid_x,", "grid_height = int(ceil(height / cellsize)) grid = [None] * (grid_width * grid_height) def", "grid_y = grid_coords(p) grid[grid_x + grid_y * grid_width] = p while queue: qi" ]
[]
[ "group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL", "type remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias", "ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit", "network SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0 ! nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination", "ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask", "ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0", "tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable ! username CNS{}-student{} attributes group-lock value CNS{}_Student{}_TG", "subnet 10.{}.0.0 255.255.0.0 ! nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET", "CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable ! username CNS{}-student{} attributes group-lock value", "! ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! tunnel-group CNS{}_Student{}_TG type remote-access", "CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! 
ip local pool", "makeVPN(classNum, inNet, sslNet,users): f = open('VPNconfig.txt','a') f.write(''' object network SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0", "(inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for", "static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in", "255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value", "group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! ip local", "split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0", "tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable", "10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list", "permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified", "remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET", "open('VPNconfig.txt','a') f.write(''' object network SSL_CNS{}_NET subnet 10.{}.0.0 
255.255.0.0 ! nat (inside,outside) source static", "vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100", "SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable ! username CNS{}-student{} attributes", "sslNet,users): f = open('VPNconfig.txt','a') f.write(''' object network SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0 ! nat", "= open('VPNconfig.txt','a') f.write(''' object network SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0 ! nat (inside,outside) source", "CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes", "f.write(''' object network SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0 ! nat (inside,outside) source static SSL_CNS{}_NET", "for i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal", "attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! ip local pool SSL_CNS{}_Student{}_POOL", "standard permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy", "group-alias CNS{}_Student{}_NET enable ! username CNS{}-student{} attributes group-lock value CNS{}_Student{}_TG service-type remote-access\\n!\\n!\\n'''.format(classNum,i,inNet,i,classNum,i,classNum,i,classNum,i,classNum,i,sslNet,i,sslNet,i,classNum,i,classNum,i, classNum,i,classNum,i,classNum,i,classNum,i,classNum,i,classNum,i,classNum,i))", "local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! 
tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG", "SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0 ! nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination static", "destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL", "CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable !", "split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! tunnel-group", "inNet, sslNet,users): f = open('VPNconfig.txt','a') f.write(''' object network SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0 !", "'''.format(classNum,inNet,classNum,classNum)) for i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP", "255.255.0.0 ! nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp", "route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0 group-policy", "tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 !", "general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable ! username", "no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0", "! 
nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup", "CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable ! username CNS{}-student{} attributes group-lock value CNS{}_Student{}_TG service-type", "i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy", "CNS{}_Student{}_NET enable ! username CNS{}-student{} attributes group-lock value CNS{}_Student{}_TG service-type remote-access\\n!\\n!\\n'''.format(classNum,i,inNet,i,classNum,i,classNum,i,classNum,i,classNum,i,sslNet,i,sslNet,i,classNum,i,classNum,i, classNum,i,classNum,i,classNum,i,classNum,i,classNum,i,classNum,i,classNum,i)) f.close()", "f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol", "range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes", "CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL !", "value CNS{}_Student{}_SSL_ACL ! ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! tunnel-group CNS{}_Student{}_TG", "SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in range(1,users):", "address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable ! username CNS{}-student{}", "10.{}.0.0 255.255.0.0 ! 
nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET", "default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG webvpn-attributes group-alias CNS{}_Student{}_NET enable ! username CNS{}-student{} attributes group-lock", "CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client", "CNS{}_Student{}_SSL_ACL ! ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! tunnel-group CNS{}_Student{}_TG type", "255.255.255.0 ! tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP", "mask 255.255.255.0 ! tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy", "f = open('VPNconfig.txt','a') f.write(''' object network SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0 ! nat (inside,outside)", "in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard permit 10.{}.{}.0 255.255.255.0 group-policy CNS{}_Student{}_GP internal group-policy CNS{}_Student{}_GP", "SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool", "172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL", "tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group CNS{}_Student{}_TG", "object network SSL_CNS{}_NET subnet 10.{}.0.0 255.255.0.0 ! 
nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET", "SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in range(1,users): f.write('''access-list", "static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i in range(1,users): f.write('''access-list CNS{}_Student{}_SSL_ACL standard", "nat (inside,outside) source static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum))", "def makeVPN(classNum, inNet, sslNet,users): f = open('VPNconfig.txt','a') f.write(''' object network SSL_CNS{}_NET subnet 10.{}.0.0", "source static SSL_CNS{}_NET SSL_CNS{}_NET destination static ADMIN_SSL_NET ADMIN_SSL_NET no-proxy-arp route-lookup '''.format(classNum,inNet,classNum,classNum)) for i", "! tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG general-attributes address-pool SSL_CNS{}_Student{}_POOL default-group-policy CNS{}_Student{}_GP tunnel-group", "pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group CNS{}_Student{}_TG general-attributes", "internal group-policy CNS{}_Student{}_GP attributes vpn-tunnel-protocol ssl-client split-tunnel-policy tunnelspecified split-tunnel-network-list value CNS{}_Student{}_SSL_ACL ! ip", "webvpn-attributes group-alias CNS{}_Student{}_NET enable ! username CNS{}-student{} attributes group-lock value CNS{}_Student{}_TG service-type remote-access\\n!\\n!\\n'''.format(classNum,i,inNet,i,classNum,i,classNum,i,classNum,i,classNum,i,sslNet,i,sslNet,i,classNum,i,classNum,i,", "ip local pool SSL_CNS{}_Student{}_POOL 172.{}.{}.1-172.{}.{}.100 mask 255.255.255.0 ! tunnel-group CNS{}_Student{}_TG type remote-access tunnel-group" ]
[ "ComplianceDB def default_with_cfg_name( cfg_name: str, ): cfg_fac = ci.util.ctx().cfg_factory() cfg = cfg_fac.compliancedb(cfg_name) return", "cfg_name: str, ): cfg_fac = ci.util.ctx().cfg_factory() cfg = cfg_fac.compliancedb(cfg_name) return ComplianceDB( username=cfg.credentials().username(), password=cfg.credentials().password(),", "str, ): cfg_fac = ci.util.ctx().cfg_factory() cfg = cfg_fac.compliancedb(cfg_name) return ComplianceDB( username=cfg.credentials().username(), password=cfg.credentials().password(), hostname=cfg.hostname(),", "import ci.util from dso.compliancedb.db import ComplianceDB def default_with_cfg_name( cfg_name: str, ): cfg_fac =", "import ComplianceDB def default_with_cfg_name( cfg_name: str, ): cfg_fac = ci.util.ctx().cfg_factory() cfg = cfg_fac.compliancedb(cfg_name)", "def default_with_cfg_name( cfg_name: str, ): cfg_fac = ci.util.ctx().cfg_factory() cfg = cfg_fac.compliancedb(cfg_name) return ComplianceDB(", "): cfg_fac = ci.util.ctx().cfg_factory() cfg = cfg_fac.compliancedb(cfg_name) return ComplianceDB( username=cfg.credentials().username(), password=cfg.credentials().password(), hostname=cfg.hostname(), port=cfg.port(),", "dso.compliancedb.db import ComplianceDB def default_with_cfg_name( cfg_name: str, ): cfg_fac = ci.util.ctx().cfg_factory() cfg =", "from dso.compliancedb.db import ComplianceDB def default_with_cfg_name( cfg_name: str, ): cfg_fac = ci.util.ctx().cfg_factory() cfg", "default_with_cfg_name( cfg_name: str, ): cfg_fac = ci.util.ctx().cfg_factory() cfg = cfg_fac.compliancedb(cfg_name) return ComplianceDB( username=cfg.credentials().username(),", "ci.util from dso.compliancedb.db import ComplianceDB def default_with_cfg_name( cfg_name: str, ): cfg_fac = ci.util.ctx().cfg_factory()", "<reponame>busunkim96/cc-utils<filename>ccc/compliancedb.py<gh_stars>0 import ci.util from dso.compliancedb.db import ComplianceDB def default_with_cfg_name( cfg_name: str, ): cfg_fac", "cfg_fac = ci.util.ctx().cfg_factory() cfg = 
cfg_fac.compliancedb(cfg_name) return ComplianceDB( username=cfg.credentials().username(), password=cfg.credentials().password(), hostname=cfg.hostname(), port=cfg.port(), )" ]
[ "namespace or not collection: raise ImproperlyConfigured( \"namespace, collection\", \"'namespace' and 'collection' are required", "the content of the ingest. If specified, must follow the same format as", ":type namespace: str :param collection: The name of the collection we should ingest", "collection. This uses the API in batch mode. :type batch: Iterable[dict], optional :param", "the namespace we should ingest the document(s) to. :type namespace: str :param collection:", "this class directly. Access it from the root :class:`.Synth` client instead. Example: ..", "In general, batch is favored as it results in fewer individual API requests", "iterable of documents we should ingest in the collection. This uses the API", "if hint is not None: if isinstance(hint, Model): hint = hint._into_content()._into_repr() kwargs.update({\"hint\": hint})", "or (not has_document and not has_batch): raise ImproperlyConfigured( \"batch, document\", \"exactly one of", ">>> from synthpy import Synth >>> client = Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\":", "This supports both individual and batch document ingestion. In general, batch is favored", "is not None has_batch = batch is not None if has_document and has_batch", "not None if has_document and has_batch or (not has_document and not has_batch): raise", "ingest in the collection. This uses the API in batch mode. :type batch:", "batch document ingestion. In general, batch is favored as it results in fewer", "Method from .utils import NamespacedClient, scoped from ..exceptions import ImproperlyConfigured from ..model import", "An iterable of documents we should ingest in the collection. This uses the", "note:: Exactly one of ``document`` or ``batch`` must be set. \"\"\" has_document =", "Ingest API. .. note:: Do not construct this class directly. Access it from", "document: The document we should ingest in the collection. 
This uses the API", "of the namespace we should ingest the document(s) to. :type namespace: str :param", "document=None, batch=None, hint=None, namespace=None ): \"\"\"Ingest one or more documents. This supports both", "results in fewer individual API requests and allows ``synth`` to optimize its internal", "ingest the document(s) to. :type collection: str :param document: The document we should", "model. :param namespace: The name of the namespace we should ingest the document(s)", "Model class IngestClient(NamespacedClient): \"\"\"Base class for the Ingest API. .. note:: Do not", "construct this class directly. Access it from the root :class:`.Synth` client instead. Example:", "directly. Access it from the root :class:`.Synth` client instead. Example: .. code-block:: python", "has_batch or (not has_document and not has_batch): raise ImproperlyConfigured( \"batch, document\", \"exactly one", "= self.transport.request(Method.PUT) if not namespace or not collection: raise ImproperlyConfigured( \"namespace, collection\", \"'namespace'", "the document(s) to. :type collection: str :param document: The document we should ingest", "more documents. This supports both individual and batch document ingestion. In general, batch", "This uses the API in batch mode. :type batch: Iterable[dict], optional :param hint:", "client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def put_documents( self, collection=None, document=None, batch=None, hint=None,", "not collection: raise ImproperlyConfigured( \"namespace, collection\", \"'namespace' and 'collection' are required arguments\", )", "Access it from the root :class:`.Synth` client instead. Example: .. code-block:: python >>>", "should ingest the document(s) to. :type collection: str :param document: The document we", "ingestion mode. 
:type document: dict, optional :param batch: An iterable of documents we", "in fewer individual API requests and allows ``synth`` to optimize its internal updating", "document: dict, optional :param batch: An iterable of documents we should ingest in", "optional :param batch: An iterable of documents we should ingest in the collection.", "batch mode. :type batch: Iterable[dict], optional :param hint: Hint about the content of", "or ``batch`` must be set. \"\"\" has_document = document is not None has_batch", ">>> client = Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def put_documents(", "of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional .. note:: Exactly one of ``document``", "None has_batch = batch is not None if has_document and has_batch or (not", "is not None if has_document and has_batch or (not has_document and not has_batch):", "it from the root :class:`.Synth` client instead. Example: .. code-block:: python >>> from", "IngestClient(NamespacedClient): \"\"\"Base class for the Ingest API. .. note:: Do not construct this", "collection we should ingest the document(s) to. :type collection: str :param document: The", "ImproperlyConfigured( \"namespace, collection\", \"'namespace' and 'collection' are required arguments\", ) request.path.push(namespace).push(collection) kwargs =", "are required arguments\", ) request.path.push(namespace).push(collection) kwargs = {} if has_document: kwargs.update({\"document\": document}) elif", "class IngestClient(NamespacedClient): \"\"\"Base class for the Ingest API. .. note:: Do not construct", "of the ingest. If specified, must follow the same format as the `override`", "instead. Example: .. 
code-block:: python >>> from synthpy import Synth >>> client =", "put_documents( self, collection=None, document=None, batch=None, hint=None, namespace=None ): \"\"\"Ingest one or more documents.", "str :param collection: The name of the collection we should ingest the document(s)", "documents we should ingest in the collection. This uses the API in batch", "class directly. Access it from the root :class:`.Synth` client instead. Example: .. code-block::", "required arguments\", ) request.path.push(namespace).push(collection) kwargs = {} if has_document: kwargs.update({\"document\": document}) elif has_batch:", "or more documents. This supports both individual and batch document ingestion. In general,", "True}) \"\"\" @scoped(\"namespace\") def put_documents( self, collection=None, document=None, batch=None, hint=None, namespace=None ): \"\"\"Ingest", "dict, optional :param batch: An iterable of documents we should ingest in the", "has_batch): raise ImproperlyConfigured( \"batch, document\", \"exactly one of 'document' or 'batch' must be", "has_batch: kwargs.update({\"batch\": batch}) if hint is not None: if isinstance(hint, Model): hint =", "collection: str :param document: The document we should ingest in the collection. This", "import Synth >>> client = Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\")", "and batch document ingestion. In general, batch is favored as it results in", "parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional .. note:: Exactly one of", "has_document and not has_batch): raise ImproperlyConfigured( \"batch, document\", \"exactly one of 'document' or", "\"\"\"Ingest one or more documents. This supports both individual and batch document ingestion.", "of documents we should ingest in the collection. 
This uses the API in", "the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional .. note:: Exactly", "def put_documents( self, collection=None, document=None, batch=None, hint=None, namespace=None ): \"\"\"Ingest one or more", "name of the collection we should ingest the document(s) to. :type collection: str", "not namespace or not collection: raise ImproperlyConfigured( \"namespace, collection\", \"'namespace' and 'collection' are", "document\", \"exactly one of 'document' or 'batch' must be set\" ) request =", "Exactly one of ``document`` or ``batch`` must be set. \"\"\" has_document = document", "we should ingest the document(s) to. :type collection: str :param document: The document", "must be set\" ) request = self.transport.request(Method.PUT) if not namespace or not collection:", "if has_document: kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\": batch}) if hint is not None:", "ingest. If specified, must follow the same format as the `override` parameter of", "import Method from .utils import NamespacedClient, scoped from ..exceptions import ImproperlyConfigured from ..model", "API requests and allows ``synth`` to optimize its internal updating of the collection's", "be set. \"\"\" has_document = document is not None has_batch = batch is", "ImproperlyConfigured( \"batch, document\", \"exactly one of 'document' or 'batch' must be set\" )", "should ingest in the collection. This uses the API in batch mode. 
:type", "batch is not None if has_document and has_batch or (not has_document and not", ".transport import Method from .utils import NamespacedClient, scoped from ..exceptions import ImproperlyConfigured from", "raise ImproperlyConfigured( \"namespace, collection\", \"'namespace' and 'collection' are required arguments\", ) request.path.push(namespace).push(collection) kwargs", "kwargs.update({\"batch\": batch}) if hint is not None: if isinstance(hint, Model): hint = hint._into_content()._into_repr()", "The document we should ingest in the collection. This uses the API in", "from .transport import Method from .utils import NamespacedClient, scoped from ..exceptions import ImproperlyConfigured", "one of 'document' or 'batch' must be set\" ) request = self.transport.request(Method.PUT) if", "Iterable[dict], optional :param hint: Hint about the content of the ingest. If specified,", "= document is not None has_batch = batch is not None if has_document", "if has_document and has_batch or (not has_document and not has_batch): raise ImproperlyConfigured( \"batch,", "namespace: str :param collection: The name of the collection we should ingest the", "if not namespace or not collection: raise ImproperlyConfigured( \"namespace, collection\", \"'namespace' and 'collection'", "in individual ingestion mode. :type document: dict, optional :param batch: An iterable of", "the document(s) to. :type namespace: str :param collection: The name of the collection", "namespace we should ingest the document(s) to. :type namespace: str :param collection: The", "both individual and batch document ingestion. In general, batch is favored as it", "in the collection. This uses the API in batch mode. :type batch: Iterable[dict],", "namespace=None ): \"\"\"Ingest one or more documents. This supports both individual and batch", "we should ingest in the collection. This uses the API in batch mode.", "This uses the API in individual ingestion mode. 
:type document: dict, optional :param", "import Model class IngestClient(NamespacedClient): \"\"\"Base class for the Ingest API. .. note:: Do", "the root :class:`.Synth` client instead. Example: .. code-block:: python >>> from synthpy import", "request.path.push(namespace).push(collection) kwargs = {} if has_document: kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\": batch}) if", "self, collection=None, document=None, batch=None, hint=None, namespace=None ): \"\"\"Ingest one or more documents. This", "{} if has_document: kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\": batch}) if hint is not", "batch}) if hint is not None: if isinstance(hint, Model): hint = hint._into_content()._into_repr() kwargs.update({\"hint\":", "request = self.transport.request(Method.PUT) if not namespace or not collection: raise ImproperlyConfigured( \"namespace, collection\",", "= batch is not None if has_document and has_batch or (not has_document and", "\"'namespace' and 'collection' are required arguments\", ) request.path.push(namespace).push(collection) kwargs = {} if has_document:", "Example: .. code-block:: python >>> from synthpy import Synth >>> client = Synth()", "is favored as it results in fewer individual API requests and allows ``synth``", "Do not construct this class directly. Access it from the root :class:`.Synth` client", "of ``document`` or ``batch`` must be set. \"\"\" has_document = document is not", "hint: Hint about the content of the ingest. If specified, must follow the", "kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\": batch}) if hint is not None: if isinstance(hint,", "one of ``document`` or ``batch`` must be set. \"\"\" has_document = document is", "optimize its internal updating of the collection's model. 
:param namespace: The name of", "self.transport.request(Method.PUT) if not namespace or not collection: raise ImproperlyConfigured( \"namespace, collection\", \"'namespace' and", "and has_batch or (not has_document and not has_batch): raise ImproperlyConfigured( \"batch, document\", \"exactly", "'document' or 'batch' must be set\" ) request = self.transport.request(Method.PUT) if not namespace", "has_document: kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\": batch}) if hint is not None: if", ".. note:: Exactly one of ``document`` or ``batch`` must be set. \"\"\" has_document", "If specified, must follow the same format as the `override` parameter of :meth:`put_override", "\"\"\" @scoped(\"namespace\") def put_documents( self, collection=None, document=None, batch=None, hint=None, namespace=None ): \"\"\"Ingest one", "documents. This supports both individual and batch document ingestion. In general, batch is", "ingest in the collection. This uses the API in individual ingestion mode. :type", "or 'batch' must be set\" ) request = self.transport.request(Method.PUT) if not namespace or", "the collection's model. :param namespace: The name of the namespace we should ingest", "optional .. note:: Exactly one of ``document`` or ``batch`` must be set. \"\"\"", "= {} if has_document: kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\": batch}) if hint is", "allows ``synth`` to optimize its internal updating of the collection's model. :param namespace:", "client instead. Example: .. 
code-block:: python >>> from synthpy import Synth >>> client", "favored as it results in fewer individual API requests and allows ``synth`` to", "client = Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def put_documents( self,", "Synth >>> client = Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def", "document(s) to. :type collection: str :param document: The document we should ingest in", "the ingest. If specified, must follow the same format as the `override` parameter", "set. \"\"\" has_document = document is not None has_batch = batch is not", "format as the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional ..", "(not has_document and not has_batch): raise ImproperlyConfigured( \"batch, document\", \"exactly one of 'document'", "document(s) to. :type namespace: str :param collection: The name of the collection we", "from synthpy import Synth >>> client = Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True})", "has_batch = batch is not None if has_document and has_batch or (not has_document", "batch is favored as it results in fewer individual API requests and allows", "should ingest in the collection. This uses the API in individual ingestion mode.", "``batch`` must be set. \"\"\" has_document = document is not None has_batch =", "for the Ingest API. .. note:: Do not construct this class directly. Access", "hint is not None: if isinstance(hint, Model): hint = hint._into_content()._into_repr() kwargs.update({\"hint\": hint}) request.body(**kwargs)", "hint: dict, optional .. note:: Exactly one of ``document`` or ``batch`` must be", "``document`` or ``batch`` must be set. 
\"\"\" has_document = document is not None", "set\" ) request = self.transport.request(Method.PUT) if not namespace or not collection: raise ImproperlyConfigured(", "from ..exceptions import ImproperlyConfigured from ..model import Model class IngestClient(NamespacedClient): \"\"\"Base class for", "ingest the document(s) to. :type namespace: str :param collection: The name of the", "the API in batch mode. :type batch: Iterable[dict], optional :param hint: Hint about", ":param hint: Hint about the content of the ingest. If specified, must follow", ":param collection: The name of the collection we should ingest the document(s) to.", "collection\", \"'namespace' and 'collection' are required arguments\", ) request.path.push(namespace).push(collection) kwargs = {} if", "collection=None, document=None, batch=None, hint=None, namespace=None ): \"\"\"Ingest one or more documents. This supports", "collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def put_documents( self, collection=None, document=None, batch=None, hint=None, namespace=None", "from the root :class:`.Synth` client instead. Example: .. code-block:: python >>> from synthpy", "in batch mode. :type batch: Iterable[dict], optional :param hint: Hint about the content", "updating of the collection's model. :param namespace: The name of the namespace we", "`override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional .. note:: Exactly one", "name of the namespace we should ingest the document(s) to. :type namespace: str", "to. :type collection: str :param document: The document we should ingest in the", "the API in individual ingestion mode. 
:type document: dict, optional :param batch: An", "has_document = document is not None has_batch = batch is not None if", "arguments\", ) request.path.push(namespace).push(collection) kwargs = {} if has_document: kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\":", "not None: if isinstance(hint, Model): hint = hint._into_content()._into_repr() kwargs.update({\"hint\": hint}) request.body(**kwargs) return request.execute()", "supports both individual and batch document ingestion. In general, batch is favored as", "\"\"\"Base class for the Ingest API. .. note:: Do not construct this class", "its internal updating of the collection's model. :param namespace: The name of the", "'batch' must be set\" ) request = self.transport.request(Method.PUT) if not namespace or not", "must follow the same format as the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type", "specified, must follow the same format as the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`.", ":type hint: dict, optional .. note:: Exactly one of ``document`` or ``batch`` must", "document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def put_documents( self, collection=None, document=None, batch=None, hint=None, namespace=None ):", "requests and allows ``synth`` to optimize its internal updating of the collection's model.", "as it results in fewer individual API requests and allows ``synth`` to optimize", ":param batch: An iterable of documents we should ingest in the collection. This", "dict, optional .. note:: Exactly one of ``document`` or ``batch`` must be set.", "document ingestion. In general, batch is favored as it results in fewer individual", ":meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional .. 
note:: Exactly one of ``document`` or", "same format as the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional", "collection: The name of the collection we should ingest the document(s) to. :type", "to optimize its internal updating of the collection's model. :param namespace: The name", "to. :type namespace: str :param collection: The name of the collection we should", ") request = self.transport.request(Method.PUT) if not namespace or not collection: raise ImproperlyConfigured( \"namespace,", "``synth`` to optimize its internal updating of the collection's model. :param namespace: The", "..model import Model class IngestClient(NamespacedClient): \"\"\"Base class for the Ingest API. .. note::", ":type document: dict, optional :param batch: An iterable of documents we should ingest", "the collection. This uses the API in batch mode. :type batch: Iterable[dict], optional", "hint=None, namespace=None ): \"\"\"Ingest one or more documents. This supports both individual and", "the collection we should ingest the document(s) to. :type collection: str :param document:", "it results in fewer individual API requests and allows ``synth`` to optimize its", ">>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def put_documents( self, collection=None, document=None, batch=None,", "= Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def put_documents( self, collection=None,", ":param document: The document we should ingest in the collection. This uses the", "collection. This uses the API in individual ingestion mode. :type document: dict, optional", "not construct this class directly. 
Access it from the root :class:`.Synth` client instead.", "not None has_batch = batch is not None if has_document and has_batch or", "elif has_batch: kwargs.update({\"batch\": batch}) if hint is not None: if isinstance(hint, Model): hint", "The name of the collection we should ingest the document(s) to. :type collection:", "mode. :type batch: Iterable[dict], optional :param hint: Hint about the content of the", "import NamespacedClient, scoped from ..exceptions import ImproperlyConfigured from ..model import Model class IngestClient(NamespacedClient):", "note:: Do not construct this class directly. Access it from the root :class:`.Synth`", "of 'document' or 'batch' must be set\" ) request = self.transport.request(Method.PUT) if not", "None if has_document and has_batch or (not has_document and not has_batch): raise ImproperlyConfigured(", ":class:`.Synth` client instead. Example: .. code-block:: python >>> from synthpy import Synth >>>", "batch: Iterable[dict], optional :param hint: Hint about the content of the ingest. If", "ingestion. In general, batch is favored as it results in fewer individual API", "document is not None has_batch = batch is not None if has_document and", "content of the ingest. If specified, must follow the same format as the", "has_document and has_batch or (not has_document and not has_batch): raise ImproperlyConfigured( \"batch, document\",", "import ImproperlyConfigured from ..model import Model class IngestClient(NamespacedClient): \"\"\"Base class for the Ingest", "fewer individual API requests and allows ``synth`` to optimize its internal updating of", "and 'collection' are required arguments\", ) request.path.push(namespace).push(collection) kwargs = {} if has_document: kwargs.update({\"document\":", "document}) elif has_batch: kwargs.update({\"batch\": batch}) if hint is not None: if isinstance(hint, Model):", "API in individual ingestion mode. 
:type document: dict, optional :param batch: An iterable", "general, batch is favored as it results in fewer individual API requests and", "individual and batch document ingestion. In general, batch is favored as it results", "internal updating of the collection's model. :param namespace: The name of the namespace", "<filename>synthpy/client/ingest.py<gh_stars>1-10 from .transport import Method from .utils import NamespacedClient, scoped from ..exceptions import", "class for the Ingest API. .. note:: Do not construct this class directly.", "of the collection's model. :param namespace: The name of the namespace we should", "the same format as the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict,", "collection's model. :param namespace: The name of the namespace we should ingest the", "Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\" @scoped(\"namespace\") def put_documents( self, collection=None, document=None,", "ImproperlyConfigured from ..model import Model class IngestClient(NamespacedClient): \"\"\"Base class for the Ingest API.", "NamespacedClient, scoped from ..exceptions import ImproperlyConfigured from ..model import Model class IngestClient(NamespacedClient): \"\"\"Base", "one or more documents. This supports both individual and batch document ingestion. In", "the collection. This uses the API in individual ingestion mode. :type document: dict,", "must be set. \"\"\" has_document = document is not None has_batch = batch", "raise ImproperlyConfigured( \"batch, document\", \"exactly one of 'document' or 'batch' must be set\"", "or not collection: raise ImproperlyConfigured( \"namespace, collection\", \"'namespace' and 'collection' are required arguments\",", ".. code-block:: python >>> from synthpy import Synth >>> client = Synth() >>>", "document we should ingest in the collection. 
This uses the API in individual", "in the collection. This uses the API in individual ingestion mode. :type document:", "'collection' are required arguments\", ) request.path.push(namespace).push(collection) kwargs = {} if has_document: kwargs.update({\"document\": document})", "be set\" ) request = self.transport.request(Method.PUT) if not namespace or not collection: raise", "Hint about the content of the ingest. If specified, must follow the same", "The name of the namespace we should ingest the document(s) to. :type namespace:", "API. .. note:: Do not construct this class directly. Access it from the", "individual API requests and allows ``synth`` to optimize its internal updating of the", "@scoped(\"namespace\") def put_documents( self, collection=None, document=None, batch=None, hint=None, namespace=None ): \"\"\"Ingest one or", "the Ingest API. .. note:: Do not construct this class directly. Access it", ":param namespace: The name of the namespace we should ingest the document(s) to.", "and allows ``synth`` to optimize its internal updating of the collection's model. :param", "\"\"\" has_document = document is not None has_batch = batch is not None", "python >>> from synthpy import Synth >>> client = Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\",", "synthpy import Synth >>> client = Synth() >>> client.put_documents(namespace=\"my_namespace\", collection=\"my_collection\", document={\"yes?\": True}) \"\"\"", "kwargs = {} if has_document: kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\": batch}) if hint", "from ..model import Model class IngestClient(NamespacedClient): \"\"\"Base class for the Ingest API. ..", "<synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional .. 
note:: Exactly one of ``document`` or ``batch``", ".utils import NamespacedClient, scoped from ..exceptions import ImproperlyConfigured from ..model import Model class", "str :param document: The document we should ingest in the collection. This uses", "not has_batch): raise ImproperlyConfigured( \"batch, document\", \"exactly one of 'document' or 'batch' must", ".. note:: Do not construct this class directly. Access it from the root", "..exceptions import ImproperlyConfigured from ..model import Model class IngestClient(NamespacedClient): \"\"\"Base class for the", "): \"\"\"Ingest one or more documents. This supports both individual and batch document", "we should ingest in the collection. This uses the API in individual ingestion", "of the collection we should ingest the document(s) to. :type collection: str :param", ":type batch: Iterable[dict], optional :param hint: Hint about the content of the ingest.", "uses the API in individual ingestion mode. :type document: dict, optional :param batch:", "should ingest the document(s) to. :type namespace: str :param collection: The name of", "as the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint: dict, optional .. note::", ":type collection: str :param document: The document we should ingest in the collection.", "individual ingestion mode. :type document: dict, optional :param batch: An iterable of documents", "uses the API in batch mode. 
:type batch: Iterable[dict], optional :param hint: Hint", "\"batch, document\", \"exactly one of 'document' or 'batch' must be set\" ) request", "is not None: if isinstance(hint, Model): hint = hint._into_content()._into_repr() kwargs.update({\"hint\": hint}) request.body(**kwargs) return", "from .utils import NamespacedClient, scoped from ..exceptions import ImproperlyConfigured from ..model import Model", "scoped from ..exceptions import ImproperlyConfigured from ..model import Model class IngestClient(NamespacedClient): \"\"\"Base class", "mode. :type document: dict, optional :param batch: An iterable of documents we should", "about the content of the ingest. If specified, must follow the same format", "and not has_batch): raise ImproperlyConfigured( \"batch, document\", \"exactly one of 'document' or 'batch'", "collection: raise ImproperlyConfigured( \"namespace, collection\", \"'namespace' and 'collection' are required arguments\", ) request.path.push(namespace).push(collection)", "\"namespace, collection\", \"'namespace' and 'collection' are required arguments\", ) request.path.push(namespace).push(collection) kwargs = {}", "we should ingest the document(s) to. :type namespace: str :param collection: The name", "API in batch mode. :type batch: Iterable[dict], optional :param hint: Hint about the", "optional :param hint: Hint about the content of the ingest. If specified, must", "follow the same format as the `override` parameter of :meth:`put_override <synthpy.client.override.OverrideClient.put_override>`. :type hint:", "batch: An iterable of documents we should ingest in the collection. This uses", "\"exactly one of 'document' or 'batch' must be set\" ) request = self.transport.request(Method.PUT)", "namespace: The name of the namespace we should ingest the document(s) to. :type", "root :class:`.Synth` client instead. Example: .. 
code-block:: python >>> from synthpy import Synth", "code-block:: python >>> from synthpy import Synth >>> client = Synth() >>> client.put_documents(namespace=\"my_namespace\",", ") request.path.push(namespace).push(collection) kwargs = {} if has_document: kwargs.update({\"document\": document}) elif has_batch: kwargs.update({\"batch\": batch})", "batch=None, hint=None, namespace=None ): \"\"\"Ingest one or more documents. This supports both individual" ]
[ "out whether to add ``*_requires = ['numpy>=`min version`', # 'scipy>=`min version`']``. We don't", "'.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt =", "Python Modules \"\"\" MAJOR = 0 MINOR = 3 MICRO = 21 ISRELEASED", "Python3 install.\\n\" \"`pip install --upgrade setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION", "\"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 'analyze_structure", "env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen( cmd,", "ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION = None", "was removed starting\\n\" \"in v0.3.7. Once/if I learn how to automate the\\n\" \"backporting", "try: import setuptools except ImportError: sys.exit(\"setuptools required for Python3 install.\\n\" \"`pip install --upgrade", "def git_version(): def _minimal_ext_cmd(cmd): # construct minimal environment env = {} for k", "on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out =", "exceptions that are raised when\\n\" \"executed under Python 2.\") #if sys.version_info[:2] < (2,", "write_version_py(filename='sknano/version.py'): cnt = \"\"\" # THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version", "file include_package_data=True, ) if len(sys.argv) >= 2 and \\ ('--help' in sys.argv[1:] or", "Development Topic :: Software Development :: Libraries :: Python Modules \"\"\" MAJOR =", "dirpath, dirnames, filenames in os.walk('sknano'): for filename in filenames: if filename.endswith(('.so', '.pyd', '.pyc',", "sys.version_info[0] < 3: raise RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry, but there are features", "= \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if 
numpy_version < (1, 9): raise RuntimeError except", "in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding the git rev number", "Add future module to install requires # install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0',", "Topic :: Software Development :: Libraries :: Python Modules \"\"\" MAJOR = 0", "dirpath, dirnames, filenames in os.walk('doc'): for dirname in dirnames: if dirname in ('__pycache__',", "if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit (!) hackish: we are setting", "\"Unknown\" return GIT_REVISION # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly #", "out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse',", "to be done inside # write_version_py(), otherwise the import of sknano.version messes #", "en' import os import sys import shutil import subprocess from distutils.command.clean import clean", "Python 2 support that way.\\n\" \"Until then, if you must install this for", ":: Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering :: Physics Topic", "for _f in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector',", "[ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'],", "+= ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER,", "from __future__ import absolute_import, division, print_function, \\ unicode_literals __docformat__ = 'restructuredtext en' import", "from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, 
top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True)", "version as a string def git_version(): def _minimal_ext_cmd(cmd): # construct minimal environment env", "a = open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION,", "metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE,", "'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS = \"\"\"\\ Development", "properly # update it when the contents of directories change. if os.path.exists('MANIFEST'): os.remove('MANIFEST')", "\"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={", "VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): # must be a source", "Visualization Topic :: Software Development Topic :: Software Development :: Libraries :: Python", "Developers License :: OSI Approved :: BSD License Operating System :: Microsoft ::", "test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 'analyze_structure =", "(1, 9): raise RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires += ['numpy==1.10.1'] install_requires =", "< (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4): if (3,", "FULLVERSION += '.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'):", "to install Scipy 
when Numpy is not yet present in # the system.", "STABLEVERSION = VERSION else: STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1)", "env[k] = v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG']", "without them when, for example, # pip is used to install Scipy when", "try: from setuptools import setup except ImportError: from distutils.core import setup FULLVERSION, GIT_REVISION", "setup routine, to # avoid attempting to load components that aren't built yet.", "for filename in filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname", "return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError:", "OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [", "variable so that the main # sknano __init__ can detect if it is", "Python 3 to Python 2\\n\" \"compatibility library such as the python `future`\\n\" \"module,", "version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION = \"Unknown\" if not", "str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config", "dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames in os.walk('doc'): for", "features of Python 3\\n\" \"that I want to take advantage of and without\\n\"", "'BSD 2-Clause' CLASSIFIERS = \"\"\"\\ Development Status :: 4 - Beta Intended Audience", "import Configuration config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) 
config.add_subpackage('sknano') config.get_version('sknano/version.py')", "MINOR, MICRO - 1) # Return the GIT version as a string def", "# FULLVERSION += '.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def", "install.\\n\" \"`pip install --upgrade setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION =", "if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sknano'): for filename in filenames:", "author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f", "imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION = \"Unknown\" if", "config def setup_package(): # Rewrite the version file everytime write_version_py() # Figure out", "a bit (!) hackish: we are setting a global variable so that the", "# Rewrite the version file everytime write_version_py() # Figure out whether to add", "{'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def", "stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii')", "avoid attempting to load components that aren't built yet. 
builtins.__SKNANO_SETUP__ = True class", "we are setting a global variable so that the main # sknano __init__", "\"worrying about Python 2 compatibility.\\n\" \"Therefore, Python 2 support was removed starting\\n\" \"in", ":: Windows Operating System :: POSIX Operating System :: Unix Operating System ::", "setup script,\\n\" \"I will restore Python 2 support that way.\\n\" \"Until then, if", "'%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s stable_version = '%(stable_version)s'", "'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>'", "the contents of directories change. if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit", "restore Python 2 support that way.\\n\" \"Until then, if you must install this", "# install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR,", "Adding the git rev number needs to be done inside # write_version_py(), otherwise", "import clean as Clean if sys.version_info[0] < 3: raise RuntimeError(\"Python version 3.4+ required.\\n\\n\"", "sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4): if", "shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames in os.walk('doc'): for dirname in dirnames: if", "try: import scipy scipy_version = \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0,", "Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering :: Physics Topic ::", "2\\n\" \"you're on your own. 
It shouldn't be difficult\\n\" \"but you'll have to", "import subprocess from distutils.command.clean import clean as Clean if sys.version_info[0] < 3: raise", "directories, and compiled files in the source tree.\" def run(self): Clean.run(self) if os.path.exists('build'):", "FULLVERSION else: from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if __name__ ==", "0) <= sys.version_info[:2] < (3, 4): if (3, 0) <= sys.version_info[:2] < (3,", "(!) hackish: we are setting a global variable so that the main #", "= sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, #", "\"backporting process from the setup script,\\n\" \"I will restore Python 2 support that", "used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out", "= 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE,", "'PATH']: v = os.environ.get(k) if v is not None: env[k] = v #", "MAJOR = 0 MINOR = 3 MICRO = 21 ISRELEASED = True VERSION", "os import sys import shutil import subprocess from distutils.command.clean import clean as Clean", "on your own. 
It shouldn't be difficult\\n\" \"but you'll have to manually backport", "= ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE =", ":: Developers License :: OSI Approved :: BSD License Operating System :: Microsoft", "directories, \" \\ \".ropeproject directories, and compiled files in the source tree.\" def", "'.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath,", "name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for", "'%(version)s' version = '%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s", "have to manually backport the package\\n\" \"source code using a Python 3 to", "(2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4): if (3, 0)", "except (AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] # # Add six module to", "if you must install this for Python 2\\n\" \"you're on your own. It", "= ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL =", "# Add six module to install_requires (used in numpydoc git submodule) # install_requires", "'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS = \"\"\"\\", "1) # Return the GIT version as a string def git_version(): def _minimal_ext_cmd(cmd):", "that aren't built yet. 
builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description = \\ \"Remove", "string def git_version(): def _minimal_ext_cmd(cmd): # construct minimal environment env = {} for", "CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={", "sys.version_info[:2] < (3, 4): raise RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0] >= 3: import", "= build_requires[:] try: import scipy scipy_version = \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version", "builtins try: import setuptools except ImportError: sys.exit(\"setuptools required for Python3 install.\\n\" \"`pip install", "__docformat__ = 'restructuredtext en' import os import sys import shutil import subprocess from", "automate the backporting process.\\n\" \"You'll also need to hack this setup script\\n\" \"to", "also need to hack this setup script\\n\" \"to remove any exceptions that are", "os.walk('doc'): for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def", "directories, __pycache__ directories, \" \\ \".ropeproject directories, and compiled files in the source", "\\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14): raise RuntimeError except (AttributeError,", "dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames in", "git rev number needs to be done inside # write_version_py(), otherwise the import", "when Numpy is not yet present in # the system. 
try: from setuptools", "Operating System :: MacOS Programming Language :: Python Programming Language :: Python ::", "are setting a global variable so that the main # sknano __init__ can", "module to install requires # install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata", "'%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION = None if STABLEVERSION is None: if", "try. build_requires = [] try: import numpy numpy_version = \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2])", "your own. It shouldn't be difficult\\n\" \"but you'll have to manually backport the", ":: Unix Operating System :: MacOS Programming Language :: Python Programming Language ::", "\"Remove build directories, __pycache__ directories, \" \\ \".ropeproject directories, and compiled files in", "get_version_info(): # Adding the git rev number needs to be done inside #", "+= '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = \"\"\" #", "if STABLEVERSION is None: if ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION = '%d.%d.%d'", "= Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def", "for example, # pip is used to install Scipy when Numpy is not", "Rewrite the version file everytime write_version_py() # Figure out whether to add ``*_requires", "which provides a python script called\\n\" \"`pasteurize` that can be run on the", "else: import __builtin__ as builtins try: import setuptools except ImportError: sys.exit(\"setuptools required for", "everytime write_version_py() # Figure out whether to add ``*_requires = ['numpy>=`min version`', #", "files in the source tree.\" def run(self): Clean.run(self) if os.path.exists('build'): 
shutil.rmtree('build') for dirpath,", "sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For these actions, NumPy/SciPy are not", "is being loaded by the setup routine, to # avoid attempting to load", "load it as a separate module to not load sknano/__init__.py import imp version", "whether to add ``*_requires = ['numpy>=`min version`', # 'scipy>=`min version`']``. We don't want", "the package can run out of an .egg file include_package_data=True, ) if len(sys.argv)", "detect if it is being loaded by the setup routine, to # avoid", "the build under Python 3. FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION = git_version()", "of sknano.version messes # up the build under Python 3. FULLVERSION = VERSION", "def setup_package(): # Rewrite the version file everytime write_version_py() # Figure out whether", "< (1, 9): raise RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires += ['numpy==1.10.1'] install_requires", "\"Until then, if you must install this for Python 2\\n\" \"you're on your", "builtins else: import __builtin__ as builtins try: import setuptools except ImportError: sys.exit(\"setuptools required", "change. if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit (!) hackish: we are", "url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\",", "'--version', 'clean')): # For these actions, NumPy/SciPy are not required. # They are", "= subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])", "'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION = \"Unknown\" return GIT_REVISION #", "division, print_function, \\ unicode_literals __docformat__ = 'restructuredtext en' import os import sys import", "install this for Python 2\\n\" \"you're on your own. 
It shouldn't be difficult\\n\"", "license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac", "'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui =", "= os.environ.get(k) if v is not None: env[k] = v # LANGUAGE is", "except ImportError: sys.exit(\"setuptools required for Python3 install.\\n\" \"`pip install --upgrade setuptools`\") DISTNAME =", "manually backport the package\\n\" \"source code using a Python 3 to Python 2\\n\"", "numpy numpy_version = \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1, 9): raise", "= 'C' env['LC_ALL'] = 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out", "# must be a source distribution, use existing version file # load it", "= [] try: import numpy numpy_version = \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version", "of and without\\n\" \"worrying about Python 2 compatibility.\\n\" \"Therefore, Python 2 support was", "directories change. if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit (!) hackish: we", "'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS',", "builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description = \\ \"Remove build directories, __pycache__ directories,", "contents of directories change. 
if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit (!)", "['scipy==0.16.1'] # # Add six module to install_requires (used in numpydoc git submodule)", "\"Sorry, but there are features of Python 3\\n\" \"that I want to take", "__builtin__ as builtins try: import setuptools except ImportError: sys.exit(\"setuptools required for Python3 install.\\n\"", "which fails too often. # Just if the minimum version is not installed,", "module to not load sknano/__init__.py import imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION =", "# construct minimal environment env = {} for k in ['SYSTEMROOT', 'PATH']: v", "nanostructure data\"\"\" from __future__ import absolute_import, division, print_function, \\ unicode_literals __docformat__ = 'restructuredtext", "2 support that way.\\n\" \"Until then, if you must install this for Python", "in filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames:", "\"Therefore, Python 2 support was removed starting\\n\" \"in v0.3.7. Once/if I learn how", ".egg file include_package_data=True, ) if len(sys.argv) >= 2 and \\ ('--help' in sys.argv[1:]", "is None: if ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION = '%d.%d.%d' % (MAJOR,", "not load sknano/__init__.py import imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else:", "needs to be done inside # write_version_py(), otherwise the import of sknano.version messes", "\"`pip install --upgrade setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:])", "build_requires = [] try: import numpy numpy_version = \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if", "# Figure out whether to add ``*_requires = ['numpy>=`min version`', # 'scipy>=`min version`']``.", "system. 
try: from setuptools import setup except ImportError: from distutils.core import setup FULLVERSION,", "(3, 4): if (3, 0) <= sys.version_info[:2] < (3, 4): raise RuntimeError(\"Python 3.4+", "7) or (3, 0) <= sys.version_info[:2] < (3, 4): if (3, 0) <=", "Software Development Topic :: Software Development :: Libraries :: Python Modules \"\"\" MAJOR", "None if STABLEVERSION is None: if ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION =", "'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config =", "or (3, 0) <= sys.version_info[:2] < (3, 4): if (3, 0) <= sys.version_info[:2]", "filenames in os.walk('sknano'): for filename in filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath,", "Beta Intended Audience :: Science/Research Intended Audience :: Developers License :: OSI Approved", "Language :: Python Programming Language :: Python :: 3.4 Topic :: Scientific/Engineering Topic", "construct minimal environment env = {} for k in ['SYSTEMROOT', 'PATH']: v =", "'restructuredtext en' import os import sys import shutil import subprocess from distutils.command.clean import", "Unix Operating System :: MacOS Programming Language :: Python Programming Language :: Python", "build under Python 3. 
FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif", "ISRELEASED: # FULLVERSION += '.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION", "automate the\\n\" \"backporting process from the setup script,\\n\" \"I will restore Python 2", "and analyzing nanostructure data\"\"\" from __future__ import absolute_import, division, print_function, \\ unicode_literals __docformat__", "< (0, 14): raise RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] #", "LICENSE = 'BSD 2-Clause' CLASSIFIERS = \"\"\"\\ Development Status :: 4 - Beta", "They are required to succeed without them when, for example, # pip is", "own. It shouldn't be difficult\\n\" \"but you'll have to manually backport the package\\n\"", "We don't want to do that unconditionally, # because we risk updating an", "import imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION = \"Unknown\"", "'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION = \"Unknown\" if not ISRELEASED: # FULLVERSION", "build directories, __pycache__ directories, \" \\ \".ropeproject directories, and compiled files in the", "+= ['numpy==1.10.1'] install_requires = build_requires[:] try: import scipy scipy_version = \\ tuple( list(map(int,", "from distutils.core import setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: from", "scipy scipy_version = \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14): raise", "= ['numpy>=`min version`', # 'scipy>=`min version`']``. 
We don't want to do that unconditionally,", "import scipy scipy_version = \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14):", "('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding the git rev number needs", "release: version = full_version \"\"\" FULLVERSION, GIT_REVISION = get_version_info() a = open(filename, 'w')", "14): raise RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] # # Add", "pip is used to install Scipy when Numpy is not yet present in", "numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano')", "actions, NumPy/SciPy are not required. # They are required to succeed without them", "configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True,", "if it is being loaded by the setup routine, to # avoid attempting", "but there are features of Python 3\\n\" \"that I want to take advantage", "+= ['six>=1.9'] # # Add future module to install requires # install_requires +=", ":: 4 - Beta Intended Audience :: Science/Research Intended Audience :: Developers License", "Programming Language :: Python :: 3.4 Topic :: Scientific/Engineering Topic :: Scientific/Engineering ::", "def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sknano'): for", "maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f 
in CLASSIFIERS.split('\\n')", "-*- coding: utf-8 -*- \"\"\"Python toolkit for generating and analyzing nanostructure data\"\"\" from", "'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None):", "use existing version file # load it as a separate module to not", "= FULLVERSION else: from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if __name__", "git_revision = '%(git_revision)s' release = %(isrelease)s stable_version = '%(stable_version)s' if not release: version", "v is not None: env[k] = v # LANGUAGE is used on win32", ":: Visualization Topic :: Software Development Topic :: Software Development :: Libraries ::", "version`', # 'scipy>=`min version`']``. We don't want to do that unconditionally, # because", "\"\"\" MAJOR = 0 MINOR = 3 MICRO = 21 ISRELEASED = True", "def _minimal_ext_cmd(cmd): # construct minimal environment env = {} for k in ['SYSTEMROOT',", "sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, # the package can run out of an", "\"directory to automate the backporting process.\\n\" \"You'll also need to hack this setup", "ImportError: sys.exit(\"setuptools required for Python3 install.\\n\" \"`pip install --upgrade setuptools`\") DISTNAME = 'scikit-nano'", "config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package(): # Rewrite the version file everytime write_version_py()", "# -*- coding: utf-8 -*- \"\"\"Python toolkit for generating and analyzing nanostructure data\"\"\"", "<= sys.version_info[:2] < (3, 4): if (3, 0) <= sys.version_info[:2] < (3, 4):", "= AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure',", "(AttributeError, ImportError, RuntimeError): build_requires += 
['numpy==1.10.1'] install_requires = build_requires[:] try: import scipy scipy_version", "zip_safe=False, # the package can run out of an .egg file include_package_data=True, )", "LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] =", "if numpy_version < (1, 9): raise RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires +=", ":: Science/Research Intended Audience :: Developers License :: OSI Approved :: BSD License", "2.\") #if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3,", "config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config", "from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if __name__ == '__main__': setup_package()", "setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>'", "Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sknano'): for filename in", "toolkit for generating and analyzing nanostructure data\"\"\" from __future__ import absolute_import, division, print_function,", "0 MINOR = 3 MICRO = 21 ISRELEASED = True VERSION = '%d.%d.%d'", "update it when the contents of directories change. if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This", "Add six module to install_requires (used in numpydoc git submodule) # install_requires +=", "= get_version_info() a = open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION,", "routine, to # avoid attempting to load components that aren't built yet. 
builtins.__SKNANO_SETUP__", "script,\\n\" \"I will restore Python 2 support that way.\\n\" \"Until then, if you", "- 1) # Return the GIT version as a string def git_version(): def", "['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE = 'BSD", "to add ``*_requires = ['numpy>=`min version`', # 'scipy>=`min version`']``. We don't want to", "It shouldn't be difficult\\n\" \"but you'll have to manually backport the package\\n\" \"source", "RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] # # Add six module", "Language :: Python :: 3.4 Topic :: Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry", "def write_version_py(filename='sknano/version.py'): cnt = \"\"\" # THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY", "Software Development :: Libraries :: Python Modules \"\"\" MAJOR = 0 MINOR =", "entry_points={ 'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano", "Python :: 3.4 Topic :: Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry Topic ::", "git submodule) # install_requires += ['six>=1.9'] # # Add future module to install", "Return the GIT version as a string def git_version(): def _minimal_ext_cmd(cmd): # construct", "subprocess from distutils.command.clean import clean as Clean if sys.version_info[0] < 3: raise RuntimeError(\"Python", "try: import numpy numpy_version = \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1,", "VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='',", "unconditionally, # because we risk updating an installed numpy/scipy which fails too often.", "Configuration config = 
Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return", "import setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: from numpy.distutils.core import", "- Beta Intended Audience :: Science/Research Intended Audience :: Developers License :: OSI", "shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding the git rev number needs to be", "to install_requires (used in numpydoc git submodule) # install_requires += ['six>=1.9'] # #", "so that the main # sknano __init__ can detect if it is being", "sknano __init__ can detect if it is being loaded by the setup routine,", "cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION =", "GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: from numpy.distutils.core import setup metadata['configuration'] =", "System :: Microsoft :: Windows Operating System :: POSIX Operating System :: Unix", "the git rev number needs to be done inside # write_version_py(), otherwise the", "FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): # must be", "}, entry_points={ 'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main',", "\" \\ \".ropeproject directories, and compiled files in the source tree.\" def run(self):", "= \"Unknown\" if not ISRELEASED: # FULLVERSION += '.dev' FULLVERSION += '.dev0+' +", "version = full_version \"\"\" FULLVERSION, GIT_REVISION = get_version_info() a = open(filename, 'w') try:", "'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 
'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main',", ":: Scientific/Engineering :: Physics Topic :: Scientific/Engineering :: Visualization Topic :: Software Development", "dirnames, filenames in os.walk('sknano'): for filename in filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')):", "'%(git_revision)s' release = %(isrelease)s stable_version = '%(stable_version)s' if not release: version = full_version", "Python 2\\n\" \"compatibility library such as the python `future`\\n\" \"module, which provides a", "that the main # sknano __init__ can detect if it is being loaded", "in numpydoc git submodule) # install_requires += ['six>=1.9'] # # Add future module", "'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS = \"\"\"\\ Development Status", "importing distutils, remove MANIFEST. distutils doesn't properly # update it when the contents", "sknano.version messes # up the build under Python 3. FULLVERSION = VERSION if", "def get_version_info(): # Adding the git rev number needs to be done inside", "DISTNAME = 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL", "be run on the source\\n\" \"directory to automate the backporting process.\\n\" \"You'll also", "want to take advantage of and without\\n\" \"worrying about Python 2 compatibility.\\n\" \"Therefore,", "distutils doesn't properly # update it when the contents of directories change. 
if", "4 - Beta Intended Audience :: Science/Research Intended Audience :: Developers License ::", "# # Add future module to install requires # install_requires += ['future>=0.14.3'] install_requires", "License Operating System :: Microsoft :: Windows Operating System :: POSIX Operating System", "be done inside # write_version_py(), otherwise the import of sknano.version messes # up", "existing version file # load it as a separate module to not load", "Python 2 compatibility.\\n\" \"Therefore, Python 2 support was removed starting\\n\" \"in v0.3.7. Once/if", "= git_version() elif os.path.exists('sknano/version.py'): # must be a source distribution, use existing version", "a separate module to not load sknano/__init__.py import imp version = imp.load_source('sknano.version', 'sknano/version.py')", "else: GIT_REVISION = \"Unknown\" if not ISRELEASED: # FULLVERSION += '.dev' FULLVERSION +=", "= '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL =", "# update it when the contents of directories change. if os.path.exists('MANIFEST'): os.remove('MANIFEST') #", "as a separate module to not load sknano/__init__.py import imp version = imp.load_source('sknano.version',", "install --upgrade setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR", "config.get_version('sknano/version.py') return config def setup_package(): # Rewrite the version file everytime write_version_py() #", "support was removed starting\\n\" \"in v0.3.7. Once/if I learn how to automate the\\n\"", "minimum version is not installed, we may give it a try. build_requires =", "is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C'", "yet present in # the system. 
try: from setuptools import setup except ImportError:", "Once/if I learn how to automate the\\n\" \"backporting process from the setup script,\\n\"", "setup script\\n\" \"to remove any exceptions that are raised when\\n\" \"executed under Python", "= '%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s stable_version = '%(stable_version)s' if not", "source distribution, use existing version file # load it as a separate module", "'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0]", "process.\\n\" \"You'll also need to hack this setup script\\n\" \"to remove any exceptions", "GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): # must be a source distribution, use existing", "be a source distribution, use existing version file # load it as a", "GIT_REVISION = \"Unknown\" if not ISRELEASED: # FULLVERSION += '.dev' FULLVERSION += '.dev0+'", "['numpy>=`min version`', # 'scipy>=`min version`']``. 
We don't want to do that unconditionally, #", "= sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean':", "required.\\n\\n\" \"Sorry, but there are features of Python 3\\n\" \"that I want to", "True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION = None if STABLEVERSION", "script called\\n\" \"`pasteurize` that can be run on the source\\n\" \"directory to automate", "numpy_version < (1, 9): raise RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires += ['numpy==1.10.1']", "# Add future module to install requires # install_requires += ['future>=0.14.3'] install_requires +=", "source\\n\" \"directory to automate the backporting process.\\n\" \"You'll also need to hack this", ":: Software Development :: Libraries :: Python Modules \"\"\" MAJOR = 0 MINOR", "3.4+ required.\") if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins", "+= '.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt", "that are raised when\\n\" \"executed under Python 2.\") #if sys.version_info[:2] < (2, 7)", "MICRO - 1) # Return the GIT version as a string def git_version():", "GIT_REVISION = get_version_info() a = open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'full_version':", "as the python `future`\\n\" \"module, which provides a python script called\\n\" \"`pasteurize` that", "'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE = 'BSD 2-Clause'", "LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL", "FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: from numpy.distutils.core import setup 
metadata['configuration']", "Python 2\\n\" \"you're on your own. It shouldn't be difficult\\n\" \"but you'll have", "data\"\"\" from __future__ import absolute_import, division, print_function, \\ unicode_literals __docformat__ = 'restructuredtext en'", "inside # write_version_py(), otherwise the import of sknano.version messes # up the build", "(3, 0) <= sys.version_info[:2] < (3, 4): if (3, 0) <= sys.version_info[:2] <", "install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL,", "\"\"\"\\ Development Status :: 4 - Beta Intended Audience :: Science/Research Intended Audience", "code using a Python 3 to Python 2\\n\" \"compatibility library such as the", "= VERSION else: STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1) #", "URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube',", "a Python 3 to Python 2\\n\" \"compatibility library such as the python `future`\\n\"", "is used to install Scipy when Numpy is not yet present in #", "sys import shutil import subprocess from distutils.command.clean import clean as Clean if sys.version_info[0]", "to not load sknano/__init__.py import imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision", "version file everytime write_version_py() # Figure out whether to add ``*_requires = ['numpy>=`min", "raise RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0] >= 3: import builtins else: import __builtin__", "Operating System :: Microsoft :: Windows Operating System :: POSIX Operating System ::", "a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally:", "'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 
'XYZ', 'structure', 'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS", "separate module to not load sknano/__init__.py import imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION", "# because we risk updating an installed numpy/scipy which fails too often. #", "sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, # the", "import setuptools except ImportError: sys.exit(\"setuptools required for Python3 install.\\n\" \"`pip install --upgrade setuptools`\")", "# avoid attempting to load components that aren't built yet. builtins.__SKNANO_SETUP__ = True", "Approved :: BSD License Operating System :: Microsoft :: Windows Operating System ::", "VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION = None if STABLEVERSION is", "Microsoft :: Windows Operating System :: POSIX Operating System :: Unix Operating System", "hack this setup script\\n\" \"to remove any exceptions that are raised when\\n\" \"executed", "= {} for k in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if v is", "\"\"\" # THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s' version", "-*- \"\"\"Python toolkit for generating and analyzing nanostructure data\"\"\" from __future__ import absolute_import,", "__init__ can detect if it is being loaded by the setup routine, to", "'%d.%d.%d' % (MAJOR, MINOR, MICRO - 1) # Return the GIT version as", "System :: MacOS Programming Language :: Python Programming Language :: Python :: 3.4", "os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname))", "stable_version = '%(stable_version)s' if not release: version = full_version \"\"\" FULLVERSION, GIT_REVISION =", "example, # pip is used to install Scipy when Numpy is not yet", "= '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL 
= 'http://scikit-nano.org/doc' DOWNLOAD_URL =", "minimal environment env = {} for k in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k)", "'scipy>=`min version`']``. We don't want to do that unconditionally, # because we risk", "it as a separate module to not load sknano/__init__.py import imp version =", "number needs to be done inside # write_version_py(), otherwise the import of sknano.version", "= '%(version)s' version = '%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s' release =", "= '%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s stable_version =", "win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen(", "out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION = \"Unknown\"", "library such as the python `future`\\n\" \"module, which provides a python script called\\n\"", "``*_requires = ['numpy>=`min version`', # 'scipy>=`min version`']``. We don't want to do that", "updating an installed numpy/scipy which fails too often. # Just if the minimum", "VERSION else: STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1) # Return", "shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sknano'): for filename in filenames: if filename.endswith(('.so',", "otherwise the import of sknano.version messes # up the build under Python 3.", "as Clean if sys.version_info[0] < 3: raise RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry, but", "to Python 2\\n\" \"compatibility library such as the python `future`\\n\" \"module, which provides", "out of an .egg file include_package_data=True, ) if len(sys.argv) >= 2 and \\", "in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames in os.walk('doc'): for dirname", "# This is a bit (!) 
hackish: we are setting a global variable", "+= ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION,", "v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C'", "the main # sknano __init__ can detect if it is being loaded by", "if len(sys.argv) >= 2 and \\ ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands',", "FULLVERSION, GIT_REVISION = get_version_info() a = open(filename, 'w') try: a.write(cnt % {'version': VERSION,", "['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if v is not None: env[k] = v", "compatibility.\\n\" \"Therefore, Python 2 support was removed starting\\n\" \"in v0.3.7. Once/if I learn", "a global variable so that the main # sknano __init__ can detect if", "long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\",", "shouldn't be difficult\\n\" \"but you'll have to manually backport the package\\n\" \"source code", "top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package(): # Rewrite", "dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): #", "BSD License Operating System :: Microsoft :: Windows Operating System :: POSIX Operating", "\"that I want to take advantage of and without\\n\" \"worrying about Python 2", "= __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER =", "return config def setup_package(): # Rewrite the version file everytime write_version_py() # Figure", 
"Scientific/Engineering :: Physics Topic :: Scientific/Engineering :: Visualization Topic :: Software Development Topic", "version 3.4+ required.\\n\\n\" \"Sorry, but there are features of Python 3\\n\" \"that I", "installed, we may give it a try. build_requires = [] try: import numpy", "a try. build_requires = [] try: import numpy numpy_version = \\ tuple( list(map(int,", "from distutils.command.clean import clean as Clean if sys.version_info[0] < 3: raise RuntimeError(\"Python version", "# install_requires += ['six>=1.9'] # # Add future module to install requires #", "dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f", "+= ['scipy==0.16.1'] # # Add six module to install_requires (used in numpydoc git", "description = \\ \"Remove build directories, __pycache__ directories, \" \\ \".ropeproject directories, and", "4): raise RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0] >= 3: import builtins else: import", "MINOR, MICRO) STABLEVERSION = None if STABLEVERSION is None: if ISRELEASED: STABLEVERSION =", "= _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION = \"Unknown\" return", "from setuptools import setup except ImportError: from distutils.core import setup FULLVERSION, GIT_REVISION =", "('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For these", "except ImportError: from distutils.core import setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION", "Chemistry Topic :: Scientific/Engineering :: Physics Topic :: Scientific/Engineering :: Visualization Topic ::", ":: Physics Topic :: Scientific/Engineering :: Visualization Topic :: Software Development Topic ::", "version`']``. 
We don't want to do that unconditionally, # because we risk updating", "'egg_info', '--version', 'clean')): # For these actions, NumPy/SciPy are not required. # They", "git_version(): def _minimal_ext_cmd(cmd): # construct minimal environment env = {} for k in", "we risk updating an installed numpy/scipy which fails too often. # Just if", "'w') try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version':", "in os.walk('sknano'): for filename in filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename))", "['six>=1.9'] # # Add future module to install requires # install_requires += ['future>=0.14.3']", "install_requires (used in numpydoc git submodule) # install_requires += ['six>=1.9'] # # Add", "= imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION = \"Unknown\" if not ISRELEASED:", "True class CleanCommand(Clean): description = \\ \"Remove build directories, __pycache__ directories, \" \\", "that way.\\n\" \"Until then, if you must install this for Python 2\\n\" \"you're", "git_version() elif os.path.exists('sknano/version.py'): # must be a source distribution, use existing version file", "setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main',", "# For these actions, NumPy/SciPy are not required. 
# They are required to", "dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames,", "+ GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = \"\"\" # THIS FILE", "\\ \".ropeproject directories, and compiled files in the source tree.\" def run(self): Clean.run(self)", "need to hack this setup script\\n\" \"to remove any exceptions that are raised", "# up the build under Python 3. FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION", "# LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL']", "this setup script\\n\" \"to remove any exceptions that are raised when\\n\" \"executed under", "is a bit (!) hackish: we are setting a global variable so that", "version = '%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s stable_version", "add ``*_requires = ['numpy>=`min version`', # 'scipy>=`min version`']``. We don't want to do", "under Python 3. FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'):", "keywords=KEYWORDS, classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\",", "'.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding the git rev number needs to", "3.4 Topic :: Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering ::", "a python script called\\n\" \"`pasteurize` that can be run on the source\\n\" \"directory", "BEFORE importing distutils, remove MANIFEST. 
distutils doesn't properly # update it when the", "not release: version = full_version \"\"\" FULLVERSION, GIT_REVISION = get_version_info() a = open(filename,", "'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git',", "delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package(): # Rewrite the version file", "= 0 MINOR = 3 MICRO = 21 ISRELEASED = True VERSION =", "[] try: import numpy numpy_version = \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version <", "file # load it as a separate module to not load sknano/__init__.py import", "setuptools import setup except ImportError: from distutils.core import setup FULLVERSION, GIT_REVISION = get_version_info()", "# the package can run out of an .egg file include_package_data=True, ) if", "numpy_version = \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1, 9): raise RuntimeError", "build_requires += ['numpy==1.10.1'] install_requires = build_requires[:] try: import scipy scipy_version = \\ tuple(", "'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL,", "# BEFORE importing distutils, remove MANIFEST. 
distutils doesn't properly # update it when", "distutils.core import setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: from numpy.distutils.core", "'XYZ', 'structure', 'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS = \"\"\"\\ Development Status ::", "v = os.environ.get(k) if v is not None: env[k] = v # LANGUAGE", "python `future`\\n\" \"module, which provides a python script called\\n\" \"`pasteurize` that can be", "= v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C' env['LANG'] =", "= AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS =", "will restore Python 2 support that way.\\n\" \"Until then, if you must install", "'.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = \"\"\" # THIS", "RuntimeError): build_requires += ['numpy==1.10.1'] install_requires = build_requires[:] try: import scipy scipy_version = \\", ":: Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering :: Physics Topic :: Scientific/Engineering ::", "required to succeed without them when, for example, # pip is used to", ":: Scientific/Engineering :: Visualization Topic :: Software Development Topic :: Software Development ::", "is not None: env[k] = v # LANGUAGE is used on win32 env['LANGUAGE']", "can run out of an .egg file include_package_data=True, ) if len(sys.argv) >= 2", "quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package(): # Rewrite the version file everytime", "of an .egg file include_package_data=True, ) if len(sys.argv) >= 2 and \\ ('--help'", "\"Mac OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts':", "% (MAJOR, MINOR, MICRO - 1) # 
Return the GIT version as a", "version is not installed, we may give it a try. build_requires = []", "CleanCommand}, zip_safe=False, # the package can run out of an .egg file include_package_data=True,", "\\ \"Remove build directories, __pycache__ directories, \" \\ \".ropeproject directories, and compiled files", "\"I will restore Python 2 support that way.\\n\" \"Until then, if you must", "= \"\"\" # THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s'", "to # avoid attempting to load components that aren't built yet. builtins.__SKNANO_SETUP__ =", "if scipy_version < (0, 14): raise RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires +=", "= out.strip().decode('ascii') except OSError: GIT_REVISION = \"Unknown\" return GIT_REVISION # BEFORE importing distutils,", "= '%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION = None if STABLEVERSION is None:", "AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano',", "import of sknano.version messes # up the build under Python 3. FULLVERSION =", ":: OSI Approved :: BSD License Operating System :: Microsoft :: Windows Operating", "distribution, use existing version file # load it as a separate module to", "not yet present in # the system. try: from setuptools import setup except", ":: Microsoft :: Windows Operating System :: POSIX Operating System :: Unix Operating", "often. 
# Just if the minimum version is not installed, we may give", ":: MacOS Programming Language :: Python Programming Language :: Python :: 3.4 Topic", "Science/Research Intended Audience :: Developers License :: OSI Approved :: BSD License Operating", "\"\"\" FULLVERSION, GIT_REVISION = get_version_info() a = open(filename, 'w') try: a.write(cnt % {'version':", "= True class CleanCommand(Clean): description = \\ \"Remove build directories, __pycache__ directories, \"", "None: if ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR,", "= 21 ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION", "\"Unknown\" if not ISRELEASED: # FULLVERSION += '.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7]", "if the minimum version is not installed, we may give it a try.", "['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui", "components that aren't built yet. builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description = \\", "in os.walk('doc'): for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname))", "# THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s' version =", "= 3 MICRO = 21 ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR,", "try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION})", "numpy/scipy which fails too often. 
# Just if the minimum version is not", "\"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] },", "to automate the\\n\" \"backporting process from the setup script,\\n\" \"I will restore Python", "is not yet present in # the system. try: from setuptools import setup", "sys.version_info[:2] < (3, 4): if (3, 0) <= sys.version_info[:2] < (3, 4): raise", "Libraries :: Python Modules \"\"\" MAJOR = 0 MINOR = 3 MICRO =", "3. FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): # must", "os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): # must be a source distribution, use", "I want to take advantage of and without\\n\" \"worrying about Python 2 compatibility.\\n\"", "if not ISRELEASED: # FULLVERSION += '.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7] return", "must install this for Python 2\\n\" \"you're on your own. It shouldn't be", "Modules \"\"\" MAJOR = 0 MINOR = 3 MICRO = 21 ISRELEASED =", "the\\n\" \"backporting process from the setup script,\\n\" \"I will restore Python 2 support", "2 support was removed starting\\n\" \"in v0.3.7. Once/if I learn how to automate", "Numpy is not yet present in # the system. try: from setuptools import", "RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0] >= 3: import builtins else: import __builtin__ as", "risk updating an installed numpy/scipy which fails too often. 
# Just if the", "Topic :: Scientific/Engineering :: Physics Topic :: Scientific/Engineering :: Visualization Topic :: Software", "a source distribution, use existing version file # load it as a separate", "take advantage of and without\\n\" \"worrying about Python 2 compatibility.\\n\" \"Therefore, Python 2", "\"You'll also need to hack this setup script\\n\" \"to remove any exceptions that", "3.4+ required.\\n\\n\" \"Sorry, but there are features of Python 3\\n\" \"that I want", "DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER", "''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL", "Operating System :: POSIX Operating System :: Unix Operating System :: MacOS Programming", "for dirpath, dirnames, filenames in os.walk('doc'): for dirname in dirnames: if dirname in", "__pycache__ directories, \" \\ \".ropeproject directories, and compiled files in the source tree.\"", "'%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s stable_version = '%(stable_version)s' if not release:", "requires # install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME,", "for k in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if v is not None:", "succeed without them when, for example, # pip is used to install Scipy", "env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except", "'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION = \"Unknown\" return GIT_REVISION # BEFORE", "out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION", "Python 2.\") #if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] <", "MANIFEST. 
distutils doesn't properly # update it when the contents of directories change.", "messes # up the build under Python 3. FULLVERSION = VERSION if os.path.exists('.git'):", "SCIKIT-NANO SETUP.PY short_version = '%(version)s' version = '%(version)s' full_version = '%(full_version)s' git_revision =", "assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package(): # Rewrite the version", "in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires,", "'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano =", "filename)) for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for", "Python 3\\n\" \"that I want to take advantage of and without\\n\" \"worrying about", "six module to install_requires (used in numpydoc git submodule) # install_requires += ['six>=1.9']", "= \"Unknown\" return GIT_REVISION # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly", "\".ropeproject directories, and compiled files in the source tree.\" def run(self): Clean.run(self) if", "required. # They are required to succeed without them when, for example, #", "Operating System :: Unix Operating System :: MacOS Programming Language :: Python Programming", "ImportError: from distutils.core import setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else:", "import sys import shutil import subprocess from distutils.command.clean import clean as Clean if", "of directories change. if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit (!) 
hackish:", "backport the package\\n\" \"source code using a Python 3 to Python 2\\n\" \"compatibility", "starting\\n\" \"in v0.3.7. Once/if I learn how to automate the\\n\" \"backporting process from", "for Python3 install.\\n\" \"`pip install --upgrade setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION = __doc__", "filename in filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in", "\"to remove any exceptions that are raised when\\n\" \"executed under Python 2.\") #if", "'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False,", "write_version_py() # Figure out whether to add ``*_requires = ['numpy>=`min version`', # 'scipy>=`min", "and \\ ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): #", "MacOS Programming Language :: Python Programming Language :: Python :: 3.4 Topic ::", "Topic :: Scientific/Engineering :: Visualization Topic :: Software Development Topic :: Software Development", "For these actions, NumPy/SciPy are not required. 
# They are required to succeed", "import os import sys import shutil import subprocess from distutils.command.clean import clean as", "of Python 3\\n\" \"that I want to take advantage of and without\\n\" \"worrying", "sys.exit(\"setuptools required for Python3 install.\\n\" \"`pip install --upgrade setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION", "= '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1) # Return the GIT version", "except OSError: GIT_REVISION = \"Unknown\" return GIT_REVISION # BEFORE importing distutils, remove MANIFEST.", "finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package,", "learn how to automate the\\n\" \"backporting process from the setup script,\\n\" \"I will", "write_version_py(), otherwise the import of sknano.version messes # up the build under Python", "when, for example, # pip is used to install Scipy when Numpy is", "Topic :: Software Development Topic :: Software Development :: Libraries :: Python Modules", "bit (!) hackish: we are setting a global variable so that the main", "IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s' version = '%(version)s' full_version =", "full_version = '%(full_version)s' git_revision = '%(git_revision)s' release = %(isrelease)s stable_version = '%(stable_version)s' if", "(AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] # # Add six module to install_requires", "these actions, NumPy/SciPy are not required. 
# They are required to succeed without", "Intended Audience :: Science/Research Intended Audience :: Developers License :: OSI Approved ::", "are features of Python 3\\n\" \"that I want to take advantage of and", "if not release: version = full_version \"\"\" FULLVERSION, GIT_REVISION = get_version_info() a =", "\"but you'll have to manually backport the package\\n\" \"source code using a Python", "'.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'):", "cnt = \"\"\" # THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version =", "CLASSIFIERS = \"\"\"\\ Development Status :: 4 - Beta Intended Audience :: Science/Research", "numpydoc git submodule) # install_requires += ['six>=1.9'] # # Add future module to", "System :: POSIX Operating System :: Unix Operating System :: MacOS Programming Language", ":: Python Programming Language :: Python :: 3.4 Topic :: Scientific/Engineering Topic ::", "OSError: GIT_REVISION = \"Unknown\" return GIT_REVISION # BEFORE importing distutils, remove MANIFEST. distutils", "because we risk updating an installed numpy/scipy which fails too often. # Just", "config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package(): # Rewrite the", ":: BSD License Operating System :: Microsoft :: Windows Operating System :: POSIX", "= full_version \"\"\" FULLVERSION, GIT_REVISION = get_version_info() a = open(filename, 'w') try: a.write(cnt", "= 'BSD 2-Clause' CLASSIFIERS = \"\"\"\\ Development Status :: 4 - Beta Intended", "sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins try: import setuptools", "'clean')): # For these actions, NumPy/SciPy are not required. 
# They are required", "Topic :: Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering :: Physics Topic :: Scientific/Engineering", ":: Python Modules \"\"\" MAJOR = 0 MINOR = 3 MICRO = 21", "if v is not None: env[k] = v # LANGUAGE is used on", "# Adding the git rev number needs to be done inside # write_version_py(),", "# load it as a separate module to not load sknano/__init__.py import imp", "= %(isrelease)s stable_version = '%(stable_version)s' if not release: version = full_version \"\"\" FULLVERSION,", "Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering :: Physics Topic :: Scientific/Engineering :: Visualization", "you'll have to manually backport the package\\n\" \"source code using a Python 3", "'.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname in ('__pycache__',", "cmdclass={'clean': CleanCommand}, zip_safe=False, # the package can run out of an .egg file", "\\ ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For", "called\\n\" \"`pasteurize` that can be run on the source\\n\" \"directory to automate the", "2 compatibility.\\n\" \"Therefore, Python 2 support was removed starting\\n\" \"in v0.3.7. 
Once/if I", "filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname in", ">= 2 and \\ ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version',", "loaded by the setup routine, to # avoid attempting to load components that", "_minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION = \"Unknown\" return GIT_REVISION", "script\\n\" \"to remove any exceptions that are raised when\\n\" \"executed under Python 2.\")", "sknano/__init__.py import imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION =", "setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] = FULLVERSION else: from numpy.distutils.core import setup", "GIT version as a string def git_version(): def _minimal_ext_cmd(cmd): # construct minimal environment", "# the system. try: from setuptools import setup except ImportError: from distutils.core import", "else: from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if __name__ == '__main__':", "the package\\n\" \"source code using a Python 3 to Python 2\\n\" \"compatibility library", "without\\n\" \"worrying about Python 2 compatibility.\\n\" \"Therefore, Python 2 support was removed starting\\n\"", "'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util", "version file # load it as a separate module to not load sknano/__init__.py", "Figure out whether to add ``*_requires = ['numpy>=`min version`', # 'scipy>=`min version`']``. 
We", "install_requires += ['six>=1.9'] # # Add future module to install requires # install_requires", "an .egg file include_package_data=True, ) if len(sys.argv) >= 2 and \\ ('--help' in", "os.path.exists('sknano/version.py'): # must be a source distribution, use existing version file # load", "list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1, 9): raise RuntimeError except (AttributeError, ImportError, RuntimeError):", "numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1, 9): raise RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires", "run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sknano'): for filename", "in ('--help-commands', 'egg_info', '--version', 'clean')): # For these actions, NumPy/SciPy are not required.", "os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sknano'): for filename in filenames: if", "not installed, we may give it a try. build_requires = [] try: import", "os.remove('MANIFEST') # This is a bit (!) 
hackish: we are setting a global", "install_requires = build_requires[:] try: import scipy scipy_version = \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if", "download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\",", "if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins try: import", "['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL,", "= \\ \"Remove build directories, __pycache__ directories, \" \\ \".ropeproject directories, and compiled", "'%(stable_version)s' if not release: version = full_version \"\"\" FULLVERSION, GIT_REVISION = get_version_info() a", "sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand},", "is not installed, we may give it a try. build_requires = [] try:", "don't want to do that unconditionally, # because we risk updating an installed", "Scientific/Engineering :: Visualization Topic :: Software Development Topic :: Software Development :: Libraries", "we may give it a try. build_requires = [] try: import numpy numpy_version", "to hack this setup script\\n\" \"to remove any exceptions that are raised when\\n\"", "os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a bit (!) 
hackish: we are setting a", "raise RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry, but there are features of Python 3\\n\"", "GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION = \"Unknown\" return GIT_REVISION # BEFORE importing", "import __builtin__ as builtins try: import setuptools except ImportError: sys.exit(\"setuptools required for Python3", "AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL", "the setup routine, to # avoid attempting to load components that aren't built", "3: raise RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry, but there are features of Python", "< (3, 4): raise RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0] >= 3: import builtins", "\"executed under Python 2.\") #if sys.version_info[:2] < (2, 7) or (3, 0) <=", "for generating and analyzing nanostructure data\"\"\" from __future__ import absolute_import, division, print_function, \\", "if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames in os.walk('doc'):", "SETUP.PY short_version = '%(version)s' version = '%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s'", "= '%(git_revision)s' release = %(isrelease)s stable_version = '%(stable_version)s' if not release: version =", "generating and analyzing nanostructure data\"\"\" from __future__ import absolute_import, division, print_function, \\ unicode_literals", "utf-8 -*- \"\"\"Python toolkit for generating and analyzing nanostructure data\"\"\" from __future__ import", "tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14): raise RuntimeError except (AttributeError, ImportError,", "'structure', 'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS = \"\"\"\\ Development Status :: 4", "return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = \"\"\" # THIS FILE 
IS GENERATED", "load components that aren't built yet. builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description =", "aren't built yet. builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description = \\ \"Remove build", "for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath,", "dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding the git rev", "a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path)", "3 MICRO = 21 ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR,", "(MAJOR, MINOR, MICRO - 1) # Return the GIT version as a string", "not ISRELEASED: # FULLVERSION += '.dev' FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION,", "submodule) # install_requires += ['six>=1.9'] # # Add future module to install requires", "and without\\n\" \"worrying about Python 2 compatibility.\\n\" \"Therefore, Python 2 support was removed", "that unconditionally, # because we risk updating an installed numpy/scipy which fails too", "_minimal_ext_cmd(cmd): # construct minimal environment env = {} for k in ['SYSTEMROOT', 'PATH']:", "raise RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires += ['numpy==1.10.1'] install_requires = build_requires[:] try:", "python # -*- coding: utf-8 -*- \"\"\"Python toolkit for generating and analyzing nanostructure", "under Python 2.\") #if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2]", ">= 3: import builtins else: import __builtin__ as builtins try: import setuptools except", "(3, 4): raise RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0] >= 3: import builtins else:", "when the contents of directories change. 
if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is a", "as builtins try: import setuptools except ImportError: sys.exit(\"setuptools required for Python3 install.\\n\" \"`pip", "# 'scipy>=`min version`']``. We don't want to do that unconditionally, # because we", "STABLEVERSION is None: if ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION = '%d.%d.%d' %", "filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if", "`future`\\n\" \"module, which provides a python script called\\n\" \"`pasteurize` that can be run", "# They are required to succeed without them when, for example, # pip", "to succeed without them when, for example, # pip is used to install", "'<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano'", "['numpy==1.10.1'] install_requires = build_requires[:] try: import scipy scipy_version = \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2])", "GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = \"\"\" # THIS FILE IS", "when\\n\" \"executed under Python 2.\") #if sys.version_info[:2] < (2, 7) or (3, 0)", "in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if v is not None: env[k] =", "attempting to load components that aren't built yet. 
builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean):", "= \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14): raise RuntimeError except", "Windows Operating System :: POSIX Operating System :: Unix Operating System :: MacOS", ":: 3.4 Topic :: Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering", "'C' env['LC_ALL'] = 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try:", "= get_version_info() metadata['version'] = FULLVERSION else: from numpy.distutils.core import setup metadata['configuration'] = configuration", "you must install this for Python 2\\n\" \"you're on your own. It shouldn't", "the python `future`\\n\" \"module, which provides a python script called\\n\" \"`pasteurize` that can", "file everytime write_version_py() # Figure out whether to add ``*_requires = ['numpy>=`min version`',", "Python 2 support was removed starting\\n\" \"in v0.3.7. Once/if I learn how to", "from the setup script,\\n\" \"I will restore Python 2 support that way.\\n\" \"Until", "except (AttributeError, ImportError, RuntimeError): build_requires += ['numpy==1.10.1'] install_requires = build_requires[:] try: import scipy", "absolute_import, division, print_function, \\ unicode_literals __docformat__ = 'restructuredtext en' import os import sys", "elif os.path.exists('sknano/version.py'): # must be a source distribution, use existing version file #", "being loaded by the setup routine, to # avoid attempting to load components", "GIT_REVISION # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly # update it", "install requires # install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict(", "run out of an .egg file include_package_data=True, ) if len(sys.argv) >= 2 and", "are not required. 
# They are required to succeed without them when, for", "DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ',", "too often. # Just if the minimum version is not installed, we may", "if sys.version_info[0] < 3: raise RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry, but there are", "if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for dirname in dirnames: if dirname", "the setup script,\\n\" \"I will restore Python 2 support that way.\\n\" \"Until then,", "install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION,", "# sknano __init__ can detect if it is being loaded by the setup", "get_version_info() a = open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision':", "(0, 14): raise RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] # #", "load sknano/__init__.py import imp version = imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION", "AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure',", "include_package_data=True, ) if len(sys.argv) >= 2 and \\ ('--help' in sys.argv[1:] or sys.argv[1]", "platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1']", "be difficult\\n\" \"but you'll have to manually backport the package\\n\" \"source code using", "to take advantage of and without\\n\" 
\"worrying about Python 2 compatibility.\\n\" \"Therefore, Python", "yet. builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description = \\ \"Remove build directories, __pycache__", "3\\n\" \"that I want to take advantage of and without\\n\" \"worrying about Python", "if (3, 0) <= sys.version_info[:2] < (3, 4): raise RuntimeError(\"Python 3.4+ required.\") if", "env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return", "for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info():", "analyzing nanostructure data\"\"\" from __future__ import absolute_import, division, print_function, \\ unicode_literals __docformat__ =", "hackish: we are setting a global variable so that the main # sknano", "setup except ImportError: from distutils.core import setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version'] =", "the GIT version as a string def git_version(): def _minimal_ext_cmd(cmd): # construct minimal", "FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None): from", "it is being loaded by the setup routine, to # avoid attempting to", "compiled files in the source tree.\" def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for", "dirname)) for dirpath, dirnames, filenames in os.walk('doc'): for dirname in dirnames: if dirname", "Audience :: Science/Research Intended Audience :: Developers License :: OSI Approved :: BSD", "list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14): raise RuntimeError except (AttributeError, ImportError, RuntimeError):", "(used in numpydoc git submodule) # install_requires += ['six>=1.9'] # # Add future", "remove MANIFEST. 
distutils doesn't properly # update it when the contents of directories", "import absolute_import, division, print_function, \\ unicode_literals __docformat__ = 'restructuredtext en' import os import", "FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s' version = '%(version)s' full_version", "% (MAJOR, MINOR, MICRO) STABLEVERSION = None if STABLEVERSION is None: if ISRELEASED:", "return GIT_REVISION # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly # update", "\"source code using a Python 3 to Python 2\\n\" \"compatibility library such as", "= 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL =", "_f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3',", "in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding", "= version.git_revision else: GIT_REVISION = \"Unknown\" if not ISRELEASED: # FULLVERSION += '.dev'", "if ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO", "CleanCommand(Clean): description = \\ \"Remove build directories, __pycache__ directories, \" \\ \".ropeproject directories,", "rev number needs to be done inside # write_version_py(), otherwise the import of", "< 3: raise RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry, but there are features of", "it when the contents of directories change. 
if os.path.exists('MANIFEST'): os.remove('MANIFEST') # This is", "Programming Language :: Python Programming Language :: Python :: 3.4 Topic :: Scientific/Engineering", "'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS =", "GIT_REVISION = \"Unknown\" return GIT_REVISION # BEFORE importing distutils, remove MANIFEST. distutils doesn't", "Physics Topic :: Scientific/Engineering :: Visualization Topic :: Software Development Topic :: Software", "sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For these actions, NumPy/SciPy", "for Python 2\\n\" \"you're on your own. It shouldn't be difficult\\n\" \"but you'll", "source tree.\" def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in", "--upgrade setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION = __doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR =", "print_function, \\ unicode_literals __docformat__ = 'restructuredtext en' import os import sys import shutil", "the source\\n\" \"directory to automate the backporting process.\\n\" \"You'll also need to hack", "using a Python 3 to Python 2\\n\" \"compatibility library such as the python", "try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION = out.strip().decode('ascii') except OSError: GIT_REVISION =", "env = {} for k in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if v", "= True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION = None if", "FULLVERSION += '.dev0+' + GIT_REVISION[:7] return FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = \"\"\"", "THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s' version = '%(version)s'", "way.\\n\" \"Until then, if you must install this for Python 2\\n\" \"you're on", "= \"\"\"\\ Development Status :: 4 - Beta Intended Audience :: 
Science/Research Intended", "built yet. builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description = \\ \"Remove build directories,", "setting a global variable so that the main # sknano __init__ can detect", "tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1, 9): raise RuntimeError except (AttributeError, ImportError,", "removed starting\\n\" \"in v0.3.7. Once/if I learn how to automate the\\n\" \"backporting process", "RuntimeError): install_requires += ['scipy==0.16.1'] # # Add six module to install_requires (used in", "install_requires += ['scipy==0.16.1'] # # Add six module to install_requires (used in numpydoc", "module to install_requires (used in numpydoc git submodule) # install_requires += ['six>=1.9'] #", "STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None,", "= sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, # the package can run out of", "done inside # write_version_py(), otherwise the import of sknano.version messes # up the", "import numpy numpy_version = \\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1, 9):", "['monty>=0.7.0', 'pymatgen>=3.2.4'] metadata = dict( name=DISTNAME, author=AUTHOR, author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL,", "this for Python 2\\n\" \"you're on your own. 
It shouldn't be difficult\\n\" \"but", "= VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): # must be a", "__doc__ LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:]) AUTHOR = '<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR", "= sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, # the package can", "package can run out of an .egg file include_package_data=True, ) if len(sys.argv) >=", "to do that unconditionally, # because we risk updating an installed numpy/scipy which", "}, cmdclass={'clean': CleanCommand}, zip_safe=False, # the package can run out of an .egg", "2-Clause' CLASSIFIERS = \"\"\"\\ Development Status :: 4 - Beta Intended Audience ::", "STABLEVERSION = None if STABLEVERSION is None: if ISRELEASED: STABLEVERSION = VERSION else:", "used to install Scipy when Numpy is not yet present in # the", "#if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4):", "provides a python script called\\n\" \"`pasteurize` that can be run on the source\\n\"", "Status :: 4 - Beta Intended Audience :: Science/Research Intended Audience :: Developers", "= 'restructuredtext en' import os import sys import shutil import subprocess from distutils.command.clean", "not None: env[k] = v # LANGUAGE is used on win32 env['LANGUAGE'] =", "short_version = '%(version)s' version = '%(version)s' full_version = '%(full_version)s' git_revision = '%(git_revision)s' release", "main # sknano __init__ can detect if it is being loaded by the", "Development Status :: 4 - Beta Intended Audience :: Science/Research Intended Audience ::", "in # the system. 
try: from setuptools import setup except ImportError: from distutils.core", "(MAJOR, MINOR, MICRO) STABLEVERSION = None if STABLEVERSION is None: if ISRELEASED: STABLEVERSION", "'<NAME>' AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc'", "coding: utf-8 -*- \"\"\"Python toolkit for generating and analyzing nanostructure data\"\"\" from __future__", "can detect if it is being loaded by the setup routine, to #", "len(sys.argv) >= 2 and \\ ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info',", "('--help-commands', 'egg_info', '--version', 'clean')): # For these actions, NumPy/SciPy are not required. #", "backporting process.\\n\" \"You'll also need to hack this setup script\\n\" \"to remove any", "future module to install requires # install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4']", "MICRO = 21 ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)", "\"in v0.3.7. 
Once/if I learn how to automate the\\n\" \"backporting process from the", "\"`pasteurize` that can be run on the source\\n\" \"directory to automate the backporting", "POSIX Operating System :: Unix Operating System :: MacOS Programming Language :: Python", "setuptools except ImportError: sys.exit(\"setuptools required for Python3 install.\\n\" \"`pip install --upgrade setuptools`\") DISTNAME", "parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package(): #", "metadata['version'] = FULLVERSION else: from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata) if", "= '%(stable_version)s' if not release: version = full_version \"\"\" FULLVERSION, GIT_REVISION = get_version_info()", "by the setup routine, to # avoid attempting to load components that aren't", "remove any exceptions that are raised when\\n\" \"executed under Python 2.\") #if sys.version_info[:2]", "distutils, remove MANIFEST. distutils doesn't properly # update it when the contents of", "the system. 
try: from setuptools import setup except ImportError: from distutils.core import setup", "MAINTAINER_EMAIL = AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience',", "STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1) # Return the GIT", "os.walk('sknano'): for filename in filenames: if filename.endswith(('.so', '.pyd', '.pyc', '.dll')): os.unlink(os.path.join(dirpath, filename)) for", "3 to Python 2\\n\" \"compatibility library such as the python `future`\\n\" \"module, which", "process from the setup script,\\n\" \"I will restore Python 2 support that way.\\n\"", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"Python toolkit for generating and analyzing", "up the build under Python 3. FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION =", "dirnames, filenames in os.walk('doc'): for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'):", "Audience :: Developers License :: OSI Approved :: BSD License Operating System ::", "Just if the minimum version is not installed, we may give it a", "Intended Audience :: Developers License :: OSI Approved :: BSD License Operating System", "('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames in os.walk('doc'): for dirname in", "build_requires[:] try: import scipy scipy_version = \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version <", "MICRO) STABLEVERSION = None if STABLEVERSION is None: if ISRELEASED: STABLEVERSION = VERSION", "maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f in CLASSIFIERS.split('\\n') if", "_f in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector', 
setup_requires=build_requires,", "# Just if the minimum version is not installed, we may give it", "distutils.command.clean import clean as Clean if sys.version_info[0] < 3: raise RuntimeError(\"Python version 3.4+", "classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"],", "v0.3.7. Once/if I learn how to automate the\\n\" \"backporting process from the setup", "get_version_info() metadata['version'] = FULLVERSION else: from numpy.distutils.core import setup metadata['configuration'] = configuration setup(**metadata)", "dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding the", "ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO -", "author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f in", "any exceptions that are raised when\\n\" \"executed under Python 2.\") #if sys.version_info[:2] <", "then, if you must install this for Python 2\\n\" \"you're on your own.", "raised when\\n\" \"executed under Python 2.\") #if sys.version_info[:2] < (2, 7) or (3,", "21 ISRELEASED = True VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) STABLEVERSION =", "# Return the GIT version as a string def git_version(): def _minimal_ext_cmd(cmd): #", "to automate the backporting process.\\n\" \"You'll also need to hack this setup script\\n\"", "in the source tree.\" def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames,", "full_version \"\"\" FULLVERSION, GIT_REVISION = get_version_info() a = open(filename, 'w') try: a.write(cnt %", "2 and \\ ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')):", "\"module, which 
provides a python script called\\n\" \"`pasteurize` that can be run on", "in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames", "installed numpy/scipy which fails too often. # Just if the minimum version is", "RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires += ['numpy==1.10.1'] install_requires = build_requires[:] try: import", "unicode_literals __docformat__ = 'restructuredtext en' import os import sys import shutil import subprocess", "advantage of and without\\n\" \"worrying about Python 2 compatibility.\\n\" \"Therefore, Python 2 support", "'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, # the package", "= None if STABLEVERSION is None: if ISRELEASED: STABLEVERSION = VERSION else: STABLEVERSION", "This is a bit (!) hackish: we are setting a global variable so", "System :: Unix Operating System :: MacOS Programming Language :: Python Programming Language", "License :: OSI Approved :: BSD License Operating System :: Microsoft :: Windows", "run on the source\\n\" \"directory to automate the backporting process.\\n\" \"You'll also need", "if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): # must be a source distribution,", "else: STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1) # Return the", "scipy_version = \\ tuple( list(map(int, scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14): raise RuntimeError", "%(isrelease)s stable_version = '%(stable_version)s' if not release: version = full_version \"\"\" FULLVERSION, GIT_REVISION", "scipy.version.short_version.split('.')[:3]))[:2]) if scipy_version < (0, 14): raise RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires", "support that way.\\n\" \"Until then, if you must install this for Python 2\\n\"", "{} 
for k in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if v is not", "can be run on the source\\n\" \"directory to automate the backporting process.\\n\" \"You'll", "version.git_revision else: GIT_REVISION = \"Unknown\" if not ISRELEASED: # FULLVERSION += '.dev' FULLVERSION", "difficult\\n\" \"but you'll have to manually backport the package\\n\" \"source code using a", "env['LC_ALL'] = 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out", "I learn how to automate the\\n\" \"backporting process from the setup script,\\n\" \"I", "None: env[k] = v # LANGUAGE is used on win32 env['LANGUAGE'] = 'C'", "= 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure',", "= 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene',", "Topic :: Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry Topic :: Scientific/Engineering :: Physics", "or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For these actions, NumPy/SciPy are", "a string def git_version(): def _minimal_ext_cmd(cmd): # construct minimal environment env = {}", "raise RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] # # Add six", "import shutil import subprocess from distutils.command.clean import clean as Clean if sys.version_info[0] <", "there are features of Python 3\\n\" \"that I want to take advantage of", "required.\") if sys.version_info[0] >= 3: import builtins else: import __builtin__ as builtins try:", "ImportError, RuntimeError): install_requires += ['scipy==0.16.1'] # # Add six module to install_requires (used", "to install requires # install_requires += ['future>=0.14.3'] install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4'] 
metadata =", "< (3, 4): if (3, 0) <= sys.version_info[:2] < (3, 4): raise RuntimeError(\"Python", "such as the python `future`\\n\" \"module, which provides a python script called\\n\" \"`pasteurize`", "shutil import subprocess from distutils.command.clean import clean as Clean if sys.version_info[0] < 3:", "\"you're on your own. It shouldn't be difficult\\n\" \"but you'll have to manually", "and compiled files in the source tree.\" def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build')", "description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS, classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],", "environment env = {} for k in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if", "FULLVERSION, GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = \"\"\" # THIS FILE IS GENERATED FROM", "3: import builtins else: import __builtin__ as builtins try: import setuptools except ImportError:", "an installed numpy/scipy which fails too often. 
# Just if the minimum version", "2\\n\" \"compatibility library such as the python `future`\\n\" \"module, which provides a python", "want to do that unconditionally, # because we risk updating an installed numpy/scipy", "Clean if sys.version_info[0] < 3: raise RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry, but there", "import builtins else: import __builtin__ as builtins try: import setuptools except ImportError: sys.exit(\"setuptools", "'.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) for dirpath, dirnames, filenames in os.walk('doc'): for dirname in dirnames:", "k in ['SYSTEMROOT', 'PATH']: v = os.environ.get(k) if v is not None: env[k]", "on the source\\n\" \"directory to automate the backporting process.\\n\" \"You'll also need to", ":: POSIX Operating System :: Unix Operating System :: MacOS Programming Language ::", "'analysis'] LICENSE = 'BSD 2-Clause' CLASSIFIERS = \"\"\"\\ Development Status :: 4 -", "sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, # the package can run", "global variable so that the main # sknano __init__ can detect if it", "the backporting process.\\n\" \"You'll also need to hack this setup script\\n\" \"to remove", "GIT_REVISION def write_version_py(filename='sknano/version.py'): cnt = \"\"\" # THIS FILE IS GENERATED FROM SCIKIT-NANO", "in sys.argv[1:] or sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean')): # For these actions,", "open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED),", "GENERATED FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s' version = '%(version)s' full_version = '%(full_version)s'", "to manually backport the package\\n\" \"source code using a Python 3 to Python", "os.environ.get(k) if v is not None: env[k] = v # LANGUAGE is used", "= dict( name=DISTNAME, author=AUTHOR, 
author_email=AUTHOR_EMAIL, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url=URL, download_url=DOWNLOAD_URL, license=LICENSE, keywords=KEYWORDS,", "\"compatibility library such as the python `future`\\n\" \"module, which provides a python script", "if _f], platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"], test_suite='nose.collector', setup_requires=build_requires, install_requires=install_requires, extras_require={ 'plotting':", ":: Software Development Topic :: Software Development :: Libraries :: Python Modules \"\"\"", "GIT_REVISION = version.git_revision else: GIT_REVISION = \"Unknown\" if not ISRELEASED: # FULLVERSION +=", "<= sys.version_info[:2] < (3, 4): raise RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0] >= 3:", "clean as Clean if sys.version_info[0] < 3: raise RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry,", "tree.\" def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames in os.walk('sknano'):", "FROM SCIKIT-NANO SETUP.PY short_version = '%(version)s' version = '%(version)s' full_version = '%(full_version)s' git_revision", "extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen =", "not required. # They are required to succeed without them when, for example,", "OSI Approved :: BSD License Operating System :: Microsoft :: Windows Operating System", "GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import", ":: Chemistry Topic :: Scientific/Engineering :: Physics Topic :: Scientific/Engineering :: Visualization Topic", "Python 3. 
FULLVERSION = VERSION if os.path.exists('.git'): GIT_REVISION = git_version() elif os.path.exists('sknano/version.py'): #", "= open(filename, 'w') try: a.write(cnt % {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease':", "MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano' KEYWORDS", "about Python 2 compatibility.\\n\" \"Therefore, Python 2 support was removed starting\\n\" \"in v0.3.7.", "'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close() def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration", "# # Add six module to install_requires (used in numpydoc git submodule) #", "out.strip().decode('ascii') except OSError: GIT_REVISION = \"Unknown\" return GIT_REVISION # BEFORE importing distutils, remove", "as a string def git_version(): def _minimal_ext_cmd(cmd): # construct minimal environment env =", "python script called\\n\" \"`pasteurize` that can be run on the source\\n\" \"directory to", "4): if (3, 0) <= sys.version_info[:2] < (3, 4): raise RuntimeError(\"Python 3.4+ required.\")", "Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('sknano') config.get_version('sknano/version.py') return config def setup_package():", "'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen = sknano.scripts.nanogen:main', 'nanogenui = sknano.scripts.nanogenui:main', 'sknano = sknano.scripts.sknano:main'], },", "present in # the system. 
try: from setuptools import setup except ImportError: from", "are raised when\\n\" \"executed under Python 2.\") #if sys.version_info[:2] < (2, 7) or", "MINOR = 3 MICRO = 21 ISRELEASED = True VERSION = '%d.%d.%d' %", "package\\n\" \"source code using a Python 3 to Python 2\\n\" \"compatibility library such", "the source tree.\" def run(self): Clean.run(self) if os.path.exists('build'): shutil.rmtree('build') for dirpath, dirnames, filenames", "release = %(isrelease)s stable_version = '%(stable_version)s' if not release: version = full_version \"\"\"", "# pip is used to install Scipy when Numpy is not yet present", "if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath, dirname)) def get_version_info(): # Adding the git", "\\ tuple( list(map(int, numpy.version.short_version.split('.')[:3]))[:2]) if numpy_version < (1, 9): raise RuntimeError except (AttributeError,", "'http://github.com/androomerrill/scikit-nano' KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis']", "dirname)) def get_version_info(): # Adding the git rev number needs to be done", "the minimum version is not installed, we may give it a try. 
build_requires", "def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True,", "% {'version': VERSION, 'full_version': FULLVERSION, 'git_revision': GIT_REVISION, 'isrelease': str(ISRELEASED), 'stable_version': STABLEVERSION}) finally: a.close()", "0) <= sys.version_info[:2] < (3, 4): raise RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0] >=", "are required to succeed without them when, for example, # pip is used", "do that unconditionally, # because we risk updating an installed numpy/scipy which fails", "RuntimeError(\"Python version 3.4+ required.\\n\\n\" \"Sorry, but there are features of Python 3\\n\" \"that", "__future__ import absolute_import, division, print_function, \\ unicode_literals __docformat__ = 'restructuredtext en' import os", "that can be run on the source\\n\" \"directory to automate the backporting process.\\n\"", "the import of sknano.version messes # up the build under Python 3. FULLVERSION", "(3, 0) <= sys.version_info[:2] < (3, 4): raise RuntimeError(\"Python 3.4+ required.\") if sys.version_info[0]", "subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) GIT_REVISION", "may give it a try. build_requires = [] try: import numpy numpy_version =", "NumPy/SciPy are not required. # They are required to succeed without them when,", "class CleanCommand(Clean): description = \\ \"Remove build directories, __pycache__ directories, \" \\ \".ropeproject", "Scipy when Numpy is not yet present in # the system. 
try: from", "the version file everytime write_version_py() # Figure out whether to add ``*_requires =", "KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure', 'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure', 'analysis'] LICENSE", "how to automate the\\n\" \"backporting process from the setup script,\\n\" \"I will restore", "filenames in os.walk('doc'): for dirname in dirnames: if dirname in ('__pycache__', '.ropeproject'): shutil.rmtree(os.path.join(dirpath,", "'sknano = sknano.scripts.sknano:main'], }, cmdclass={'clean': CleanCommand}, zip_safe=False, # the package can run out", "AUTHOR_EMAIL = '<EMAIL>' MAINTAINER = AUTHOR MAINTAINER_EMAIL = AUTHOR_EMAIL URL = 'http://scikit-nano.org/doc' DOWNLOAD_URL", "to load components that aren't built yet. builtins.__SKNANO_SETUP__ = True class CleanCommand(Clean): description", "it a try. build_requires = [] try: import numpy numpy_version = \\ tuple(", "doesn't properly # update it when the contents of directories change. if os.path.exists('MANIFEST'):", "scipy_version < (0, 14): raise RuntimeError except (AttributeError, ImportError, RuntimeError): install_requires += ['scipy==0.16.1']", "# write_version_py(), otherwise the import of sknano.version messes # up the build under", "give it a try. 
build_requires = [] try: import numpy numpy_version = \\", "imp.load_source('sknano.version', 'sknano/version.py') GIT_REVISION = version.git_revision else: GIT_REVISION = \"Unknown\" if not ISRELEASED: #", "must be a source distribution, use existing version file # load it as", "them when, for example, # pip is used to install Scipy when Numpy", "ImportError, RuntimeError): build_requires += ['numpy==1.10.1'] install_requires = build_requires[:] try: import scipy scipy_version =", "\"\"\"Python toolkit for generating and analyzing nanostructure data\"\"\" from __future__ import absolute_import, division,", "9): raise RuntimeError except (AttributeError, ImportError, RuntimeError): build_requires += ['numpy==1.10.1'] install_requires = build_requires[:]", "install Scipy when Numpy is not yet present in # the system. try:", "setup_package(): # Rewrite the version file everytime write_version_py() # Figure out whether to", "install_requires=install_requires, extras_require={ 'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1'] }, entry_points={ 'console_scripts': [ 'analyze_structure = sknano.scripts.analyze_structure:main', 'nanogen", "Python Programming Language :: Python :: 3.4 Topic :: Scientific/Engineering Topic :: Scientific/Engineering", ":: Libraries :: Python Modules \"\"\" MAJOR = 0 MINOR = 3 MICRO", ") if len(sys.argv) >= 2 and \\ ('--help' in sys.argv[1:] or sys.argv[1] in", "required for Python3 install.\\n\" \"`pip install --upgrade setuptools`\") DISTNAME = 'scikit-nano' DESCRIPTION =", "import setup except ImportError: from distutils.core import setup FULLVERSION, GIT_REVISION = get_version_info() metadata['version']", "\\ unicode_literals __docformat__ = 'restructuredtext en' import os import sys import shutil import", "= 'C' out = subprocess.Popen( cmd, stdout=subprocess.PIPE, env=env).communicate()[0] return out try: out =", ":: Python :: 3.4 Topic :: Scientific/Engineering Topic :: Scientific/Engineering :: Chemistry Topic", "for dirpath, 
dirnames, filenames in os.walk('sknano'): for filename in filenames: if filename.endswith(('.so', '.pyd',", "top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) config.set_options(ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True,", "Development :: Libraries :: Python Modules \"\"\" MAJOR = 0 MINOR = 3", "fails too often. # Just if the minimum version is not installed, we" ]
[ "update(self, delta_time): self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED def main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin", "MOVEMENT_SPEED = 5 class Player(arcade.Sprite): def update(self): self.center_x += self.change_x if self.left <", "if self.left < 0: self.left = 0 elif self.right > SCREEN_WIDTH - 1:", "def main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game') arcade.run() return game if __name__", "MOVEMENT_SPEED def main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game') arcade.run() return game if", "self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 class MyGame(arcade.Window): def", "arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED def main(): game =", "update(self): self.center_x += self.change_x if self.left < 0: self.left = 0 elif self.right", "0 elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 class", "= SCREEN_WIDTH - 1 class MyGame(arcade.Window): def __init__(self, width, height, title): super().__init__(width, height,", "title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5) self.player.center_y = 20 self.all_sprites_list", "self.player = Player('player.png', 0.5) self.player.center_y = 20 self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self):", "= MOVEMENT_SPEED def main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game') arcade.run() return game", "self.right = SCREEN_WIDTH - 1 class MyGame(arcade.Window): def __init__(self, width, height, title): super().__init__(width,", "= 5 class Player(arcade.Sprite): def update(self): self.center_x += self.change_x if self.left < 0:", "def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update() self.player.change_x = 
MOVEMENT_SPEED def main():", "+= self.change_x if self.left < 0: self.left = 0 elif self.right > SCREEN_WIDTH", "__init__(self, width, height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5) self.player.center_y", "> SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 class MyGame(arcade.Window): def __init__(self,", "self.center_x += self.change_x if self.left < 0: self.left = 0 elif self.right >", "600 SCREEN_HEIGHT = 600 MOVEMENT_SPEED = 5 class Player(arcade.Sprite): def update(self): self.center_x +=", "SCREEN_HEIGHT = 600 MOVEMENT_SPEED = 5 class Player(arcade.Sprite): def update(self): self.center_x += self.change_x", "self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update() self.player.change_x", "main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game') arcade.run() return game if __name__ ==", "self.left = 0 elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH -", "- 1: self.right = SCREEN_WIDTH - 1 class MyGame(arcade.Window): def __init__(self, width, height,", "= 600 MOVEMENT_SPEED = 5 class Player(arcade.Sprite): def update(self): self.center_x += self.change_x if", "class Player(arcade.Sprite): def update(self): self.center_x += self.change_x if self.left < 0: self.left =", "arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED", "self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED def main(): game = MyGame(SCREEN_WIDTH,", "5 class Player(arcade.Sprite): def update(self): self.center_x += self.change_x if self.left < 0: self.left", "0: self.left = 0 
elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH", "self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED def", "600 MOVEMENT_SPEED = 5 class Player(arcade.Sprite): def update(self): self.center_x += self.change_x if self.left", "delta_time): self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED def main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game')", "Player('player.png', 0.5) self.player.center_y = 20 self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw()", "= Player('player.png', 0.5) self.player.center_y = 20 self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render()", "width, height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5) self.player.center_y =", "self.left < 0: self.left = 0 elif self.right > SCREEN_WIDTH - 1: self.right", "0.5) self.player.center_y = 20 self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def", "= MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game') arcade.run() return game if __name__ == '__main__': main()", "title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5) self.player.center_y = 20 self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player)", "< 0: self.left = 0 elif self.right > SCREEN_WIDTH - 1: self.right =", "height, title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5) self.player.center_y = 20 self.all_sprites_list = arcade.SpriteList()", "SCREEN_WIDTH - 1 class 
MyGame(arcade.Window): def __init__(self, width, height, title): super().__init__(width, height, title)", "arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5) self.player.center_y = 20 self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player) def", "20 self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update()", "- 1 class MyGame(arcade.Window): def __init__(self, width, height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT)", "self.player.change_x = MOVEMENT_SPEED def main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game') arcade.run() return", "Player(arcade.Sprite): def update(self): self.center_x += self.change_x if self.left < 0: self.left = 0", "def __init__(self, width, height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5)", "def update(self, delta_time): self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED def main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT,", "1: self.right = SCREEN_WIDTH - 1 class MyGame(arcade.Window): def __init__(self, width, height, title):", "self.player.center_y = 20 self.all_sprites_list = arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self,", "SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 class MyGame(arcade.Window): def __init__(self, width,", "game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game') arcade.run() return game if __name__ == '__main__':", "= 600 SCREEN_HEIGHT = 600 MOVEMENT_SPEED = 5 class Player(arcade.Sprite): def update(self): self.center_x", "import arcade SCREEN_WIDTH = 600 SCREEN_HEIGHT = 600 MOVEMENT_SPEED = 5 class 
Player(arcade.Sprite):", "1 class MyGame(arcade.Window): def __init__(self, width, height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player", "class MyGame(arcade.Window): def __init__(self, width, height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player =", "on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED def main(): game", "self.all_sprites_list.update() self.player.change_x = MOVEMENT_SPEED def main(): game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game') arcade.run()", "SCREEN_WIDTH = 600 SCREEN_HEIGHT = 600 MOVEMENT_SPEED = 5 class Player(arcade.Sprite): def update(self):", "arcade SCREEN_WIDTH = 600 SCREEN_HEIGHT = 600 MOVEMENT_SPEED = 5 class Player(arcade.Sprite): def", "self.change_x if self.left < 0: self.left = 0 elif self.right > SCREEN_WIDTH -", "elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 class MyGame(arcade.Window):", "height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5) self.player.center_y = 20", "super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png', 0.5) self.player.center_y = 20 self.all_sprites_list =", "def update(self): self.center_x += self.change_x if self.left < 0: self.left = 0 elif", "= arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time): self.all_sprites_list.update() self.player.change_x =", "MyGame(arcade.Window): def __init__(self, width, height, title): super().__init__(width, height, title) arcade.set_background_color(arcade.color.WHEAT) self.player = Player('player.png',", "= 20 self.all_sprites_list = 
arcade.SpriteList() self.all_sprites_list.append(self.player) def on_draw(self): arcade.start_render() self.all_sprites_list.draw() def update(self, delta_time):", "= 0 elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1" ]
[ "''' Retrieves the host. (Helps with debugging locally) - Arguments: - service: a", "host. (Helps with debugging locally) - Arguments: - service: a Docker service -", "locally) - Arguments: - service: a Docker service - Returns: a string of", "service: a Docker service - Returns: a string of either localhost or a", "def get_host(service: str): ''' Retrieves the host. (Helps with debugging locally) - Arguments:", "either localhost or a Docker service ''' inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False) return service", "with debugging locally) - Arguments: - service: a Docker service - Returns: a", "Docker service - Returns: a string of either localhost or a Docker service", "a Docker service - Returns: a string of either localhost or a Docker", "get_host(service: str): ''' Retrieves the host. (Helps with debugging locally) - Arguments: -", "service - Returns: a string of either localhost or a Docker service '''", "Arguments: - service: a Docker service - Returns: a string of either localhost", "string of either localhost or a Docker service ''' inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False)", "a Docker service ''' inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False) return service if inside_docker else", "import os def get_host(service: str): ''' Retrieves the host. (Helps with debugging locally)", "or a Docker service ''' inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False) return service if inside_docker", "os def get_host(service: str): ''' Retrieves the host. (Helps with debugging locally) -", "the host. 
(Helps with debugging locally) - Arguments: - service: a Docker service", "- service: a Docker service - Returns: a string of either localhost or", "localhost or a Docker service ''' inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False) return service if", "- Returns: a string of either localhost or a Docker service ''' inside_docker", "- Arguments: - service: a Docker service - Returns: a string of either", "of either localhost or a Docker service ''' inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False) return", "Docker service ''' inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False) return service if inside_docker else 'localhost'", "str): ''' Retrieves the host. (Helps with debugging locally) - Arguments: - service:", "a string of either localhost or a Docker service ''' inside_docker = os.environ.get('IS_DOCKER_CONTAINER',", "debugging locally) - Arguments: - service: a Docker service - Returns: a string", "Returns: a string of either localhost or a Docker service ''' inside_docker =", "Retrieves the host. (Helps with debugging locally) - Arguments: - service: a Docker", "(Helps with debugging locally) - Arguments: - service: a Docker service - Returns:" ]
[ "#!/usr/bin/env python from build import ninja_common build = ninja_common.Build(\"fishbowl/body-frame-calc\") files = [ 'main.cpp',", "import ninja_common build = ninja_common.Build(\"fishbowl/body-frame-calc\") files = [ 'main.cpp', ] build.build_cmd('auv-body-frame-calc', files, pkg_confs=['eigen3'],", "build import ninja_common build = ninja_common.Build(\"fishbowl/body-frame-calc\") files = [ 'main.cpp', ] build.build_cmd('auv-body-frame-calc', files,", "python from build import ninja_common build = ninja_common.Build(\"fishbowl/body-frame-calc\") files = [ 'main.cpp', ]", "= ninja_common.Build(\"fishbowl/body-frame-calc\") files = [ 'main.cpp', ] build.build_cmd('auv-body-frame-calc', files, pkg_confs=['eigen3'], auv_deps=[], lflags=[], cflags=[])", "build = ninja_common.Build(\"fishbowl/body-frame-calc\") files = [ 'main.cpp', ] build.build_cmd('auv-body-frame-calc', files, pkg_confs=['eigen3'], auv_deps=[], lflags=[],", "ninja_common build = ninja_common.Build(\"fishbowl/body-frame-calc\") files = [ 'main.cpp', ] build.build_cmd('auv-body-frame-calc', files, pkg_confs=['eigen3'], auv_deps=[],", "from build import ninja_common build = ninja_common.Build(\"fishbowl/body-frame-calc\") files = [ 'main.cpp', ] build.build_cmd('auv-body-frame-calc'," ]
[ "<filename>xs/utils/data/dataset.py class DataSet: def __init__(self, *datas): self.datas = list(datas) def __len__(self): return len(self.datas[0])", "return len(self.datas[0]) def __getitem__(self, item): ret_list = [] for data in self.datas: ret_list.append(data[item])", "def __getitem__(self, item): ret_list = [] for data in self.datas: ret_list.append(data[item]) return ret_list", "__init__(self, *datas): self.datas = list(datas) def __len__(self): return len(self.datas[0]) def __getitem__(self, item): ret_list", "self.datas = list(datas) def __len__(self): return len(self.datas[0]) def __getitem__(self, item): ret_list = []", "*datas): self.datas = list(datas) def __len__(self): return len(self.datas[0]) def __getitem__(self, item): ret_list =", "= list(datas) def __len__(self): return len(self.datas[0]) def __getitem__(self, item): ret_list = [] for", "def __len__(self): return len(self.datas[0]) def __getitem__(self, item): ret_list = [] for data in", "__len__(self): return len(self.datas[0]) def __getitem__(self, item): ret_list = [] for data in self.datas:", "class DataSet: def __init__(self, *datas): self.datas = list(datas) def __len__(self): return len(self.datas[0]) def", "len(self.datas[0]) def __getitem__(self, item): ret_list = [] for data in self.datas: ret_list.append(data[item]) return", "def __init__(self, *datas): self.datas = list(datas) def __len__(self): return len(self.datas[0]) def __getitem__(self, item):", "list(datas) def __len__(self): return len(self.datas[0]) def __getitem__(self, item): ret_list = [] for data", "DataSet: def __init__(self, *datas): self.datas = list(datas) def __len__(self): return len(self.datas[0]) def __getitem__(self," ]
[ "as cv import numpy as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK IMAGE AND OTHER", "as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK IMAGE AND OTHER SHOULD HAVE THE SAME", "CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK IMAGE AND OTHER SHOULD HAVE THE SAME DIMENSIONS OTHER", "<reponame>Pineapple-1/open-cv # FOR FOCUSING ANY THING WE USE MASKING import cv2 as cv", "OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED IMAGE MASKIMG = cv.bitwise_and(CATS,CATS,mask=MASK) cv.imshow('MASKED',MASKIMG) cv.imshow('MASK',MASK)", "FOCUSING ANY THING WE USE MASKING import cv2 as cv import numpy as", "ANY THING WE USE MASKING import cv2 as cv import numpy as np", "MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED IMAGE MASKIMG = cv.bitwise_and(CATS,CATS,mask=MASK) cv.imshow('MASKED',MASKIMG) cv.imshow('MASK',MASK) cv.imshow('CATS',CATS) cv.waitKey(0)", "FOR FOCUSING ANY THING WE USE MASKING import cv2 as cv import numpy", "# WE CAN ALSO DO THIS WITH OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) #", "WE CAN ALSO DO THIS WITH OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED", "np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK IMAGE AND OTHER SHOULD HAVE THE SAME DIMENSIONS", "USE MASKING import cv2 as cv import numpy as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT", "MASKING import cv2 as cv import numpy as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK", "WISE IT WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO DO THIS", "THIS WITH OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED IMAGE MASKIMG = cv.bitwise_and(CATS,CATS,mask=MASK)", "HAVE THE SAME DIMENSIONS OTHER WISE IT WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') #", "import cv2 as cv import numpy as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK 
IMAGE", "MASK IMAGE AND OTHER SHOULD HAVE THE SAME DIMENSIONS OTHER WISE IT WONT", "OTHER SHOULD HAVE THE SAME DIMENSIONS OTHER WISE IT WONT WORK BLANK =", "IT WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO DO THIS WITH", "DO THIS WITH OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED IMAGE MASKIMG =", "SAME DIMENSIONS OTHER WISE IT WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN", "import numpy as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK IMAGE AND OTHER SHOULD HAVE", "IMPORTANT MASK IMAGE AND OTHER SHOULD HAVE THE SAME DIMENSIONS OTHER WISE IT", "THE SAME DIMENSIONS OTHER WISE IT WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE", "# FOR FOCUSING ANY THING WE USE MASKING import cv2 as cv import", "ALSO DO THIS WITH OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED IMAGE MASKIMG", "WE USE MASKING import cv2 as cv import numpy as np CATS=cv.imread('Photos/cats.jpg') #", "cv2 as cv import numpy as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK IMAGE AND", "= np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO DO THIS WITH OTHER SHAPES MASK =", "DIMENSIONS OTHER WISE IT WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO", "SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED IMAGE MASKIMG = cv.bitwise_and(CATS,CATS,mask=MASK) cv.imshow('MASKED',MASKIMG) cv.imshow('MASK',MASK) cv.imshow('CATS',CATS)", "WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO DO THIS WITH OTHER", "BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO DO THIS WITH OTHER SHAPES MASK", "WITH OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED IMAGE MASKIMG = cv.bitwise_and(CATS,CATS,mask=MASK) cv.imshow('MASKED',MASKIMG)", "numpy as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK IMAGE AND OTHER SHOULD HAVE 
THE", "THING WE USE MASKING import cv2 as cv import numpy as np CATS=cv.imread('Photos/cats.jpg')", "IMAGE AND OTHER SHOULD HAVE THE SAME DIMENSIONS OTHER WISE IT WONT WORK", "WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO DO THIS WITH OTHER SHAPES", "AND OTHER SHOULD HAVE THE SAME DIMENSIONS OTHER WISE IT WONT WORK BLANK", "np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO DO THIS WITH OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1)", "CAN ALSO DO THIS WITH OTHER SHAPES MASK = cv.circle(BLANK,(CATS.shape[1]//2,CATS.shape[0]//2),160,255,-1) # MASKED IMAGE", "SHOULD HAVE THE SAME DIMENSIONS OTHER WISE IT WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8')", "OTHER WISE IT WONT WORK BLANK = np.zeros(CATS.shape[:2],dtype='uint8') # WE CAN ALSO DO", "cv import numpy as np CATS=cv.imread('Photos/cats.jpg') # IMPORTANT MASK IMAGE AND OTHER SHOULD", "# IMPORTANT MASK IMAGE AND OTHER SHOULD HAVE THE SAME DIMENSIONS OTHER WISE" ]
[ "writing, software # distributed under the License is distributed on an \"AS IS\"", "the License. # # Contributer(s): <NAME>. (manionline.<EMAIL>) from __future__ import print_function import sys", "file=sys.stderr) prev_tokens = len(wc.words) prev_msgs = wc.update_counter print(\"Total: %s messages, %s tokens\" %", "count), file=sys.stdout) def main(): if len(sys.argv) == 1: print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot", "self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" % (word, count), file=sys.stdout) def main(): if len(sys.argv)", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "pot-stat.py potfile1.pot potfile2.pot ...\") exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter() prev_msgs", "in self.blacklist: if w in self.words.keys(): self.words[w] += 1 else: self.words[w] = 1", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "wc = WordCounter() prev_msgs = 0 prev_tokens = 0 for filename in sys.argv[1:]:", "0 def update(self, text): self.update_counter += 1 words = self.tokenizer.split(text) for w in", "License. # You may obtain a copy of the License at # #", "0 prev_tokens = 0 for filename in sys.argv[1:]: with open(filename) as lines: for", "import sys import re import operator class WordCounter: def __init__(self, blacklist = None):", "if isinstance(blacklist, set) else set() self.reset() def reset(self): self.words = dict() self.update_counter =", "# limitations under the License. # # Contributer(s): <NAME>. 
(manionline.<EMAIL>) from __future__ import", "law or agreed to in writing, software # distributed under the License is", "words = self.tokenizer.split(text) for w in words: w = w.lower() if len(w)>1 and", "the License for the specific language governing permissions and # limitations under the", "len(w)>1 and w not in self.blacklist: if w in self.words.keys(): self.words[w] += 1", "== 3: wc.update(match[1]) print(\"%s: %s messages, %s tokens\" % (filename, wc.update_counter - prev_msgs,", "compliance with the License. # You may obtain a copy of the License", "python # -*- coding: utf-8 -*- # Licensed under the Apache License, Version", "License. # # Contributer(s): <NAME>. (manionline.<EMAIL>) from __future__ import print_function import sys import", "import operator class WordCounter: def __init__(self, blacklist = None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist", "% (word, count), file=sys.stdout) def main(): if len(sys.argv) == 1: print(\"Usage: python pot-stat.py", "(filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens = len(wc.words) prev_msgs =", "in lines: match = msgid.split(l) if len(match) == 3: wc.update(match[1]) print(\"%s: %s messages,", "self.blacklist = blacklist if isinstance(blacklist, set) else set() self.reset() def reset(self): self.words =", "set) else set() self.reset() def reset(self): self.words = dict() self.update_counter = 0 def", "in words: w = w.lower() if len(w)>1 and w not in self.blacklist: if", "operator class WordCounter: def __init__(self, blacklist = None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist =", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "prev_tokens = len(wc.words) prev_msgs = wc.update_counter print(\"Total: %s messages, %s tokens\" % (wc.update_counter,", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "l in lines: match = msgid.split(l) if len(match) == 3: wc.update(match[1]) print(\"%s: %s", "+= 1 words = self.tokenizer.split(text) for w in words: w = w.lower() if", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "permissions and # limitations under the License. # # Contributer(s): <NAME>. (manionline.<EMAIL>) from", "= msgid.split(l) if len(match) == 3: wc.update(match[1]) print(\"%s: %s messages, %s tokens\" %", "msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter() prev_msgs = 0 prev_tokens = 0", "= None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if isinstance(blacklist, set) else set()", "blacklist if isinstance(blacklist, set) else set() self.reset() def reset(self): self.words = dict() self.update_counter", "def main(): if len(sys.argv) == 1: print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\") exit(1)", "ANY KIND, either express or implied. # See the License for the specific", "__init__(self, blacklist = None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if isinstance(blacklist, set)", "3: wc.update(match[1]) print(\"%s: %s messages, %s tokens\" % (filename, wc.update_counter - prev_msgs, len(wc.words)", "if len(sys.argv) == 1: print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\") exit(1) msgid =", "- prev_tokens), file=sys.stderr) prev_tokens = len(wc.words) prev_msgs = wc.update_counter print(\"Total: %s messages, %s", "= self.tokenizer.split(text) for w in words: w = w.lower() if len(w)>1 and w", "<NAME>. 
(manionline.<EMAIL>) from __future__ import print_function import sys import re import operator class", "under the License. # # Contributer(s): <NAME>. (manionline.<EMAIL>) from __future__ import print_function import", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter() prev_msgs = 0 prev_tokens =", "== 1: print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\") exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\")", "use this file except in compliance with the License. # You may obtain", "...\") exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter() prev_msgs = 0 prev_tokens", "match = msgid.split(l) if len(match) == 3: wc.update(match[1]) print(\"%s: %s messages, %s tokens\"", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "blacklist = None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if isinstance(blacklist, set) else", "-*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the", "not use this file except in compliance with the License. # You may", "wc.update_counter print(\"Total: %s messages, %s tokens\" % (wc.update_counter, len(wc.words)), file=sys.stderr) wc.toCSV() if __name__", "self.reset() def reset(self): self.words = dict() self.update_counter = 0 def update(self, text): self.update_counter", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "= 1 def toCSV(self): for word, count in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s,", "utf-8 -*- # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "See the License for the specific language governing permissions and # limitations under", "toCSV(self): for word, count in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" % (word,", "WordCounter() prev_msgs = 0 prev_tokens = 0 for filename in sys.argv[1:]: with open(filename)", "with open(filename) as lines: for l in lines: match = msgid.split(l) if len(match)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "governing permissions and # limitations under the License. # # Contributer(s): <NAME>. (manionline.<EMAIL>)", "self.blacklist: if w in self.words.keys(): self.words[w] += 1 else: self.words[w] = 1 def", "License, Version 2.0 (the \"License\"); # you may not use this file except", "re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter() prev_msgs = 0 prev_tokens = 0 for filename", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "len(wc.words) prev_msgs = wc.update_counter print(\"Total: %s messages, %s tokens\" % (wc.update_counter, len(wc.words)), file=sys.stderr)", "w.lower() if len(w)>1 and w not in self.blacklist: if w in self.words.keys(): self.words[w]", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if isinstance(blacklist, set) else set() self.reset() def reset(self):", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "prev_msgs = wc.update_counter print(\"Total: %s messages, %s tokens\" % (wc.update_counter, len(wc.words)), file=sys.stderr) wc.toCSV()", "# 
http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "class WordCounter: def __init__(self, blacklist = None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist", "if len(match) == 3: wc.update(match[1]) print(\"%s: %s messages, %s tokens\" % (filename, wc.update_counter", "+= 1 else: self.words[w] = 1 def toCSV(self): for word, count in sorted(", "print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\") exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc =", "OF ANY KIND, either express or implied. # See the License for the", "msgid.split(l) if len(match) == 3: wc.update(match[1]) print(\"%s: %s messages, %s tokens\" % (filename,", "\\\"(.*)\\\"\") wc = WordCounter() prev_msgs = 0 prev_tokens = 0 for filename in", "= re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter() prev_msgs = 0 prev_tokens = 0 for", "update(self, text): self.update_counter += 1 words = self.tokenizer.split(text) for w in words: w", "2.0 (the \"License\"); # you may not use this file except in compliance", "1: print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\") exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc", "% (filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens = len(wc.words) prev_msgs", "self.update_counter += 1 words = self.tokenizer.split(text) for w in words: w = w.lower()", "1 else: self.words[w] = 1 def toCSV(self): for word, count in sorted( self.words.items(),", "0 for filename in sys.argv[1:]: with open(filename) as lines: for l in lines:", "open(filename) as lines: for l in lines: match = msgid.split(l) if len(match) ==", "= blacklist if isinstance(blacklist, set) else set() self.reset() def reset(self): self.words = dict()", "# you may not use this file except in compliance with the License.", "None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if 
isinstance(blacklist, set) else set() self.reset()", "= w.lower() if len(w)>1 and w not in self.blacklist: if w in self.words.keys():", "file=sys.stdout) def main(): if len(sys.argv) == 1: print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\")", "= dict() self.update_counter = 0 def update(self, text): self.update_counter += 1 words =", "self.words[w] = 1 def toCSV(self): for word, count in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True):", "for the specific language governing permissions and # limitations under the License. #", "print(\"%s, %s\" % (word, count), file=sys.stdout) def main(): if len(sys.argv) == 1: print(\"Usage:", "agreed to in writing, software # distributed under the License is distributed on", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "%s messages, %s tokens\" % (wc.update_counter, len(wc.words)), file=sys.stderr) wc.toCSV() if __name__ == \"__main__\":", "print(\"Total: %s messages, %s tokens\" % (wc.update_counter, len(wc.words)), file=sys.stderr) wc.toCSV() if __name__ ==", "filename in sys.argv[1:]: with open(filename) as lines: for l in lines: match =", "import re import operator class WordCounter: def __init__(self, blacklist = None): self.tokenizer =", "(the \"License\"); # you may not use this file except in compliance with", "words: w = w.lower() if len(w)>1 and w not in self.blacklist: if w", "python pot-stat.py potfile1.pot potfile2.pot ...\") exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter()", "else: self.words[w] = 1 def toCSV(self): for word, count in sorted( self.words.items(), key=operator.itemgetter(1),", "for word, count in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" % (word, count),", "lines: for l in lines: match = msgid.split(l) if len(match) == 3: wc.update(match[1])", "len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens = len(wc.words) prev_msgs = wc.update_counter print(\"Total: %s 
messages,", "# # Unless required by applicable law or agreed to in writing, software", "%s messages, %s tokens\" % (filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr)", "express or implied. # See the License for the specific language governing permissions", "def update(self, text): self.update_counter += 1 words = self.tokenizer.split(text) for w in words:", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "text): self.update_counter += 1 words = self.tokenizer.split(text) for w in words: w =", "# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0", "except in compliance with the License. # You may obtain a copy of", "language governing permissions and # limitations under the License. # # Contributer(s): <NAME>.", "Contributer(s): <NAME>. (manionline.<EMAIL>) from __future__ import print_function import sys import re import operator", "by applicable law or agreed to in writing, software # distributed under the", "%s\" % (word, count), file=sys.stdout) def main(): if len(sys.argv) == 1: print(\"Usage: python", "main(): if len(sys.argv) == 1: print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\") exit(1) msgid", "potfile2.pot ...\") exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter() prev_msgs = 0", "set() self.reset() def reset(self): self.words = dict() self.update_counter = 0 def update(self, text):", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "and # limitations under the License. # # Contributer(s): <NAME>. (manionline.<EMAIL>) from __future__", "self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if isinstance(blacklist, set) else set() self.reset() def", "either express or implied. 
# See the License for the specific language governing", "def toCSV(self): for word, count in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" %", "wc.update(match[1]) print(\"%s: %s messages, %s tokens\" % (filename, wc.update_counter - prev_msgs, len(wc.words) -", "from __future__ import print_function import sys import re import operator class WordCounter: def", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "= 0 for filename in sys.argv[1:]: with open(filename) as lines: for l in", "= wc.update_counter print(\"Total: %s messages, %s tokens\" % (wc.update_counter, len(wc.words)), file=sys.stderr) wc.toCSV() if", "sys.argv[1:]: with open(filename) as lines: for l in lines: match = msgid.split(l) if", "word, count in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" % (word, count), file=sys.stdout)", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" % (word, count), file=sys.stdout) def main(): if len(sys.argv) ==", "#!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed under the Apache License,", "for filename in sys.argv[1:]: with open(filename) as lines: for l in lines: match", "print(\"%s: %s messages, %s tokens\" % (filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens),", "for l in lines: match = msgid.split(l) if len(match) == 3: wc.update(match[1]) print(\"%s:", "for w in words: w = w.lower() if len(w)>1 and w not in", "prev_tokens), file=sys.stderr) prev_tokens = len(wc.words) prev_msgs = wc.update_counter print(\"Total: %s messages, %s tokens\"", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "isinstance(blacklist, set) else set() self.reset() def reset(self): self.words = dict() self.update_counter = 0", "file except in compliance with the License. 
# You may obtain a copy", "re import operator class WordCounter: def __init__(self, blacklist = None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\")", "= WordCounter() prev_msgs = 0 prev_tokens = 0 for filename in sys.argv[1:]: with", "wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens = len(wc.words) prev_msgs = wc.update_counter", "count in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" % (word, count), file=sys.stdout) def", "if len(w)>1 and w not in self.blacklist: if w in self.words.keys(): self.words[w] +=", "as lines: for l in lines: match = msgid.split(l) if len(match) == 3:", "print_function import sys import re import operator class WordCounter: def __init__(self, blacklist =", "- prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens = len(wc.words) prev_msgs = wc.update_counter print(\"Total:", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "specific language governing permissions and # limitations under the License. # # Contributer(s):", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "(manionline.<EMAIL>) from __future__ import print_function import sys import re import operator class WordCounter:", "the License. 
# You may obtain a copy of the License at #", "self.update_counter = 0 def update(self, text): self.update_counter += 1 words = self.tokenizer.split(text) for", "= 0 prev_tokens = 0 for filename in sys.argv[1:]: with open(filename) as lines:", "messages, %s tokens\" % (filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens", "re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if isinstance(blacklist, set) else set() self.reset() def reset(self): self.words", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self.words[w] += 1 else: self.words[w] = 1 def toCSV(self): for word, count in", "sys import re import operator class WordCounter: def __init__(self, blacklist = None): self.tokenizer", "the specific language governing permissions and # limitations under the License. # #", "w in words: w = w.lower() if len(w)>1 and w not in self.blacklist:", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. 
# See the License for the specific language governing permissions and #", "if w in self.words.keys(): self.words[w] += 1 else: self.words[w] = 1 def toCSV(self):", "%s tokens\" % (filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens =", "\"License\"); # you may not use this file except in compliance with the", "len(sys.argv) == 1: print(\"Usage: python pot-stat.py potfile1.pot potfile2.pot ...\") exit(1) msgid = re.compile(\"msgid", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" % (word, count), file=sys.stdout) def main(): if", "required by applicable law or agreed to in writing, software # distributed under", "w = w.lower() if len(w)>1 and w not in self.blacklist: if w in", "(word, count), file=sys.stdout) def main(): if len(sys.argv) == 1: print(\"Usage: python pot-stat.py potfile1.pot", "-*- # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "1 def toCSV(self): for word, count in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\"", "<filename>pot-stat.py #!/usr/bin/env python # -*- coding: utf-8 -*- # Licensed under the Apache", "applicable law or agreed to in writing, software # distributed under the License", "not in self.blacklist: if w in self.words.keys(): self.words[w] += 1 else: self.words[w] =", "# # Contributer(s): <NAME>. (manionline.<EMAIL>) from __future__ import print_function import sys import re", "in sys.argv[1:]: with open(filename) as lines: for l in lines: match = msgid.split(l)", "limitations under the License. # # Contributer(s): <NAME>. 
(manionline.<EMAIL>) from __future__ import print_function", "__future__ import print_function import sys import re import operator class WordCounter: def __init__(self,", "prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens = len(wc.words) prev_msgs = wc.update_counter print(\"Total: %s", "len(match) == 3: wc.update(match[1]) print(\"%s: %s messages, %s tokens\" % (filename, wc.update_counter -", "dict() self.update_counter = 0 def update(self, text): self.update_counter += 1 words = self.tokenizer.split(text)", "WordCounter: def __init__(self, blacklist = None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if", "w not in self.blacklist: if w in self.words.keys(): self.words[w] += 1 else: self.words[w]", "self.words.keys(): self.words[w] += 1 else: self.words[w] = 1 def toCSV(self): for word, count", "in self.words.keys(): self.words[w] += 1 else: self.words[w] = 1 def toCSV(self): for word,", "w in self.words.keys(): self.words[w] += 1 else: self.words[w] = 1 def toCSV(self): for", "or agreed to in writing, software # distributed under the License is distributed", "reverse=True): print(\"%s, %s\" % (word, count), file=sys.stdout) def main(): if len(sys.argv) == 1:", "prev_msgs = 0 prev_tokens = 0 for filename in sys.argv[1:]: with open(filename) as", "= len(wc.words) prev_msgs = wc.update_counter print(\"Total: %s messages, %s tokens\" % (wc.update_counter, len(wc.words)),", "or implied. # See the License for the specific language governing permissions and", "else set() self.reset() def reset(self): self.words = dict() self.update_counter = 0 def update(self,", "# Contributer(s): <NAME>. 
(manionline.<EMAIL>) from __future__ import print_function import sys import re import", "= 0 def update(self, text): self.update_counter += 1 words = self.tokenizer.split(text) for w", "potfile1.pot potfile2.pot ...\") exit(1) msgid = re.compile(\"msgid \\\"(.*)\\\"\") wc = WordCounter() prev_msgs =", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "in sorted( self.words.items(), key=operator.itemgetter(1), reverse=True): print(\"%s, %s\" % (word, count), file=sys.stdout) def main():", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "1 words = self.tokenizer.split(text) for w in words: w = w.lower() if len(w)>1", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the \"License\");", "self.words = dict() self.update_counter = 0 def update(self, text): self.update_counter += 1 words", "self.tokenizer.split(text) for w in words: w = w.lower() if len(w)>1 and w not", "def reset(self): self.words = dict() self.update_counter = 0 def update(self, text): self.update_counter +=", "with the License. 
# You may obtain a copy of the License at", "messages, %s tokens\" % (wc.update_counter, len(wc.words)), file=sys.stderr) wc.toCSV() if __name__ == \"__main__\": main()", "def __init__(self, blacklist = None): self.tokenizer = re.compile(\"\\W+\")#re.compile(\"\\b(\\w)+\\b\") self.blacklist = blacklist if isinstance(blacklist,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "reset(self): self.words = dict() self.update_counter = 0 def update(self, text): self.update_counter += 1", "import print_function import sys import re import operator class WordCounter: def __init__(self, blacklist", "in writing, software # distributed under the License is distributed on an \"AS", "lines: match = msgid.split(l) if len(match) == 3: wc.update(match[1]) print(\"%s: %s messages, %s", "prev_tokens = 0 for filename in sys.argv[1:]: with open(filename) as lines: for l", "and w not in self.blacklist: if w in self.words.keys(): self.words[w] += 1 else:", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "tokens\" % (filename, wc.update_counter - prev_msgs, len(wc.words) - prev_tokens), file=sys.stderr) prev_tokens = len(wc.words)" ]
[ "url_for from flask import render_template from bson.objectid import ObjectId from certifico import app", ") return 'Os certificados do evento %s foram ' \\ 'enviados.' % certificate.inserted_id", "import redis_queue from certifico.mail import send_email from certifico.forms import CertificateForm def create_certificate(): form", "'logo': form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list }) for p in form.participants_list: redis_queue.enqueue( send_email,", "import abort from flask import url_for from flask import render_template from bson.objectid import", "lambda p: p.get('email') == email, certificate.get('participants') )) except StopIteration: return abort(404) message =", "not email: return abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try: participant =", "CertificateForm() if form.validate_on_submit(): certificate = mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list })", "form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list }) for p in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'),", "print_certificate(certificate): email = request.args.get('email') if not email: return abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id':", "for p in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True )", "email: return abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try: participant = next(filter(", "to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True ) ) return 'Os certificados do evento", "form.participants_list: 
redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True ) ) return 'Os", "send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True ) ) return 'Os certificados do", "p in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True ) )", "from certifico import redis_queue from certifico.mail import send_email from certifico.forms import CertificateForm def", "certifico import redis_queue from certifico.mail import send_email from certifico.forms import CertificateForm def create_certificate():", "ObjectId(certificate) }) try: participant = next(filter( lambda p: p.get('email') == email, certificate.get('participants') ))", "= certificate.get('message') message = message.replace( '[participante]', participant.get('name').upper()) return render_template( 'print.html', logo=certificate.get('logo'), message=message )", "'participants': form.participants_list }) for p in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id,", "p: p.get('email') == email, certificate.get('participants') )) except StopIteration: return abort(404) message = certificate.get('message')", "flask import render_template from bson.objectid import ObjectId from certifico import app from certifico", "from certifico import mongo from certifico import redis_queue from certifico.mail import send_email from", "import app from certifico import mongo from certifico import redis_queue from certifico.mail import", "def create_certificate(): form = CertificateForm() if form.validate_on_submit(): certificate = 
mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message':", "import CertificateForm def create_certificate(): form = CertificateForm() if form.validate_on_submit(): certificate = mongo.db.certificates.insert_one({ 'logo':", "return abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try: participant = next(filter( lambda", "try: participant = next(filter( lambda p: p.get('email') == email, certificate.get('participants') )) except StopIteration:", "form.validate_on_submit(): certificate = mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list }) for p", "email=p.get('email'), _external=True ) ) return 'Os certificados do evento %s foram ' \\", "certifico import app from certifico import mongo from certifico import redis_queue from certifico.mail", "request from flask import abort from flask import url_for from flask import render_template", "mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list }) for p in form.participants_list: redis_queue.enqueue(", "certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try: participant = next(filter( lambda p: p.get('email')", "= next(filter( lambda p: p.get('email') == email, certificate.get('participants') )) except StopIteration: return abort(404)", "}) for p in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True", "}) try: participant = next(filter( lambda p: p.get('email') == email, certificate.get('participants') )) except", "except StopIteration: return abort(404) message = certificate.get('message') message = message.replace( '[participante]', participant.get('name').upper()) return", "certifico import mongo from certifico 
import redis_queue from certifico.mail import send_email from certifico.forms", "' \\ 'enviados.' % certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email", "certificate.get('participants') )) except StopIteration: return abort(404) message = certificate.get('message') message = message.replace( '[participante]',", "form.data['message'], 'participants': form.participants_list }) for p in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate',", "mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try: participant = next(filter( lambda p: p.get('email') == email,", "from flask import render_template from bson.objectid import ObjectId from certifico import app from", "import send_email from certifico.forms import CertificateForm def create_certificate(): form = CertificateForm() if form.validate_on_submit():", "% certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email = request.args.get('email') if", "400 def print_certificate(certificate): email = request.args.get('email') if not email: return abort(404) certificate =", "in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True ) ) return", "import ObjectId from certifico import app from certifico import mongo from certifico import", "= mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list }) for p in form.participants_list:", "from flask import abort from flask import url_for from flask import render_template from", "flask import abort from flask import url_for from flask import 
render_template from bson.objectid", "render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email = request.args.get('email') if not email: return", "form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email = request.args.get('email') if not email: return abort(404)", "email = request.args.get('email') if not email: return abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate)", "certifico.mail import send_email from certifico.forms import CertificateForm def create_certificate(): form = CertificateForm() if", "import url_for from flask import render_template from bson.objectid import ObjectId from certifico import", "from flask import url_for from flask import render_template from bson.objectid import ObjectId from", "abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try: participant = next(filter( lambda p:", "certifico.forms import CertificateForm def create_certificate(): form = CertificateForm() if form.validate_on_submit(): certificate = mongo.db.certificates.insert_one({", "next(filter( lambda p: p.get('email') == email, certificate.get('participants') )) except StopIteration: return abort(404) message", "from certifico.mail import send_email from certifico.forms import CertificateForm def create_certificate(): form = CertificateForm()", "return abort(404) message = certificate.get('message') message = message.replace( '[participante]', participant.get('name').upper()) return render_template( 'print.html',", "flask import request from flask import abort from flask import url_for from flask", "%s foram ' \\ 'enviados.' 
% certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def", "redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True ) ) return 'Os certificados", "abort from flask import url_for from flask import render_template from bson.objectid import ObjectId", "send_email from certifico.forms import CertificateForm def create_certificate(): form = CertificateForm() if form.validate_on_submit(): certificate", "evento %s foram ' \\ 'enviados.' % certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400", "analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email = request.args.get('email') if not email: return abort(404) certificate", "'enviados.' % certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email = request.args.get('email')", "bson.objectid import ObjectId from certifico import app from certifico import mongo from certifico", "'message': form.data['message'], 'participants': form.participants_list }) for p in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for(", "\\ 'enviados.' 
% certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email =", ")) except StopIteration: return abort(404) message = certificate.get('message') message = message.replace( '[participante]', participant.get('name').upper())", "if not email: return abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try: participant", "app from certifico import mongo from certifico import redis_queue from certifico.mail import send_email", "CertificateForm def create_certificate(): form = CertificateForm() if form.validate_on_submit(): certificate = mongo.db.certificates.insert_one({ 'logo': form.data['logo'],", "email, certificate.get('participants') )) except StopIteration: return abort(404) message = certificate.get('message') message = message.replace(", "if form.validate_on_submit(): certificate = mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list }) for", "return 'Os certificados do evento %s foram ' \\ 'enviados.' % certificate.inserted_id return", "form = CertificateForm() if form.validate_on_submit(): certificate = mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'], 'participants':", "redis_queue from certifico.mail import send_email from certifico.forms import CertificateForm def create_certificate(): form =", "from certifico import app from certifico import mongo from certifico import redis_queue from", "render_template from bson.objectid import ObjectId from certifico import app from certifico import mongo", "certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True ) ) return 'Os certificados do evento %s", "foram ' \\ 'enviados.' 
% certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate):", "create_certificate(): form = CertificateForm() if form.validate_on_submit(): certificate = mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'],", "request.args.get('email') if not email: return abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try:", "_external=True ) ) return 'Os certificados do evento %s foram ' \\ 'enviados.'", "certificate = mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list }) for p in", "participant = next(filter( lambda p: p.get('email') == email, certificate.get('participants') )) except StopIteration: return", "'_id': ObjectId(certificate) }) try: participant = next(filter( lambda p: p.get('email') == email, certificate.get('participants')", "import mongo from certifico import redis_queue from certifico.mail import send_email from certifico.forms import", "def print_certificate(certificate): email = request.args.get('email') if not email: return abort(404) certificate = mongo.db.certificates.find_one_or_404({", "== email, certificate.get('participants') )) except StopIteration: return abort(404) message = certificate.get('message') message =", "certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email = request.args.get('email') if not", "'print_certificate', certificate=certificate.inserted_id, email=p.get('email'), _external=True ) ) return 'Os certificados do evento %s foram", "p.get('email') == email, certificate.get('participants') )) except StopIteration: return abort(404) message = certificate.get('message') message", "= CertificateForm() if form.validate_on_submit(): certificate = 
mongo.db.certificates.insert_one({ 'logo': form.data['logo'], 'message': form.data['message'], 'participants': form.participants_list", ") ) return 'Os certificados do evento %s foram ' \\ 'enviados.' %", "return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')), 400 def print_certificate(certificate): email = request.args.get('email') if not email:", "StopIteration: return abort(404) message = certificate.get('message') message = message.replace( '[participante]', participant.get('name').upper()) return render_template(", "from bson.objectid import ObjectId from certifico import app from certifico import mongo from", "form.participants_list }) for p in form.participants_list: redis_queue.enqueue( send_email, to_email=p.get('email'), certificateLink=url_for( 'print_certificate', certificate=certificate.inserted_id, email=p.get('email'),", "= request.args.get('email') if not email: return abort(404) certificate = mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) })", "import render_template from bson.objectid import ObjectId from certifico import app from certifico import", "from certifico.forms import CertificateForm def create_certificate(): form = CertificateForm() if form.validate_on_submit(): certificate =", "mongo from certifico import redis_queue from certifico.mail import send_email from certifico.forms import CertificateForm", "do evento %s foram ' \\ 'enviados.' 
% certificate.inserted_id return render_template('index.html', form=form, analytics=app.config.get('GOOGLE_ANALYTICS')),", "= mongo.db.certificates.find_one_or_404({ '_id': ObjectId(certificate) }) try: participant = next(filter( lambda p: p.get('email') ==", "message = certificate.get('message') message = message.replace( '[participante]', participant.get('name').upper()) return render_template( 'print.html', logo=certificate.get('logo'), message=message", "flask import url_for from flask import render_template from bson.objectid import ObjectId from certifico", "certificate=certificate.inserted_id, email=p.get('email'), _external=True ) ) return 'Os certificados do evento %s foram '", "certificados do evento %s foram ' \\ 'enviados.' % certificate.inserted_id return render_template('index.html', form=form,", "abort(404) message = certificate.get('message') message = message.replace( '[participante]', participant.get('name').upper()) return render_template( 'print.html', logo=certificate.get('logo'),", "from flask import request from flask import abort from flask import url_for from", "'Os certificados do evento %s foram ' \\ 'enviados.' % certificate.inserted_id return render_template('index.html',", "ObjectId from certifico import app from certifico import mongo from certifico import redis_queue", "import request from flask import abort from flask import url_for from flask import" ]
[ "0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def object_delete(obj): dn", "= epg_obj #======================================================================= # Relations #======================================================================= for ctr in ctr_list: sj_list = ctr['Subject']", "bd_objs = {} fe_objs = {} sj_objs = {} sn_objs = {} ap_objs", "{} sj_objs = {} sn_objs = {} ap_objs = {} epg_objs = {}", "list) else [] for sn in sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE", "if 'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE", "in epg: for path in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose:", "import * def deployACI(desc, verbose=False, debug=False): try: dom_ip = desc['Controller']['ip'] except: exit(1) try:", ">> %s.dn=%s\\n' % (obj.class_name, dn)) def recursive_delete(obj): children = obj.children() for child in", "verbose: print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs = {} tenant_ctx_objs", "in tenant and isinstance(tenant['AppProfile'], list) else [] for ap in ap_list: ap_obj =", "{} ctr_objs = {} ctx_objs = {} l3e_objs = {} bd_objs = {}", "object_delete(obj) elif isinstance(obj, aciContextModel): if obj['dn'] not in ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel):", "aciBridgeDomainModel): if obj['dn'] not in bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel): if obj['dn'] not", "Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n'", ">> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % 
(bd['name'], bd['L3External'])) for ap in ap_list: epg_list =", "if 'Contract' in tenant and isinstance(tenant['Contract'], list) else [] for ctr in ctr_list:", "== 0: delete_tenants.append(tenant['name']) def object_delete(obj): dn = obj['dn'] obj.delete() if verbose: print('DELETE >>", "if tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant' : tenant_objs.keys(), 'Filter' : flt_objs.keys(),", "% ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list = ap['EPG'] if 'EPG'", "(bd['name'], bd['Context'])) if verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for", "obj['dn'] not in fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel): if obj['dn'] not in sj_objs:", "flt_objs = {} ctr_objs = {} ctx_objs = {} l3e_objs = {} bd_objs", "% (dom_ip, dom_user, dom_pwd)) common = dom.Tenant('common') tenant_objs = {} flt_objs = {}", ">> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list = tenant['Contract']", ">> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= # Relations", "ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list = ctr['Subject'] if 'Subject' in ctr", "not in ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel): if obj['dn'] not in l3e_objs: object_delete(obj)", "if isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child) elif", "sj_obj ctx_list = tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'], list) else []", "and isinstance(flt['FilterEntry'], list) else [] for fe in fe_list: fe_obj = 
flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if", "sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list = tenant['AppProfile'] if 'AppProfile' in", "except: delete_empty_tenant = False try: deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental = False try:", "elif isinstance(obj, aciContractModel): if obj['dn'] not in ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel): if", "if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if verbose: print('RELATE", "isinstance(obj, aciFilterEntryModel): if obj['dn'] not in fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel): if obj['dn']", "verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for bd in bd_list:", "[] for epg in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n'", "verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) for ap in ap_list:", "isinstance(obj, aciAppProfileModel): if obj['dn'] not in ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel): if obj['dn']", "ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel): if obj['dn'] not in sn_objs: object_delete(obj) elif isinstance(obj,", "except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if verbose:", "if isinstance(obj, aciFilterModel): if obj['dn'] not in flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel): if", "desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False try: deploy_incremental", "try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: 
tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n'", "aciFilterModel): if obj['dn'] not in flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel): if obj['dn'] not", "Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list = flt['FilterEntry'] if", "ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else [] for sj in", "elif isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif", ">> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list = ctr['Subject']", "= dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant'", "delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0:", "{} delete_tenants = [] def parse_desc_unit(unit): ret = {} for key in unit:", "sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list = tenant['AppProfile'] if 'AppProfile' in tenant and isinstance(tenant['AppProfile'],", "EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] + '/' + path['Node'] + '/' +", "obj['dn'] obj.delete() if verbose: print('DELETE >> %s.dn=%s\\n' % (obj.class_name, dn)) def recursive_delete(obj): children", "try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n'", "l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' % 
l3e_obj['dn']) l3e_objs[l3e_obj['dn']] =", "except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'],", "delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant' : tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract' : ctr_objs.keys(),", "dom_ip = desc['Controller']['ip'] except: exit(1) try: dom_user = desc['Controller']['user'] except: exit(1) try: dom_pwd", "and isinstance(desc['Tenant'], list) else [] for tenant in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']]", "tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] =", "except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'],", "flt_list = tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'], list) else [] for", "tenant['L3External'] if 'L3External' in tenant and isinstance(tenant['L3External'], list) else [] for l3e in", ">> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list = tenant['L3External']", "children = obj.children() for child in children: if isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child,", "ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject' : sj_objs.keys(),", "if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if verbose: print('RELATE", "= tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) 
ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']]", "= [] def parse_desc_unit(unit): ret = {} for key in unit: if re.search('^[a-z]\\w*',", "dom.Tenant('common') tenant_objs = {} flt_objs = {} ctr_objs = {} ctx_objs = {}", "tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'], list) else [] for flt in", "= ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list = tenant['L3External'] if 'L3External' in tenant and", "[] for fe in fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n'", "tenant and isinstance(tenant['AppProfile'], list) else [] for ap in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap))", "= ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list = ap['EPG'] if 'EPG' in ap and", "= flt_obj fe_list = flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else", "= ap_obj epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else", "verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] + '/' + path['Node']", "'BridgeDomain' : bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet' : sn_objs.keys(), 'AppProfile'", "common = dom.Tenant('common') tenant_objs = {} flt_objs = {} ctr_objs = {} ctx_objs", "**parse_desc_unit(path)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] + '/'", "isinstance(tenant['Contract'], list) else [] for ctr in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose:", "delete_tenants.append(tenant['name']) def object_delete(obj): dn = obj['dn'] obj.delete() if verbose: print('DELETE >> %s.dn=%s\\n' %", "fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj", "epg: for cons in 
epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose:", "'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE FAILED>>", "aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel):", "isinstance(tenant['L3External'], list) else [] for l3e in l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose:", "isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child,", "flt_objs.keys(), 'Contract' : ctr_objs.keys(), 'Context' : ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(),", "in tenant and isinstance(tenant['Contract'], list) else [] for ctr in ctr_list: ctr_obj =", "tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' %", "children: if isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child)", "ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list = ap['EPG'] if 'EPG' in ap", "ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'],", "l3e_list = tenant['L3External'] if 'L3External' in tenant and 
isinstance(tenant['L3External'], list) else [] for", "for path in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE >>", "recursive_delete(obj): children = obj.children() for child in children: if isinstance(child, aciFilterModel): recursive_delete(child) elif", "fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] =", "aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel):", "try: dom_user = desc['Controller']['user'] except: exit(1) try: dom_pwd = desc['Controller']['pwd'] except: exit(1) try:", "verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if verbose: print('RELATE >>", "@author: \"comfact\" ''' import re from .model import * def deployACI(desc, verbose=False, debug=False):", "\"comfact\" ''' import re from .model import * def deployACI(desc, verbose=False, debug=False): try:", "ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else [] for epg in", "elif isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif", "aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel):", "= {} l3e_objs = {} bd_objs = {} fe_objs = {} sj_objs =", "if 'Tenant' in desc and isinstance(desc['Tenant'], list) else [] for tenant in tenant_list:", "= tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: 
print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']]", "EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= # Relations #=======================================================================", "list) else [] for fe in fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE", "in sj_list: if 'Filter' in sj: for flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except:", "for l3e in l3e_list: if 'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context']))", "= l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant and", "in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']]", "EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n'", "{} ap_objs = {} epg_objs = {} delete_tenants = [] def parse_desc_unit(unit): ret", "else [] for sj in sj_list: if 'Filter' in sj: for flt in", "if verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for bd in", "aciFilterEntryModel): if obj['dn'] not in fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel): if obj['dn'] not", "elif isinstance(obj, aciSubjectModel): if obj['dn'] not in sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if", "aciAppProfileModel): if obj['dn'] not in ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel): if obj['dn'] not", "print('UPDATE >> 
L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list =", "prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE FAILED>>", "continue recursive_delete(tenant_obj) if tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant' : tenant_objs.keys(), 'Filter'", "l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant", "= bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']]", "for flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE", "EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if 'Provide' in epg: for prov in", "unit[key] return ret tenant_list = desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'], list)", "else [] for sn in sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >>", "print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) for ap in ap_list: epg_list", "print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for l3e in l3e_list: if", "''' Created on 2016. 10. 26. 
@author: \"comfact\" ''' import re from .model", "L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'],", "= obj.children() for child in children: if isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel):", "if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj", "l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'],", "recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child)", "for l3e in l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' %", "% epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= # Relations #======================================================================= for", "debug=debug) except: if verbose: print('Connection Failed : %s, %s, %s\\n' % (dom_ip, dom_user,", "and isinstance(tenant['Contract'], list) else [] for ctr in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if", "if obj['dn'] not in ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel): if obj['dn'] not in", "% (sj['name'], flt)) if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt))", "verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list", "isinstance(ap['EPG'], list) else [] for epg in epg_list: if 
'BridgeDomain' in epg: try:", "'Contract' in tenant and isinstance(tenant['Contract'], list) else [] for ctr in ctr_list: ctr_obj", "# Create & Update #======================================================================= flt_list = tenant['Filter'] if 'Filter' in tenant and", "sj: for flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose:", "{} tenant_sj_objs = {} tenant_sn_objs = {} tenant_ap_objs = {} tenant_epg_objs = {}", "in ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel): if obj['dn'] not in l3e_objs: object_delete(obj) elif", "if verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj", "isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child,", "aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel):", "in ap_list: epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else", "{} tenant_bd_objs = {} tenant_fe_objs = {} tenant_sj_objs = {} tenant_sn_objs = {}", "tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if", "isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj,", "ctx_objs = {} l3e_objs = {} bd_objs = {} fe_objs = {} sj_objs", "= tenant['AppProfile'] if 'AppProfile' in tenant and 
isinstance(tenant['AppProfile'], list) else [] for ap", "+ path['Intf'])) if delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0 and", "(epg_obj['name'], cons)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if", "and isinstance(ap['EPG'], list) else [] for epg in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if", "%s.dn=%s\\n' % (obj.class_name, dn)) def recursive_delete(obj): children = obj.children() for child in children:", "bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to", "try: tenant_obj = dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close()", "except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' %", "verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list", "ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list = tenant['L3External'] if 'L3External' in", "if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else [] for fe in fe_list:", "= dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if verbose: print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs", "except: exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False try: deploy_incremental =", "'FilterEntry' : fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet' : sn_objs.keys(), 'AppProfile' : ap_objs.keys(), 'EPG'", "if verbose: print('UPDATE >> 
BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj", "bd in bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn'])", "FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to", "Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if 'Path' in epg: for path in epg['Path']: ep_obj", "deploy_incremental: for tenant in tenant_list: try: tenant_obj = dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if", "tenant_l3e_objs = {} tenant_bd_objs = {} tenant_fe_objs = {} tenant_sj_objs = {} tenant_sn_objs", "= desc['Controller']['ip'] except: exit(1) try: dom_user = desc['Controller']['user'] except: exit(1) try: dom_pwd =", "except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if verbose:", "in l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if obj['dn'] not in fe_objs: object_delete(obj) elif", "bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'], list) else [] for sn in", "% (sj['name'], flt)) for l3e in l3e_list: if 'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']])", "= dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' % (epg_obj['name'],", "in ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel): if obj['dn'] not in sn_objs: object_delete(obj) elif", "(bd['name'], bd['L3External'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) for", ">> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % 
(epg_obj['name'], prov)) if 'Path' in epg: for path", "BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n'", "bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list = bd['Subnet'] if 'Subnet' in bd", "elif isinstance(obj, aciL3OutModel): if obj['dn'] not in l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if", "if 'L3External' in tenant and isinstance(tenant['L3External'], list) else [] for l3e in l3e_list:", "= tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'], list) else [] for flt", "verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for l3e in l3e_list:", "(epg_obj['name'], cons)) if 'Provide' in epg: for prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except:", "desc['Controller']['user'] except: exit(1) try: dom_pwd = desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant']", "aciContextModel): if obj['dn'] not in ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel): if obj['dn'] not", "{} tenant_fe_objs = {} tenant_sj_objs = {} tenant_sn_objs = {} tenant_ap_objs = {}", "[] for sn in sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n'", "aciSubnetModel): if obj['dn'] not in sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel): if obj['dn'] not", "elif isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif", "def recursive_delete(obj): children = obj.children() for child in children: if isinstance(child, aciFilterModel): recursive_delete(child)", "to Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] + '/' + 
path['Node'] + '/' + path['Intf']))", "delete_tenants = [] def parse_desc_unit(unit): ret = {} for key in unit: if", "dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if verbose: print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs =", "list) else [] for bd in bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE", "if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj", "and isinstance(tenant['BridgeDomain'], list) else [] for bd in bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if", "= sn_obj ap_list = tenant['AppProfile'] if 'AppProfile' in tenant and isinstance(tenant['AppProfile'], list) else", "verbose=False, debug=False): try: dom_ip = desc['Controller']['ip'] except: exit(1) try: dom_user = desc['Controller']['user'] except:", "isinstance(ctr['Subject'], list) else [] for sj in sj_list: if 'Filter' in sj: for", "% (epg_obj['name'], cons)) if 'Provide' in epg: for prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov])", "isinstance(tenant['AppProfile'], list) else [] for ap in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose:", "% (bd['name'], bd['L3External'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External']))", "tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list = tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'], list)", "fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet' : sn_objs.keys(), 'AppProfile' : ap_objs.keys(), 'EPG' : epg_objs.keys()}", "ctx_obj l3e_list = tenant['L3External'] if 'L3External' in tenant and isinstance(tenant['L3External'], list) else []", "EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if verbose: 
print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n'", "% (bd['name'], bd['Context'])) if 'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except:", "epg in epg_list: if 'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except:", "object_delete(obj): dn = obj['dn'] obj.delete() if verbose: print('DELETE >> %s.dn=%s\\n' % (obj.class_name, dn))", "len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def object_delete(obj): dn = obj['dn']", "% (epg_obj['name'], cons)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons))", "path['Pod'] + '/' + path['Node'] + '/' + path['Intf'])) if delete_empty_tenant and len(tenant_ctx_objs)", "print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list =", "def object_delete(obj): dn = obj['dn'] obj.delete() if verbose: print('DELETE >> %s.dn=%s\\n' % (obj.class_name,", "= {} tenant_ctx_objs = {} tenant_l3e_objs = {} tenant_bd_objs = {} tenant_fe_objs =", "object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if obj['dn'] not in fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel):", "isinstance(obj, aciSubnetModel): if obj['dn'] not in sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel): if obj['dn']", "dom_pwd)) common = dom.Tenant('common') tenant_objs = {} flt_objs = {} ctr_objs = {}", "epg_list: if 'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose:", "Controller(dom_ip, dom_user, 
dom_pwd, debug=debug) except: if verbose: print('Connection Failed : %s, %s, %s\\n'", "dom_pwd)) exit(1) if verbose: print('Get Controller : %s, %s, %s\\n' % (dom_ip, dom_user,", "list) else [] for ctr in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE", "{} flt_objs = {} ctr_objs = {} ctx_objs = {} l3e_objs = {}", "in epg_list: if 'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if", "epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else [] for", "tenant_ctr_objs = {} tenant_ctx_objs = {} tenant_l3e_objs = {} tenant_bd_objs = {} tenant_fe_objs", "elif isinstance(obj, aciContextModel): if obj['dn'] not in ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel): if", "verbose: print('Get Controller : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) common =", "if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if verbose:", "tenant and isinstance(tenant['Contract'], list) else [] for ctr in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr))", "path['Intf'])) if delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs)", "in ctr and isinstance(ctr['Subject'], list) else [] for sj in sj_list: if 'Filter'", "tenant_ap_objs[ap_obj['name']] = ap_obj epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list)", "in epg: for cons in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if", "isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child,", 
"isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child,", "tenant and isinstance(tenant['Filter'], list) else [] for flt in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt))", "BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n'", "flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel): if obj['dn'] not in ctr_objs: object_delete(obj) elif isinstance(obj,", "path in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s", "for tenant in tenant_list: try: tenant_obj = dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if tenant['name']", "BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) for ap in ap_list: epg_list = ap['EPG']", "= {} tenant_sj_objs = {} tenant_sn_objs = {} tenant_ap_objs = {} tenant_epg_objs =", "= bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'], list) else [] for sn", "in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE FAILED >>", "in l3e_list: if 'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if", "try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n'", 
"== 0 and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def object_delete(obj): dn = obj['dn'] obj.delete()", "sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if obj['dn'] not in bd_objs: object_delete(obj) elif isinstance(obj,", "sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] =", "ret tenant_list = desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'], list) else []", "key in unit: if re.search('^[a-z]\\w*', key): ret[key] = unit[key] return ret tenant_list =", "#======================================================================= # Relations #======================================================================= for ctr in ctr_list: sj_list = ctr['Subject'] if 'Subject'", "to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' %", "= tenant_obj if verbose: print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs", "L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n'", "(epg_obj['name'], epg['BridgeDomain'])) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if", "ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list = ap['EPG'] if 'EPG' in", "elif isinstance(obj, aciBridgeDomainModel): if obj['dn'] not in bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel): if", "if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE", "L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj 
bd_list = tenant['BridgeDomain'] if", "fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list = tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'],", "if verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in", "fe in fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn'])", "epg_obj #======================================================================= # Relations #======================================================================= for ctr in ctr_list: sj_list = ctr['Subject'] if", "ctr_list = tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'], list) else [] for", "bd['Context'])) if verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for bd", "tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt))", "if obj['dn'] not in l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if obj['dn'] not in", "else [] for sj in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >>", "isinstance(tenant['Filter'], list) else [] for flt in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose:", "(bd['name'], bd['Context'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if", "'Context' : ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject'", "print('Get Controller : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) common = dom.Tenant('common')", "dom.close() return {'Tenant' : tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract' : ctr_objs.keys(), 'Context' 
:", ">> Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list = tenant['Context']", "ret[key] = unit[key] return ret tenant_list = desc['Tenant'] if 'Tenant' in desc and", "aciSubjectModel): if obj['dn'] not in sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if obj['dn'] not", "try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n'", "= desc['Option']['deployIncremental'] except: deploy_incremental = False try: dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug)", "== 0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def object_delete(obj):", "flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else [] for fe in", "+ '/' + path['Node'] + '/' + path['Intf'])) if delete_empty_tenant and len(tenant_ctx_objs) ==", "flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list = flt['FilterEntry'] if 'FilterEntry' in", "EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n'", "elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child) elif", "= flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']]", "if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for l3e in", ": bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet' : 
sn_objs.keys(), 'AppProfile' :", "tenant and isinstance(tenant['Context'], list) else [] for ctx in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx))", "% (obj.class_name, dn)) def recursive_delete(obj): children = obj.children() for child in children: if", "% (bd['name'], bd['Context'])) if verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context']))", "tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' %", "(sj['name'], flt)) if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for", "(obj.class_name, dn)) def recursive_delete(obj): children = obj.children() for child in children: if isinstance(child,", "tenant in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if verbose: print('UPDATE >>", "ctr and isinstance(ctr['Subject'], list) else [] for sj in sj_list: if 'Filter' in", "and isinstance(tenant['Filter'], list) else [] for flt in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if", "% (epg_obj['name'], epg['BridgeDomain'])) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain']))", "recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child)", "object_delete(obj) elif isinstance(obj, aciAppProfileModel): if obj['dn'] not in ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel):", "if obj['dn'] not in ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel): if obj['dn'] not in", "+ path['Node'] + '/' + path['Intf'])) if 
delete_empty_tenant and len(tenant_ctx_objs) == 0 and", "sj in sj_list: if 'Filter' in sj: for flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt])", "{} tenant_ctx_objs = {} tenant_l3e_objs = {} tenant_bd_objs = {} tenant_fe_objs = {}", "'Subject' in ctr and isinstance(ctr['Subject'], list) else [] for sj in sj_list: sj_obj", "ap_objs = {} epg_objs = {} delete_tenants = [] def parse_desc_unit(unit): ret =", "Relations #======================================================================= for ctr in ctr_list: sj_list = ctr['Subject'] if 'Subject' in ctr", "(epg_obj['name'], prov)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if", "= {} epg_objs = {} delete_tenants = [] def parse_desc_unit(unit): ret = {}", "Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list = tenant['AppProfile'] if", "ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list = ctr['Subject'] if 'Subject' in", "% (bd['name'], bd['Context'])) for bd in bd_list: if 'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']])", "cons)) if 'Provide' in epg: for prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try:", "fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list = tenant['Contract'] if 'Contract' in", "len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def object_delete(obj): dn = obj['dn'] obj.delete() if verbose: print('DELETE", "{} l3e_objs = {} bd_objs = {} fe_objs = {} sj_objs = {}", "elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel): if obj['dn']", "ctr_objs.keys(), 'Context' : ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain' : 
bd_objs.keys(), 'FilterEntry' : fe_objs.keys(),", "if not deploy_incremental: for tenant in tenant_list: try: tenant_obj = dom.Tenant(tenant['name']) except: continue", "try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context']))", "verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list", "% (dom_ip, dom_user, dom_pwd)) exit(1) if verbose: print('Get Controller : %s, %s, %s\\n'", "{} tenant_ap_objs = {} tenant_epg_objs = {} #======================================================================= # Create & Update #=======================================================================", "list) else [] for tenant in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj", "{} fe_objs = {} sj_objs = {} sn_objs = {} ap_objs = {}", "{} for key in unit: if re.search('^[a-z]\\w*', key): ret[key] = unit[key] return ret", "if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if 'L3External' in", "for fe in fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' %", "dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug) except: if verbose: print('Connection Failed : %s,", "verbose: print('Connection Failed : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) exit(1) if", "[] for ap in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n'", "verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if verbose: print('RELATE", "+ '/' + path['Intf'])) if delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) ==", "0 
and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def object_delete(obj): dn = obj['dn'] obj.delete() if", "flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE FAILED", "obj['dn'] not in epg_objs: object_delete(obj) if not deploy_incremental: for tenant in tenant_list: try:", "= {} sn_objs = {} ap_objs = {} epg_objs = {} delete_tenants =", ">> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if 'Provide' in epg: for prov", "'/' + path['Node'] + '/' + path['Intf'])) if delete_empty_tenant and len(tenant_ctx_objs) == 0", "isinstance(ctr['Subject'], list) else [] for sj in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose:", ": ctr_objs.keys(), 'Context' : ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(), 'FilterEntry' :", "'Consume' in epg: for cons in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except:", "Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list = ctr['Subject'] if", "flt)) for l3e in l3e_list: if 'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try:", "except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if verbose:", "= {} tenant_l3e_objs = {} tenant_bd_objs = {} tenant_fe_objs = {} tenant_sj_objs =", "else [] for fe in fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >>", "obj.delete() if verbose: print('DELETE >> %s.dn=%s\\n' % (obj.class_name, dn)) def recursive_delete(obj): children =", "to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> 
L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' %", "aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel):", "{'Tenant' : tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract' : ctr_objs.keys(), 'Context' : ctx_objs.keys(), 'L3External'", "BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list = bd['Subnet'] if", "= desc['Controller']['user'] except: exit(1) try: dom_pwd = desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant =", "tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if", "epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to", "verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list", "in epg_objs: object_delete(obj) if not deploy_incremental: for tenant in tenant_list: try: tenant_obj =", "in fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']]", "= flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else [] for fe", "isinstance(tenant['Context'], list) else [] for ctx in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose:", "try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], 
bd['L3External']))", "path['Node'] + '/' + path['Intf'])) if delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs)", "{} #======================================================================= # Create & Update #======================================================================= flt_list = tenant['Filter'] if 'Filter' in", "verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list", "tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'], list) else [] for ctx in", "(epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in epg: for cons in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except:", "else [] for flt in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >>", "obj['dn'] not in ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel): if obj['dn'] not in l3e_objs:", "10. 26. 
@author: \"comfact\" ''' import re from .model import * def deployACI(desc,", "bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] =", "tenant in tenant_list: try: tenant_obj = dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if tenant['name'] in", "ret = {} for key in unit: if re.search('^[a-z]\\w*', key): ret[key] = unit[key]", "except: if verbose: print('Connection Failed : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd))", "% (bd['name'], bd['L3External'])) for ap in ap_list: epg_list = ap['EPG'] if 'EPG' in", "BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'],", "bd['Context'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if 'L3External'", "and isinstance(ctr['Subject'], list) else [] for sj in sj_list: if 'Filter' in sj:", "for bd in bd_list: if 'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context']))", "= flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list = flt['FilterEntry'] if 'FilterEntry' in flt and", "flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']] =", "sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s", "if 'Filter' in sj: for flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt))", "{} epg_objs = {} delete_tenants = [] 
def parse_desc_unit(unit): ret = {} for", "Update #======================================================================= flt_list = tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'], list) else", "except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'],", "sj_objs = {} sn_objs = {} ap_objs = {} epg_objs = {} delete_tenants", "sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else [] for", "FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to", "= Controller(dom_ip, dom_user, dom_pwd, debug=debug) except: if verbose: print('Connection Failed : %s, %s,", "{} sn_objs = {} ap_objs = {} epg_objs = {} delete_tenants = []", "in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s", "in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']]", "if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else [] for bd in bd_list:", "(epg_obj['name'], path['Pod'] + '/' + path['Node'] + '/' + path['Intf'])) if delete_empty_tenant and", "ap in ap_list: epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list)", "in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s", "except: if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if", "recursive_delete(child) if 
isinstance(obj, aciFilterModel): if obj['dn'] not in flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel):", "epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= # Relations #======================================================================= for ctr in", "(bd['name'], bd['Context'])) if 'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if", "ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel): if obj['dn'] not in ctx_objs: object_delete(obj) elif isinstance(obj,", "verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if verbose: print('RELATE >>", "in ap and isinstance(ap['EPG'], list) else [] for epg in epg_list: epg_obj =", "tenant_sn_objs[sn_obj['name']] = sn_obj ap_list = tenant['AppProfile'] if 'AppProfile' in tenant and isinstance(tenant['AppProfile'], list)", "list) else [] for epg in epg_list: if 'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']])", "prov)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if 'Path'", "= {} bd_objs = {} fe_objs = {} sj_objs = {} sn_objs =", "and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name'])", "= obj['dn'] obj.delete() if verbose: print('DELETE >> %s.dn=%s\\n' % (obj.class_name, dn)) def recursive_delete(obj):", "tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] =", "%s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) common = dom.Tenant('common') tenant_objs = {}", 
"L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for bd in bd_list: if 'Context' in", "if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) for ap in", "[] for bd in bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n'", "bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else [] for", "not in epg_objs: object_delete(obj) if not deploy_incremental: for tenant in tenant_list: try: tenant_obj", "ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']] =", "re from .model import * def deployACI(desc, verbose=False, debug=False): try: dom_ip = desc['Controller']['ip']", "= {} ctx_objs = {} l3e_objs = {} bd_objs = {} fe_objs =", "{} bd_objs = {} fe_objs = {} sj_objs = {} sn_objs = {}", "for child in children: if isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child) elif", "print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for bd in bd_list: if", "in ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel): if obj['dn'] not in ctx_objs: object_delete(obj) elif", "desc and isinstance(desc['Tenant'], list) else [] for tenant in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant))", "if re.search('^[a-z]\\w*', key): ret[key] = unit[key] return ret tenant_list = desc['Tenant'] if 'Tenant'", "in bd and isinstance(bd['Subnet'], list) else [] for sn in sn_list: sn_obj =", "bd['Context'])) if 'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) 
except: if verbose:", "if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj", "[] for sj in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n'", "flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] =", "tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if", "'EPG' in ap and isinstance(ap['EPG'], list) else [] for epg in epg_list: epg_obj", "try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain']))", "epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to", "Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list = tenant['L3External'] if", "Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list = tenant['Context'] if", "if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE", "obj.children() for child in children: if isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child)", "tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % 
(epg_obj['name'], epg['BridgeDomain'])) if", "Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'],", "to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' %", "= {} tenant_fe_objs = {} tenant_sj_objs = {} tenant_sn_objs = {} tenant_ap_objs =", "obj['dn'] not in bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel): if obj['dn'] not in ap_objs:", "= tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'], list) else [] for ctr", "bd['L3External'])) for ap in ap_list: epg_list = ap['EPG'] if 'EPG' in ap and", "Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] + '/' + path['Node'] + '/' + path['Intf'])) if", ": flt_objs.keys(), 'Contract' : ctr_objs.keys(), 'Context' : ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain' :", "in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if verbose: print('UPDATE >> fvTenant.dn=%s\\n'", "if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj", "tenant_sn_objs = {} tenant_ap_objs = {} tenant_epg_objs = {} #======================================================================= # Create &", "recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child)", "# Relations #======================================================================= for ctr in ctr_list: sj_list = ctr['Subject'] if 'Subject' in", "= sj_obj tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list = tenant['Context'] if 'Context' in tenant and", "print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) 
ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list =", ">> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if 'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']])", "isinstance(obj, aciContextModel): if obj['dn'] not in ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel): if obj['dn']", "''' import re from .model import * def deployACI(desc, verbose=False, debug=False): try: dom_ip", "except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'],", "child in children: if isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child,", "Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'],", "tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] =", "{} tenant_ctr_objs = {} tenant_ctx_objs = {} tenant_l3e_objs = {} tenant_bd_objs = {}", "tenant_ap_objs = {} tenant_epg_objs = {} #======================================================================= # Create & Update #======================================================================= flt_list", "desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False try: deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental = False", "FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> L3External:l3extOut.name=%s to", "flt in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' % 
flt_obj['dn'])", "elif isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child) if", "if 'Context' in tenant and isinstance(tenant['Context'], list) else [] for ctx in ctx_list:", "tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' %", "'/' + path['Intf'])) if delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0", "= l3e_obj bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else", "= desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False try:", "l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to", "print('UPDATE >> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list =", "isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel): if obj['dn'] not", "verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list", "% (bd['name'], bd['Context'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context']))", "deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental = False try: dom = Controller(dom_ip, dom_user, dom_pwd,", "and isinstance(tenant['L3External'], list) else [] for l3e in 
l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if", "ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj", "tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract' : ctr_objs.keys(), 'Context' : ctx_objs.keys(), 'L3External' : l3e_objs.keys(),", "print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] + '/' + path['Node'] +", "Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if 'Provide' in epg: for prov in epg['Provide']: try:", "{} tenant_l3e_objs = {} tenant_bd_objs = {} tenant_fe_objs = {} tenant_sj_objs = {}", "dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant' :", "EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if 'Path' in epg: for path in", "not in flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel): if obj['dn'] not in ctr_objs: object_delete(obj)", "recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child)", "to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in epg: for cons in epg['Consume']:", "dom_user = desc['Controller']['user'] except: exit(1) try: dom_pwd = desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant", ">> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list = ap['EPG']", "%s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) exit(1) if verbose: print('Get Controller :", "= sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list = tenant['AppProfile'] if 'AppProfile' in tenant and", "except: deploy_incremental = False try: dom = 
Controller(dom_ip, dom_user, dom_pwd, debug=debug) except: if", "= {} sj_objs = {} sn_objs = {} ap_objs = {} epg_objs =", "0: delete_tenants.append(tenant['name']) def object_delete(obj): dn = obj['dn'] obj.delete() if verbose: print('DELETE >> %s.dn=%s\\n'", "bd_list: if 'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose:", "if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if 'Provide' in", "'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE FAILED>>", ">> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in epg: for cons", "print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s", "ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] =", "not in fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel): if obj['dn'] not in sj_objs: object_delete(obj)", "flt and isinstance(flt['FilterEntry'], list) else [] for fe in fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe))", "isinstance(obj, aciFilterModel): if obj['dn'] not in flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel): if obj['dn']", "tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs = {} tenant_ctx_objs = {} tenant_l3e_objs = {}", "except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'],", "sj_objs[sj_obj['dn']] = sj_obj 
tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list = tenant['Context'] if 'Context' in tenant", "= {} ctr_objs = {} ctx_objs = {} l3e_objs = {} bd_objs =", "print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> L3External:l3extOut.name=%s", "in delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant' : tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract' :", "sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj", "ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj", "ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'],", "tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if verbose: print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn'])", ">> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list = bd['Subnet']", "%s\\n' % (dom_ip, dom_user, dom_pwd)) exit(1) if verbose: print('Get Controller : %s, %s,", "in sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if obj['dn'] not in bd_objs: object_delete(obj) elif", "tenant_obj = dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close() return", "else [] for l3e in l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >>", ">> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list = tenant['AppProfile']", "Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for l3e in l3e_list: if 'Context' in l3e: 
try:", "in bd_list: if 'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if", "tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n'", "to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' %", "{} ctx_objs = {} l3e_objs = {} bd_objs = {} fe_objs = {}", "tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] +", "% bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list = bd['Subnet'] if 'Subnet'", "for ap in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n' %", "L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) for ap in ap_list: epg_list = ap['EPG'] if 'EPG'", "in desc and isinstance(desc['Tenant'], list) else [] for tenant in tenant_list: tenant_obj =", "and isinstance(tenant['Context'], list) else [] for ctx in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if", "ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list = tenant['L3External'] if 'L3External' in tenant and isinstance(tenant['L3External'],", "EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in epg: for cons in", "% sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list = tenant['Context'] if 'Context'", "print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if 'Provide' in epg: for", "verbose: print('UPDATE >> 
AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list", "bd and isinstance(bd['Subnet'], list) else [] for sn in sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn))", "in sj: for flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if", "isinstance(obj, aciContractModel): if obj['dn'] not in ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel): if obj['dn']", "tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] =", "ap_list: epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else []", "verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if verbose: print('RELATE >>", "object_delete(obj) if not deploy_incremental: for tenant in tenant_list: try: tenant_obj = dom.Tenant(tenant['name']) except:", "bd_obj sn_list = bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'], list) else []", "if verbose: print('DELETE >> %s.dn=%s\\n' % (obj.class_name, dn)) def recursive_delete(obj): children = obj.children()", "= {} tenant_bd_objs = {} tenant_fe_objs = {} tenant_sj_objs = {} tenant_sn_objs =", "elif isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel): if obj['dn'] not in flt_objs: object_delete(obj)", "if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if verbose: print('RELATE", "else [] for ctx in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE >>", "in flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel): if obj['dn'] not in ctr_objs: object_delete(obj) elif", "object_delete(tenant_obj) dom.close() return 
{'Tenant' : tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract' : ctr_objs.keys(), 'Context'", "isinstance(tenant['BridgeDomain'], list) else [] for bd in bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose:", "dom_user, dom_pwd)) common = dom.Tenant('common') tenant_objs = {} flt_objs = {} ctr_objs =", "{} tenant_epg_objs = {} #======================================================================= # Create & Update #======================================================================= flt_list = tenant['Filter']", "% ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list = tenant['L3External'] if 'L3External'", "% sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list = tenant['AppProfile'] if 'AppProfile'", "if verbose: print('Connection Failed : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) exit(1)", "try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov))", "tenant_flt_objs = {} tenant_ctr_objs = {} tenant_ctx_objs = {} tenant_l3e_objs = {} tenant_bd_objs", "else [] for tenant in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if", "sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] =", "'EPG' in ap and isinstance(ap['EPG'], list) else [] for epg in epg_list: if", "'AppProfile' in tenant and isinstance(tenant['AppProfile'], list) else [] for ap in ap_list: ap_obj", "= {} ap_objs = {} epg_objs = {} delete_tenants = [] def parse_desc_unit(unit):", "dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' 
% (epg_obj['name'], path['Pod']", "ctr_obj sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else []", "'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE FAILED>>", "& Update #======================================================================= flt_list = tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'], list)", "epg_objs = {} delete_tenants = [] def parse_desc_unit(unit): ret = {} for key", "elif isinstance(obj, aciAppProfileModel): if obj['dn'] not in ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel): if", "'Tenant' in desc and isinstance(desc['Tenant'], list) else [] for tenant in tenant_list: tenant_obj", "cons in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE FAILED>>", "isinstance(obj, aciL3OutModel): if obj['dn'] not in l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if obj['dn']", "for prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE", "[] for l3e in l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n'", "if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if verbose: print('RELATE", "and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def object_delete(obj): dn = obj['dn'] obj.delete() if verbose:", "'Path' in epg: for path in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, 
**parse_desc_unit(path)) if", "in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s", "if 'Provide' in epg: for prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov))", "isinstance(obj, aciEPGModel): if obj['dn'] not in epg_objs: object_delete(obj) if not deploy_incremental: for tenant", "elif isinstance(obj, aciSubnetModel): if obj['dn'] not in sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel): if", "len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def", "not in ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel): if obj['dn'] not in sn_objs: object_delete(obj)", "sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel): if obj['dn'] not in epg_objs: object_delete(obj) if not", "in fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel): if obj['dn'] not in sj_objs: object_delete(obj) elif", "tenant_list: try: tenant_obj = dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if tenant['name'] in delete_tenants: object_delete(tenant_obj)", "if 'EPG' in ap and isinstance(ap['EPG'], list) else [] for epg in epg_list:", "ap and isinstance(ap['EPG'], list) else [] for epg in epg_list: if 'BridgeDomain' in", "sj_list: if 'Filter' in sj: for flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try:", "return ret tenant_list = desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'], list) else", "l3e in l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' % l3e_obj['dn'])", "ap_list = tenant['AppProfile'] if 'AppProfile' in tenant and 
isinstance(tenant['AppProfile'], list) else [] for", "def parse_desc_unit(unit): ret = {} for key in unit: if re.search('^[a-z]\\w*', key): ret[key]", "epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to", "ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel): if obj['dn'] not in l3e_objs: object_delete(obj) elif isinstance(obj,", "in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s", "26. @author: \"comfact\" ''' import re from .model import * def deployACI(desc, verbose=False,", "flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list = flt['FilterEntry'] if 'FilterEntry' in flt", "tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else [] for bd in", "for ap in ap_list: epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'],", "tenant_objs[tenant_obj['dn']] = tenant_obj if verbose: print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs = {}", "if obj['dn'] not in ctx_objs: object_delete(obj) elif isinstance(obj, aciL3OutModel): if obj['dn'] not in", "bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj", "isinstance(obj, aciSubjectModel): if obj['dn'] not in sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if obj['dn']", "= {} tenant_ctr_objs = {} tenant_ctx_objs = {} tenant_l3e_objs = {} tenant_bd_objs =", "bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel): if obj['dn'] not in ap_objs: object_delete(obj) elif isinstance(obj,", "tenant_list 
= desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'], list) else [] for", "tenant_bd_objs = {} tenant_fe_objs = {} tenant_sj_objs = {} tenant_sn_objs = {} tenant_ap_objs", "aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel):", "exit(1) try: dom_pwd = desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant", "Create & Update #======================================================================= flt_list = tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'],", "in children: if isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel):", "ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']] =", "= False try: deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental = False try: dom =", "for sn in sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' %", "if verbose: print('Get Controller : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) common", "to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if 'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try:", "to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for bd in bd_list: if 'Context' in bd:", "aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel): if obj['dn'] not in", "try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE FAILED>> 
L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context']))", "{} tenant_sn_objs = {} tenant_ap_objs = {} tenant_epg_objs = {} #======================================================================= # Create", "for ctr in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' %", "bd['L3External'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) for ap", "fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel): if obj['dn'] not in sj_objs: object_delete(obj) elif isinstance(obj,", "% flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list = flt['FilterEntry'] if 'FilterEntry'", "= tenant['L3External'] if 'L3External' in tenant and isinstance(tenant['L3External'], list) else [] for l3e", "in ctr_list: sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else", "obj['dn'] not in ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel): if obj['dn'] not in ctx_objs:", "= {} tenant_epg_objs = {} #======================================================================= # Create & Update #======================================================================= flt_list =", "[] for ctr in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n'", "ctr_objs = {} ctx_objs = {} l3e_objs = {} bd_objs = {} fe_objs", "tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list)", "verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >>", "= ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list = ctr['Subject'] if 'Subject' in ctr and", "isinstance(bd['Subnet'], list) else [] for sn in 
sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose:", "Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'],", "(bd['name'], bd['Context'])) for bd in bd_list: if 'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except:", "bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet' : sn_objs.keys(), 'AppProfile' : ap_objs.keys(),", "[] def parse_desc_unit(unit): ret = {} for key in unit: if re.search('^[a-z]\\w*', key):", "% (epg_obj['name'], prov)) if 'Path' in epg: for path in epg['Path']: ep_obj =", "object_delete(obj) elif isinstance(obj, aciL3OutModel): if obj['dn'] not in l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel):", "unit: if re.search('^[a-z]\\w*', key): ret[key] = unit[key] return ret tenant_list = desc['Tenant'] if", "for sj in sj_list: if 'Filter' in sj: for flt in sj['Filter']: try:", "epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n'", "(sj['name'], flt)) for l3e in l3e_list: if 'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except:", "in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']]", "sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list = tenant['Context'] if 'Context' in", "tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list = tenant['L3External'] if 'L3External' in tenant and isinstance(tenant['L3External'], list)", "[] for tenant in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if verbose:", "exit(1) if 
verbose: print('Get Controller : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd))", "else [] for epg in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >>", "in sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel): if obj['dn'] not in epg_objs: object_delete(obj) if", "if obj['dn'] not in flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel): if obj['dn'] not in", "for sj in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n' %", "Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for bd in bd_list: if 'Context' in bd: try:", "to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' %", "(bd['name'], bd['L3External'])) for ap in ap_list: epg_list = ap['EPG'] if 'EPG' in ap", "bd['Context'])) for bd in bd_list: if 'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try:", "tenant and isinstance(tenant['L3External'], list) else [] for l3e in l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e))", "and isinstance(ctr['Subject'], list) else [] for sj in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if", "tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' %", "if obj['dn'] not in sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel): if obj['dn'] not in", "and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0: delete_tenants.append(tenant['name']) def object_delete(obj): dn =", "try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'],", "if verbose: print('UPDATE >> 
Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']] = sj_obj", "'Filter' in tenant and isinstance(tenant['Filter'], list) else [] for flt in flt_list: flt_obj", "= {} fe_objs = {} sj_objs = {} sn_objs = {} ap_objs =", "l3e_list: if 'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose:", "BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if 'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except:", "FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if verbose: print('RELATE >> Subject:vzSubj.name=%s", "try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n'", "epg_objs: object_delete(obj) if not deploy_incremental: for tenant in tenant_list: try: tenant_obj = dom.Tenant(tenant['name'])", "recursive_delete(tenant_obj) if tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant' : tenant_objs.keys(), 'Filter' :", "(epg_obj['name'], prov)) if 'Path' in epg: for path in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf'])", "l3e_obj bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else []", "flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list = flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'],", ">> L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list = tenant['BridgeDomain']", "tenant_ctx_objs = {} tenant_l3e_objs = {} tenant_bd_objs = {} 
tenant_fe_objs = {} tenant_sj_objs", "list) else [] for flt in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE", "if obj['dn'] not in bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel): if obj['dn'] not in", "ap_obj epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else []", "epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= # Relations #======================================================================= for ctr in ctr_list: sj_list", ">> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] + '/' + path['Node'] + '/'", "in tenant_list: try: tenant_obj = dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj) if tenant['name'] in delete_tenants:", "deployACI(desc, verbose=False, debug=False): try: dom_ip = desc['Controller']['ip'] except: exit(1) try: dom_user = desc['Controller']['user']", "= tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']]", "except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose:", "% tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs = {} tenant_ctx_objs = {} tenant_l3e_objs =", "print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list =", "list) else [] for sj in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE", "verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list", "in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: 
tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s", "for epg in epg_list: if 'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain']))", "if 'Subnet' in bd and isinstance(bd['Subnet'], list) else [] for sn in sn_list:", "to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if 'Path' in epg: for path in epg['Path']:", ": l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet' :", ": fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet' : sn_objs.keys(), 'AppProfile' : ap_objs.keys(), 'EPG' :", "in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']]", "= {} delete_tenants = [] def parse_desc_unit(unit): ret = {} for key in", "= desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False try: deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental =", "re.search('^[a-z]\\w*', key): ret[key] = unit[key] return ret tenant_list = desc['Tenant'] if 'Tenant' in", "except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if verbose:", "= unit[key] return ret tenant_list = desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'],", "tenant_flt_objs[flt_obj['name']] = flt_obj fe_list = flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list)", "if 'Subject' in ctr and isinstance(ctr['Subject'], list) else [] for sj in sj_list:", "= tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else [] for bd", "sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >> 
Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj", "= ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else [] for epg", "aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel): if obj['dn'] not in flt_objs: object_delete(obj) elif isinstance(obj,", "if 'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except: if verbose: print('RELATE", ": %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) common = dom.Tenant('common') tenant_objs =", "print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= #", "else [] for ctr in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >>", "list) else [] for epg in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE", "try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except: if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to", "'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE FAILED>>", "(dom_ip, dom_user, dom_pwd)) exit(1) if verbose: print('Get Controller : %s, %s, %s\\n' %", "'Subnet' in bd and isinstance(bd['Subnet'], list) else [] for sn in sn_list: sn_obj", "tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= # Relations #======================================================================= for ctr in ctr_list: sj_list =", "FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj 
tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list = tenant['Contract'] if", "print('DELETE >> %s.dn=%s\\n' % (obj.class_name, dn)) def recursive_delete(obj): children = obj.children() for child", "tenant_obj if verbose: print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs =", "= tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']]", "bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] =", "try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n'", "bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to", "not in ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel): if obj['dn'] not in ctx_objs: object_delete(obj)", "obj['dn'] not in l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if obj['dn'] not in fe_objs:", "'Subject' in ctr and isinstance(ctr['Subject'], list) else [] for sj in sj_list: if", "Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for l3e in l3e_list: if 'Context' in", "epg['BridgeDomain'])) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if 'Consume'", "verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if verbose: print('RELATE >>", "in tenant and isinstance(tenant['BridgeDomain'], list) else [] for bd 
in bd_list: bd_obj =", "object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if obj['dn'] not in bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel):", "except: continue recursive_delete(tenant_obj) if tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant' : tenant_objs.keys(),", "in epg: for prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if", "ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] = ctx_obj l3e_list = tenant['L3External'] if 'L3External' in tenant", "ctx in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn'])", "tenant_bd_objs[bd_obj['name']] = bd_obj sn_list = bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'], list)", "tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' %", "isinstance(ap['EPG'], list) else [] for epg in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose:", "[] for epg in epg_list: if 'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try:", "in ctr and isinstance(ctr['Subject'], list) else [] for sj in sj_list: sj_obj =", "obj['dn'] not in sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel): if obj['dn'] not in epg_objs:", "print('Connection Failed : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) exit(1) if verbose:", "and isinstance(tenant['AppProfile'], list) else [] for ap in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if", "fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list = tenant['Contract'] 
if 'Contract' in tenant", "isinstance(flt['FilterEntry'], list) else [] for fe in fe_list: fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe)) if verbose:", "print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if verbose: print('RELATE >> EPG:fvAEPg.name=%s", "verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if 'Provide' in epg:", "cons)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if 'Provide'", "ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj", "elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child) elif", "if 'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE", ">> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) for bd in bd_list: if 'Context'", "print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s", "list) else [] for ap in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE", "sn in sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn'])", "epg in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn'])", "return {'Tenant' : tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract' : ctr_objs.keys(), 'Context' : 
ctx_objs.keys(),", "except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'],", "epg: for prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose:", "[] for flt in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n'", ".model import * def deployACI(desc, verbose=False, debug=False): try: dom_ip = desc['Controller']['ip'] except: exit(1)", "import re from .model import * def deployACI(desc, verbose=False, debug=False): try: dom_ip =", "= {} tenant_ap_objs = {} tenant_epg_objs = {} #======================================================================= # Create & Update", "epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj", "fe_objs = {} sj_objs = {} sn_objs = {} ap_objs = {} epg_objs", "sn_objs = {} ap_objs = {} epg_objs = {} delete_tenants = [] def", "= tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']]", "to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if 'Provide' in epg: for prov in epg['Provide']:", "desc['Option']['deployIncremental'] except: deploy_incremental = False try: dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug) except:", "False try: deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental = False try: dom = Controller(dom_ip,", "#======================================================================= for ctr in ctr_list: sj_list = ctr['Subject'] if 'Subject' in ctr and", "* def deployACI(desc, verbose=False, debug=False): try: dom_ip = 
desc['Controller']['ip'] except: exit(1) try: dom_user", "Controller : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) common = dom.Tenant('common') tenant_objs", "isinstance(desc['Tenant'], list) else [] for tenant in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] =", "try: dom_pwd = desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant =", "delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False try: deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental", "[] for sj in sj_list: if 'Filter' in sj: for flt in sj['Filter']:", "to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' %", "FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to", "not in sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel): if obj['dn'] not in epg_objs: object_delete(obj)", "if 'Consume' in epg: for cons in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons))", "= tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'], list) else [] for ctx", "Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if 'L3External' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']]) except: try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External']))", "'Context' in tenant and isinstance(tenant['Context'], list) else [] for ctx in ctx_list: ctx_obj", "print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in epg: for", "for bd in bd_list: bd_obj = 
tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' %", "% (epg_obj['name'], path['Pod'] + '/' + path['Node'] + '/' + path['Intf'])) if delete_empty_tenant", "= {} flt_objs = {} ctr_objs = {} ctx_objs = {} l3e_objs =", "in tenant and isinstance(tenant['Context'], list) else [] for ctx in ctx_list: ctx_obj =", "in sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']]", "dom_pwd = desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False", "dom_pwd, debug=debug) except: if verbose: print('Connection Failed : %s, %s, %s\\n' % (dom_ip,", "aciL3OutModel): if obj['dn'] not in l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if obj['dn'] not", "= epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= # Relations #======================================================================= for ctr in ctr_list:", "try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons))", "print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if 'L3External' in bd: try:", "on 2016. 10. 26. 
@author: \"comfact\" ''' import re from .model import *", "try: dom_ip = desc['Controller']['ip'] except: exit(1) try: dom_user = desc['Controller']['user'] except: exit(1) try:", "ap and isinstance(ap['EPG'], list) else [] for epg in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg))", "else [] for ap in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >>", "= bd_obj sn_list = bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'], list) else", "verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >>", "obj['dn'] not in ap_objs: object_delete(obj) elif isinstance(obj, aciSubnetModel): if obj['dn'] not in sn_objs:", "% fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list = tenant['Contract'] if 'Contract'", "to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for l3e in l3e_list: if 'Context' in l3e:", "Created on 2016. 10. 26. 
@author: \"comfact\" ''' import re from .model import", "ctr in ctr_list: sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list)", "print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s", "to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' %", "if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' % (epg_obj['name'], path['Pod'] + '/' +", "l3e_objs = {} bd_objs = {} fe_objs = {} sj_objs = {} sn_objs", "isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child,", "else [] for epg in epg_list: if 'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except:", "'Filter' in sj: for flt in sj['Filter']: try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt]) except: try: tenant_sj_objs[sj['name']].relate(common.Filter(flt)) except:", "flt)) if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for l3e", "if obj['dn'] not in sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if obj['dn'] not in", "fe_obj ctr_list = tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'], list) else []", "desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'], list) else [] for tenant in", ">> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs = {} tenant_ctx_objs = {}", "verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if 'L3External' in bd:", "try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False try: deploy_incremental = 
desc['Option']['deployIncremental'] except:", "Failed : %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) exit(1) if verbose: print('Get", "% (epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in epg: for cons in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons])", "= {} tenant_sn_objs = {} tenant_ap_objs = {} tenant_epg_objs = {} #======================================================================= #", "for key in unit: if re.search('^[a-z]\\w*', key): ret[key] = unit[key] return ret tenant_list", "ctx_list = tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'], list) else [] for", "elif isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child) elif", "if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj", "debug=False): try: dom_ip = desc['Controller']['ip'] except: exit(1) try: dom_user = desc['Controller']['user'] except: exit(1)", "tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if verbose: print('UPDATE >> fvTenant.dn=%s\\n' %", "%s, %s\\n' % (dom_ip, dom_user, dom_pwd)) common = dom.Tenant('common') tenant_objs = {} flt_objs", "verbose: print('DELETE >> %s.dn=%s\\n' % (obj.class_name, dn)) def recursive_delete(obj): children = obj.children() for", "l3e in l3e_list: if 'Context' in l3e: try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']]) except: try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context'])) except:", "FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to", "recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel): if 
obj['dn'] not in flt_objs:", "if delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) ==", "if obj['dn'] not in fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel): if obj['dn'] not in", "l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in", "in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']]", "obj['dn'] not in sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if obj['dn'] not in bd_objs:", "deploy_incremental = False try: dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug) except: if verbose:", "ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\\n' %", "not deploy_incremental: for tenant in tenant_list: try: tenant_obj = dom.Tenant(tenant['name']) except: continue recursive_delete(tenant_obj)", "aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel):", "desc['Controller']['ip'] except: exit(1) try: dom_user = desc['Controller']['user'] except: exit(1) try: dom_pwd = desc['Controller']['pwd']", "aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel):", "tenant_fe_objs = {} tenant_sj_objs = {} tenant_sn_objs = {} tenant_ap_objs = {} tenant_epg_objs", "[] for ctx in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n'", 
"BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in epg: for cons in epg['Consume']: try:", "if 'BridgeDomain' in epg: try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']]) except: try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain'])) except: if verbose: print('RELATE", "in tenant and isinstance(tenant['L3External'], list) else [] for l3e in l3e_list: l3e_obj =", "in l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']]", "except: if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose:", "except: exit(1) try: dom_pwd = desc['Controller']['pwd'] except: exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except:", "recursive_delete(child) elif isinstance(child, aciL3OutModel): recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child)", "bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list = bd['Subnet'] if 'Subnet' in", "object_delete(obj) elif isinstance(obj, aciSubjectModel): if obj['dn'] not in sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel):", "epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] =", ">> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) for l3e in l3e_list: if 'Context'", "'Filter' : flt_objs.keys(), 'Contract' : ctr_objs.keys(), 'Context' : ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain'", "#======================================================================= flt_list = tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'], 
list) else []", "list) else [] for l3e in l3e_list: l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE", "ctr_list: sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else []", "recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child, aciSubnetModel): recursive_delete(child)", "%s\\n' % (dom_ip, dom_user, dom_pwd)) common = dom.Tenant('common') tenant_objs = {} flt_objs =", "in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn']) ctr_objs[ctr_obj['dn']]", "tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if", "ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] =", "elif isinstance(obj, aciFilterEntryModel): if obj['dn'] not in fe_objs: object_delete(obj) elif isinstance(obj, aciSubjectModel): if", "elif isinstance(obj, aciEPGModel): if obj['dn'] not in epg_objs: object_delete(obj) if not deploy_incremental: for", "not in l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if obj['dn'] not in fe_objs: object_delete(obj)", "% (epg_obj['name'], prov)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov))", "aciEPGModel): if obj['dn'] not in epg_objs: object_delete(obj) if not deploy_incremental: for tenant in", "tenant_objs = {} flt_objs = {} ctr_objs = {} ctx_objs = {} l3e_objs", "'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else [] for bd in bd_list: bd_obj", "obj['dn'] not in flt_objs: object_delete(obj) elif isinstance(obj, aciContractModel): if obj['dn'] not 
in ctr_objs:", "print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if verbose: print('RELATE >>", "in bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']]", "print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if 'Path' in epg: for", "isinstance(child, aciFilterModel): recursive_delete(child) elif isinstance(child, aciContractModel): recursive_delete(child) elif isinstance(child, aciContextModel): recursive_delete(child) elif isinstance(child,", "recursive_delete(child) elif isinstance(child, aciAppProfileModel): recursive_delete(child) elif isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel): if", "l3e_objs: object_delete(obj) elif isinstance(obj, aciFilterEntryModel): if obj['dn'] not in fe_objs: object_delete(obj) elif isinstance(obj,", "verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if 'Path' in epg:", "ctr in ctr_list: ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr)) if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\\n' % ctr_obj['dn'])", "= False try: dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug) except: if verbose: print('Connection", "and isinstance(bd['Subnet'], list) else [] for sn in sn_list: sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn)) if", "epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #======================================================================= # Relations #======================================================================= for ctr", "= ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else [] for sj", "tenant_sj_objs = {} tenant_sn_objs = {} tenant_ap_objs = {} tenant_epg_objs = {} 
#=======================================================================", "for ctx in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n' %", "aciContractModel): if obj['dn'] not in ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel): if obj['dn'] not", "in flt and isinstance(flt['FilterEntry'], list) else [] for fe in fe_list: fe_obj =", "print('UPDATE >> Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list =", "if verbose: print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs = {}", "print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list =", "#======================================================================= # Create & Update #======================================================================= flt_list = tenant['Filter'] if 'Filter' in tenant", "list) else [] for ctx in ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE", "sn_list = bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'], list) else [] for", "sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj ap_list = tenant['AppProfile'] if 'AppProfile' in tenant", "and isinstance(ap['EPG'], list) else [] for epg in epg_list: if 'BridgeDomain' in epg:", "= bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list = bd['Subnet'] if 'Subnet' in bd and", "= {} #======================================================================= # Create & Update #======================================================================= flt_list = tenant['Filter'] if 'Filter'", "if 'Filter' in tenant and isinstance(tenant['Filter'], list) else [] for flt in flt_list:", "not in bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel): if obj['dn'] not in ap_objs: 
object_delete(obj)", "%s, %s\\n' % (dom_ip, dom_user, dom_pwd)) exit(1) if verbose: print('Get Controller : %s,", "exit(1) try: delete_empty_tenant = desc['Option']['deleteEmptyTenant'] except: delete_empty_tenant = False try: deploy_incremental = desc['Option']['deployIncremental']", "% ctr_obj['dn']) ctr_objs[ctr_obj['dn']] = ctr_obj tenant_ctr_objs[ctr_obj['name']] = ctr_obj sj_list = ctr['Subject'] if 'Subject'", "tenant and isinstance(tenant['BridgeDomain'], list) else [] for bd in bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd))", "= ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n' % sj_obj['dn']) sj_objs[sj_obj['dn']] = sj_obj tenant_sj_objs[sj_obj['name']]", "= ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']]", "recursive_delete(child) elif isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child)", "ap in ap_list: ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap)) if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn'])", "tenant['AppProfile'] if 'AppProfile' in tenant and isinstance(tenant['AppProfile'], list) else [] for ap in", "isinstance(obj, aciBridgeDomainModel): if obj['dn'] not in bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel): if obj['dn']", "tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list = tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'], list)", "fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs = {} tenant_ctx_objs = {} tenant_l3e_objs", "flt_obj fe_list = flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else []", "list) else [] for sj in sj_list: if 'Filter' in sj: for 
flt", "epg['BridgeDomain'])) if 'Consume' in epg: for cons in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try:", "Consume:vzBrCP.name=%s\\n' % (epg_obj['name'], cons)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\\n' % (epg_obj['name'],", "l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet' : sn_objs.keys(),", "for tenant in tenant_list: tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant)) tenant_objs[tenant_obj['dn']] = tenant_obj if verbose: print('UPDATE", "l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e)) if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\\n' % l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj", "= ctx_obj l3e_list = tenant['L3External'] if 'L3External' in tenant and isinstance(tenant['L3External'], list) else", "if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj", "verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' % epg_obj['dn']) epg_objs[epg_obj['dn']] = epg_obj tenant_epg_objs[epg_obj['name']] = epg_obj #=======================================================================", "FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to", "print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s", "if 'Path' in epg: for path in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path))", "in bd_objs: object_delete(obj) elif isinstance(obj, aciAppProfileModel): if obj['dn'] not in ap_objs: object_delete(obj) elif", "tenant['name'] in delete_tenants: object_delete(tenant_obj) dom.close() return {'Tenant' : 
tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract'", "tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list)", "AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list = ap['EPG'] if", "to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) for ap in ap_list: epg_list = ap['EPG'] if", "2016. 10. 26. @author: \"comfact\" ''' import re from .model import * def", "in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE >> EPG:fvAEPg.name=%s to", "for flt in flt_list: flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' %", "= dom.Tenant('common') tenant_objs = {} flt_objs = {} ctr_objs = {} ctx_objs =", "sj in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj)) if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\\n' % sj_obj['dn'])", "= sj_obj ctx_list = tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'], list) else", "for ctr in ctr_list: sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'],", "flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt)) if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj", ">> Filter:vzFilter.dn=%s\\n' % flt_obj['dn']) flt_objs[flt_obj['dn']] = flt_obj tenant_flt_objs[flt_obj['name']] = flt_obj fe_list = flt['FilterEntry']", "try: dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug) except: if verbose: print('Connection Failed :", "if obj['dn'] not in epg_objs: object_delete(obj) if not deploy_incremental: for tenant in tenant_list:", "= desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'], list) else [] for tenant", "sj_obj 
tenant_sj_objs[sj_obj['name']] = sj_obj ctx_list = tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'],", "epg: for path in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path)) if verbose: print('RELATE", "dn = obj['dn'] obj.delete() if verbose: print('DELETE >> %s.dn=%s\\n' % (obj.class_name, dn)) def", "not in sj_objs: object_delete(obj) elif isinstance(obj, aciBridgeDomainModel): if obj['dn'] not in bd_objs: object_delete(obj)", "else [] for bd in bd_list: bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE >>", "tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'], list) else [] for ctr in", "tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External'])) except: if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\\n' % (bd['name'], bd['L3External'])) if", "in tenant and isinstance(tenant['Filter'], list) else [] for flt in flt_list: flt_obj =", "in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s", "% l3e_obj['dn']) l3e_objs[l3e_obj['dn']] = l3e_obj tenant_l3e_objs[l3e_obj['name']] = l3e_obj bd_list = tenant['BridgeDomain'] if 'BridgeDomain'", "exit(1) try: dom_user = desc['Controller']['user'] except: exit(1) try: dom_pwd = desc['Controller']['pwd'] except: exit(1)", "from .model import * def deployACI(desc, verbose=False, debug=False): try: dom_ip = desc['Controller']['ip'] except:", "try: deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental = False try: dom = Controller(dom_ip, dom_user,", "= tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] = 
bd_obj tenant_bd_objs[bd_obj['name']]", "if 'AppProfile' in tenant and isinstance(tenant['AppProfile'], list) else [] for ap in ap_list:", "for cons in epg['Consume']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(cons)) except: if verbose: print('RELATE", "'Provide' in epg: for prov in epg['Provide']: try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except:", "dn)) def recursive_delete(obj): children = obj.children() for child in children: if isinstance(child, aciFilterModel):", ": ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject' :", "dom_user, dom_pwd, debug=debug) except: if verbose: print('Connection Failed : %s, %s, %s\\n' %", "key): ret[key] = unit[key] return ret tenant_list = desc['Tenant'] if 'Tenant' in desc", "delete_empty_tenant = False try: deploy_incremental = desc['Option']['deployIncremental'] except: deploy_incremental = False try: dom", "= fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list = tenant['Contract'] if 'Contract' in tenant and", "bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list = bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'],", "fe_list = flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else [] for", "'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else [] for fe in fe_list: fe_obj", ">> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\\n' % (sj['name'], flt)) if verbose: print('RELATE >> Subject:vzSubj.name=%s to", "isinstance(child, aciBridgeDomainModel): recursive_delete(child) elif isinstance(child, aciFilterEntryModel): recursive_delete(child) elif isinstance(child, aciSubjectModel): recursive_delete(child) elif isinstance(child,", ": tenant_objs.keys(), 'Filter' : flt_objs.keys(), 'Contract' : ctr_objs.keys(), 
'Context' : ctx_objs.keys(), 'L3External' :", "'L3External' : l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(), 'FilterEntry' : fe_objs.keys(), 'Subject' : sj_objs.keys(), 'Subnet'", "ctx_list: ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']] =", "print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] = bd_obj sn_list =", "sn_obj ap_list = tenant['AppProfile'] if 'AppProfile' in tenant and isinstance(tenant['AppProfile'], list) else []", "'L3External' in tenant and isinstance(tenant['L3External'], list) else [] for l3e in l3e_list: l3e_obj", "prov)) if 'Path' in epg: for path in epg['Path']: ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf']) tenant_epg_objs[epg['name']].relate(ep_obj,", "tenant_epg_objs = {} #======================================================================= # Create & Update #======================================================================= flt_list = tenant['Filter'] if", "object_delete(obj) elif isinstance(obj, aciContractModel): if obj['dn'] not in ctr_objs: object_delete(obj) elif isinstance(obj, aciContextModel):", "= {} for key in unit: if re.search('^[a-z]\\w*', key): ret[key] = unit[key] return", "(dom_ip, dom_user, dom_pwd)) common = dom.Tenant('common') tenant_objs = {} flt_objs = {} ctr_objs", "= fe_obj ctr_list = tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'], list) else", "object_delete(obj) elif isinstance(obj, aciSubnetModel): if obj['dn'] not in sn_objs: object_delete(obj) elif isinstance(obj, aciEPGModel):", "in unit: if re.search('^[a-z]\\w*', key): ret[key] = unit[key] return ret tenant_list = desc['Tenant']", "bd in bd_list: if 'Context' in bd: try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']]) except: try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context'])) except:", 
"False try: dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug) except: if verbose: print('Connection Failed", "tenant_obj.Context.create(**parse_desc_unit(ctx)) if verbose: print('UPDATE >> Context:fvCtx.dn=%s\\n' % ctx_obj['dn']) ctx_objs[ctx_obj['dn']] = ctx_obj tenant_ctx_objs[ctx_obj['name']] =", "except: exit(1) try: dom_user = desc['Controller']['user'] except: exit(1) try: dom_pwd = desc['Controller']['pwd'] except:", "tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov]) except: try: tenant_epg_objs[epg['name']].relate(common.Contract(prov)) except: if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' %", "isinstance(child, aciEPGModel): recursive_delete(child) if isinstance(obj, aciFilterModel): if obj['dn'] not in flt_objs: object_delete(obj) elif", "print('UPDATE >> AppProfile:fvAp.dn=%s\\n' % ap_obj['dn']) ap_objs[ap_obj['dn']] = ap_obj tenant_ap_objs[ap_obj['name']] = ap_obj epg_list =", "if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\\n' % (epg_obj['name'], prov)) if 'Path' in", "print('UPDATE >> fvTenant.dn=%s\\n' % tenant_obj['dn']) tenant_flt_objs = {} tenant_ctr_objs = {} tenant_ctx_objs =", "parse_desc_unit(unit): ret = {} for key in unit: if re.search('^[a-z]\\w*', key): ret[key] =", "ctr and isinstance(ctr['Subject'], list) else [] for sj in sj_list: sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj))", "in ap and isinstance(ap['EPG'], list) else [] for epg in epg_list: if 'BridgeDomain'", "verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\\n' % (epg_obj['name'], epg['BridgeDomain'])) if 'Consume' in epg:", "object_delete(obj) elif isinstance(obj, aciEPGModel): if obj['dn'] not in epg_objs: object_delete(obj) if not deploy_incremental:", "verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\\n' % fe_obj['dn']) fe_objs[fe_obj['dn']] = fe_obj tenant_fe_objs[fe_obj['name']] = fe_obj ctr_list", "tenant_obj.BridgeDomain.create(**parse_desc_unit(bd)) if 
verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\\n' % bd_obj['dn']) bd_objs[bd_obj['dn']] = bd_obj tenant_bd_objs[bd_obj['name']] =", "def deployACI(desc, verbose=False, debug=False): try: dom_ip = desc['Controller']['ip'] except: exit(1) try: dom_user =", "if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\\n' % sn_obj['dn']) sn_objs[sn_obj['dn']] = sn_obj tenant_sn_objs[sn_obj['name']] = sn_obj", ": %s, %s, %s\\n' % (dom_ip, dom_user, dom_pwd)) exit(1) if verbose: print('Get Controller", "dom_user, dom_pwd)) exit(1) if verbose: print('Get Controller : %s, %s, %s\\n' % (dom_ip,", "'Contract' : ctr_objs.keys(), 'Context' : ctx_objs.keys(), 'L3External' : l3e_objs.keys(), 'BridgeDomain' : bd_objs.keys(), 'FilterEntry'", "Context:fvCtx.name=%s\\n' % (bd['name'], bd['Context'])) if verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\\n' % (bd['name'],", "= ctr_obj sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else", "for epg in epg_list: epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg)) if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\\n' %" ]
[ "self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def _create_pipe(family: int, proto: int) -> socket.socket: return", "request(self, packet): try: return await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise except Exception: raise", "int, proto: int) -> socket.socket: return socket.socket( family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, )", "partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def _create_pipe(family: int, proto: int) -> socket.socket:", "self._dst_address) return self._protocol_behavior.complete_condition() async def request(self, packet): try: return await self._protocol_behavior.request(self._send, packet) except", "__init__( self, pysocket: socket.socket, address: Address, protocol_behavior: ProtocolBehavior, ): self._socket = pysocket self._dst_address", "= partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe,", "async def __await_impl__(self): self._transport, self._client = await self._create_connection() return self._client __iter__ = __await__", "self async def __aexit__(self, exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close() async def __await_impl__(self): self._transport,", "__await__(self): return self.__await_impl__().__await__() async def __aenter__(self): return await self async def __aexit__(self, exc_type,", "except Exception: raise class RawConnection: def __init__( self, address: Address, family: int, proto:", "self._protocol_behavior = protocol_behavior self._transport = None def connection_made(self, transport): self._transport = transport def", "packet): try: return await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise except Exception: 
raise class", "def __init__( self, address: Address, family: int, proto: int, protocol_behavior: ProtocolBehavior, *, loop=None", "packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async def request(self, packet): try: return await self._protocol_behavior.request(self._send,", "self._socket = pysocket self._dst_address = address self._protocol_behavior = protocol_behavior self._transport = None def", "connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async def request(self,", "_create_pipe(family: int, proto: int) -> socket.socket: return socket.socket( family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto,", "socket.socket: return socket.socket( family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self): return self.__await_impl__().__await__()", "data: bytes): self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data, self._dst_address) return", "def __aenter__(self): return await self async def __aexit__(self, exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close()", "@staticmethod def _create_pipe(family: int, proto: int) -> socket.socket: return socket.socket( family=family, type=socket.SOCK_RAW |", "int) -> socket.socket: return socket.socket( family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self):", "self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def _create_pipe(family: int, proto: int)", "protocol_behavior: ProtocolBehavior, *, loop=None ): self._loop = loop or asyncio.get_running_loop() self._transport = None", "proto: int) -> socket.socket: return socket.socket( 
family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, ) def", "socket from functools import partial from .struct import Address from .protocol_behavior import ProtocolBehavior", "return socket.socket( family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self): return self.__await_impl__().__await__() async", "self._loop = loop or asyncio.get_running_loop() self._transport = None self._client = None self._pipe =", "from functools import partial from .struct import Address from .protocol_behavior import ProtocolBehavior class", "address: Address, protocol_behavior: ProtocolBehavior, ): self._socket = pysocket self._dst_address = address self._protocol_behavior =", "def connection_made(self, transport): self._transport = transport def data_received(self, data: bytes): self._protocol_behavior.response(data) def connection_lost(self,", "asyncio import socket from functools import partial from .struct import Address from .protocol_behavior", "self._client = None self._pipe = self._create_pipe(family, proto) protocol_factory = partial( RawProtocol, pysocket=self._pipe, address=address,", "transport): self._transport = transport def data_received(self, data: bytes): self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel()", "proto) protocol_factory = partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) self._create_connection = partial( self._loop.connect_read_pipe,", ") def __await__(self): return self.__await_impl__().__await__() async def __aenter__(self): return await self async def", "return self.__await_impl__().__await__() async def __aenter__(self): return await self async def __aexit__(self, exc_type, exc_val,", "socket.socket( family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self): return self.__await_impl__().__await__() async def", "import asyncio import socket from functools 
import partial from .struct import Address from", "= loop or asyncio.get_running_loop() self._transport = None self._client = None self._pipe = self._create_pipe(family,", "= self._create_pipe(family, proto) protocol_factory = partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) self._create_connection =", "RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod", "from .struct import Address from .protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__( self,", "= address self._protocol_behavior = protocol_behavior self._transport = None def connection_made(self, transport): self._transport =", "protocol_behavior=protocol_behavior, ) self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def _create_pipe(family: int,", "-> socket.socket: return socket.socket( family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self): return", "= protocol_behavior self._transport = None def connection_made(self, transport): self._transport = transport def data_received(self,", "exc_tb): self._transport.close() self._pipe.close() async def __await_impl__(self): self._transport, self._client = await self._create_connection() return self._client", "transport def data_received(self, data: bytes): self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self, packet):", "connection_made(self, transport): self._transport = transport def data_received(self, data: bytes): self._protocol_behavior.response(data) def connection_lost(self, exc):", "protocol_factory = partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) 
self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory,", "ProtocolBehavior, *, loop=None ): self._loop = loop or asyncio.get_running_loop() self._transport = None self._client", "exc): self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async def request(self, packet):", "Address, family: int, proto: int, protocol_behavior: ProtocolBehavior, *, loop=None ): self._loop = loop", "async def __aexit__(self, exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close() async def __await_impl__(self): self._transport, self._client", "return await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise except Exception: raise class RawConnection: def", "packet) except asyncio.CancelledError: raise except Exception: raise class RawConnection: def __init__( self, address:", "__aexit__(self, exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close() async def __await_impl__(self): self._transport, self._client = await", "family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self): return self.__await_impl__().__await__() async def __aenter__(self):", "= partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def _create_pipe(family: int, proto: int) ->", "_send(self, packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async def request(self, packet): try: return await", "pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def", "= None self._client = None self._pipe = self._create_pipe(family, proto) protocol_factory = partial( RawProtocol,", "self._transport = transport 
def data_received(self, data: bytes): self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel() def", "self._dst_address = address self._protocol_behavior = protocol_behavior self._transport = None def connection_made(self, transport): self._transport", "import partial from .struct import Address from .protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol): def", "): self._loop = loop or asyncio.get_running_loop() self._transport = None self._client = None self._pipe", "ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__( self, pysocket: socket.socket, address: Address, protocol_behavior: ProtocolBehavior, ):", "type=socket.SOCK_RAW | socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self): return self.__await_impl__().__await__() async def __aenter__(self): return", "socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self): return self.__await_impl__().__await__() async def __aenter__(self): return await self", "family: int, proto: int, protocol_behavior: ProtocolBehavior, *, loop=None ): self._loop = loop or", "class RawConnection: def __init__( self, address: Address, family: int, proto: int, protocol_behavior: ProtocolBehavior,", "= None self._pipe = self._create_pipe(family, proto) protocol_factory = partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior,", "self._pipe = self._create_pipe(family, proto) protocol_factory = partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) self._create_connection", "self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async", "pipe=self._pipe, ) @staticmethod def _create_pipe(family: int, proto: int) -> socket.socket: return socket.socket( family=family,", "None self._pipe = 
self._create_pipe(family, proto) protocol_factory = partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, )", "from .protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__( self, pysocket: socket.socket, address: Address,", "self, pysocket: socket.socket, address: Address, protocol_behavior: ProtocolBehavior, ): self._socket = pysocket self._dst_address =", "functools import partial from .struct import Address from .protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol):", "int, proto: int, protocol_behavior: ProtocolBehavior, *, loop=None ): self._loop = loop or asyncio.get_running_loop()", ") self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def _create_pipe(family: int, proto:", "def request(self, packet): try: return await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise except Exception:", ") @staticmethod def _create_pipe(family: int, proto: int) -> socket.socket: return socket.socket( family=family, type=socket.SOCK_RAW", "def _send(self, packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async def request(self, packet): try: return", "= None def connection_made(self, transport): self._transport = transport def data_received(self, data: bytes): self._protocol_behavior.response(data)", "self._protocol_behavior.complete_condition() async def request(self, packet): try: return await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise", "address self._protocol_behavior = protocol_behavior self._transport = None def connection_made(self, transport): self._transport = transport", "def _create_pipe(family: int, proto: int) -> socket.socket: return socket.socket( family=family, type=socket.SOCK_RAW | socket.SOCK_NONBLOCK,", "class 
RawProtocol(asyncio.Protocol): def __init__( self, pysocket: socket.socket, address: Address, protocol_behavior: ProtocolBehavior, ): self._socket", "exc_val, exc_tb): self._transport.close() self._pipe.close() async def __await_impl__(self): self._transport, self._client = await self._create_connection() return", "return self._protocol_behavior.complete_condition() async def request(self, packet): try: return await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError:", "self._create_pipe(family, proto) protocol_factory = partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) self._create_connection = partial(", "protocol_behavior: ProtocolBehavior, ): self._socket = pysocket self._dst_address = address self._protocol_behavior = protocol_behavior self._transport", "self._transport = None self._client = None self._pipe = self._create_pipe(family, proto) protocol_factory = partial(", "import ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__( self, pysocket: socket.socket, address: Address, protocol_behavior: ProtocolBehavior,", "int, protocol_behavior: ProtocolBehavior, *, loop=None ): self._loop = loop or asyncio.get_running_loop() self._transport =", "asyncio.get_running_loop() self._transport = None self._client = None self._pipe = self._create_pipe(family, proto) protocol_factory =", "protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def _create_pipe(family: int, proto: int) -> socket.socket: return socket.socket(", "protocol_behavior self._transport = None def connection_made(self, transport): self._transport = transport def data_received(self, data:", "self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async def request(self, packet): try:", "raise except Exception: raise class RawConnection: def __init__( self, address: Address, family: int,", 
"try: return await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise except Exception: raise class RawConnection:", "*, loop=None ): self._loop = loop or asyncio.get_running_loop() self._transport = None self._client =", "| socket.SOCK_NONBLOCK, proto=proto, ) def __await__(self): return self.__await_impl__().__await__() async def __aenter__(self): return await", "self.__await_impl__().__await__() async def __aenter__(self): return await self async def __aexit__(self, exc_type, exc_val, exc_tb):", "loop or asyncio.get_running_loop() self._transport = None self._client = None self._pipe = self._create_pipe(family, proto)", "__init__( self, address: Address, family: int, proto: int, protocol_behavior: ProtocolBehavior, *, loop=None ):", ".protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__( self, pysocket: socket.socket, address: Address, protocol_behavior:", "self._transport.close() self._pipe.close() async def __await_impl__(self): self._transport, self._client = await self._create_connection() return self._client __iter__", "pysocket: socket.socket, address: Address, protocol_behavior: ProtocolBehavior, ): self._socket = pysocket self._dst_address = address", "proto=proto, ) def __await__(self): return self.__await_impl__().__await__() async def __aenter__(self): return await self async", "self._transport = None def connection_made(self, transport): self._transport = transport def data_received(self, data: bytes):", "Exception: raise class RawConnection: def __init__( self, address: Address, family: int, proto: int,", "= transport def data_received(self, data: bytes): self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self,", "self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise except Exception: raise class RawConnection: def __init__( self,", "<reponame>PrVrSs/aionpc<gh_stars>0 import 
asyncio import socket from functools import partial from .struct import Address", "raise class RawConnection: def __init__( self, address: Address, family: int, proto: int, protocol_behavior:", "except asyncio.CancelledError: raise except Exception: raise class RawConnection: def __init__( self, address: Address,", "partial( RawProtocol, pysocket=self._pipe, address=address, protocol_behavior=protocol_behavior, ) self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, )", "address=address, protocol_behavior=protocol_behavior, ) self._create_connection = partial( self._loop.connect_read_pipe, protocol_factory=protocol_factory, pipe=self._pipe, ) @staticmethod def _create_pipe(family:", "self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async def request(self, packet): try: return await self._protocol_behavior.request(self._send, packet)", "await self async def __aexit__(self, exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close() async def __await_impl__(self):", "None self._client = None self._pipe = self._create_pipe(family, proto) protocol_factory = partial( RawProtocol, pysocket=self._pipe,", "self._pipe.close() async def __await_impl__(self): self._transport, self._client = await self._create_connection() return self._client __iter__ =", "exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close() async def __await_impl__(self): self._transport, self._client = await self._create_connection()", "): self._socket = pysocket self._dst_address = address self._protocol_behavior = protocol_behavior self._transport = None", "Address from .protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__( self, pysocket: socket.socket, address:", "async def request(self, packet): try: return await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise except", "self, 
address: Address, family: int, proto: int, protocol_behavior: ProtocolBehavior, *, loop=None ): self._loop", "def __aexit__(self, exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close() async def __await_impl__(self): self._transport, self._client =", "asyncio.CancelledError: raise except Exception: raise class RawConnection: def __init__( self, address: Address, family:", "Address, protocol_behavior: ProtocolBehavior, ): self._socket = pysocket self._dst_address = address self._protocol_behavior = protocol_behavior", "pysocket self._dst_address = address self._protocol_behavior = protocol_behavior self._transport = None def connection_made(self, transport):", "__aenter__(self): return await self async def __aexit__(self, exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close() async", "import socket from functools import partial from .struct import Address from .protocol_behavior import", "= pysocket self._dst_address = address self._protocol_behavior = protocol_behavior self._transport = None def connection_made(self,", "bytes): self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition()", "await self._protocol_behavior.request(self._send, packet) except asyncio.CancelledError: raise except Exception: raise class RawConnection: def __init__(", "import Address from .protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__( self, pysocket: socket.socket,", "def data_received(self, data: bytes): self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data,", "RawProtocol(asyncio.Protocol): def __init__( self, pysocket: socket.socket, address: Address, protocol_behavior: ProtocolBehavior, ): self._socket =", "None def 
connection_made(self, transport): self._transport = transport def data_received(self, data: bytes): self._protocol_behavior.response(data) def", ".struct import Address from .protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__( self, pysocket:", "partial from .struct import Address from .protocol_behavior import ProtocolBehavior class RawProtocol(asyncio.Protocol): def __init__(", "proto: int, protocol_behavior: ProtocolBehavior, *, loop=None ): self._loop = loop or asyncio.get_running_loop() self._transport", "address: Address, family: int, proto: int, protocol_behavior: ProtocolBehavior, *, loop=None ): self._loop =", "socket.socket, address: Address, protocol_behavior: ProtocolBehavior, ): self._socket = pysocket self._dst_address = address self._protocol_behavior", "RawConnection: def __init__( self, address: Address, family: int, proto: int, protocol_behavior: ProtocolBehavior, *,", "loop=None ): self._loop = loop or asyncio.get_running_loop() self._transport = None self._client = None", "data_received(self, data: bytes): self._protocol_behavior.response(data) def connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data, self._dst_address)", "def __await__(self): return self.__await_impl__().__await__() async def __aenter__(self): return await self async def __aexit__(self,", "def __init__( self, pysocket: socket.socket, address: Address, protocol_behavior: ProtocolBehavior, ): self._socket = pysocket", "return await self async def __aexit__(self, exc_type, exc_val, exc_tb): self._transport.close() self._pipe.close() async def", "async def __aenter__(self): return await self async def __aexit__(self, exc_type, exc_val, exc_tb): self._transport.close()", "ProtocolBehavior, ): self._socket = pysocket self._dst_address = address self._protocol_behavior = protocol_behavior self._transport =", "or asyncio.get_running_loop() self._transport = None self._client = None 
self._pipe = self._create_pipe(family, proto) protocol_factory", "def connection_lost(self, exc): self._protocol_behavior.cancel() def _send(self, packet): self._socket.sendto(packet.data, self._dst_address) return self._protocol_behavior.complete_condition() async def" ]
[ "host is located. Could be used to extract features from dtype: string \"\"\"", "\"\"\" host_location ============= Where the host is located. Hypothesis that the host being", "price Text of where the host is located. Could be used to extract", "host_location ============= Where the host is located. Hypothesis that the host being somewhere", "being somewhere else affects the price Text of where the host is located.", "Text of where the host is located. Could be used to extract features", "is located. Hypothesis that the host being somewhere else affects the price Text", "the host is located. Could be used to extract features from dtype: string", "affects the price Text of where the host is located. Could be used", "Where the host is located. Hypothesis that the host being somewhere else affects", "host being somewhere else affects the price Text of where the host is", "of where the host is located. Could be used to extract features from", "else affects the price Text of where the host is located. Could be", "============= Where the host is located. Hypothesis that the host being somewhere else", "the host is located. Hypothesis that the host being somewhere else affects the", "located. Hypothesis that the host being somewhere else affects the price Text of", "the price Text of where the host is located. Could be used to", "somewhere else affects the price Text of where the host is located. Could", "host is located. Hypothesis that the host being somewhere else affects the price", "where the host is located. Could be used to extract features from dtype:", "Hypothesis that the host being somewhere else affects the price Text of where", "the host being somewhere else affects the price Text of where the host", "that the host being somewhere else affects the price Text of where the" ]
[]
[ "If XGBoost is to be implemented \"\"\" y = dtrain.get_label() elements = np.log(np.cosh(yhat", "dtrain.get_label() elements = ((y - yhat) / y) ** 2 if XGBoost: return", "XGBoost train function \"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log", "XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All input labels are required to", "\"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All input", "https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function is Squared Percentage Error. Args: XGBoost (Bool): Set", "as default use. Note that you should also set `maximize=False` in the XGBoost", "should also set `maximize=False` in the XGBoost train function \"\"\" def RMSPE(yhat, dtrain,", "return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation", "False return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root Mean Squared Percentage Error:", "if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))),", "All input labels are required to be greater than -1. yhat: Predictions dtrain:", "XGBoost. We assume LightGBM as default use. Note that you should also set", "XGBoost is to be implemented \"\"\" y = dtrain.get_label() elements = ((y -", "float(np.sum(elements) / len(y)), False return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root Mean", "log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. 
All input labels are", "** 2 if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else: return 'RMSPE', float(np.sqrt(np.sum(elements)", "Predictions dtrain: The XGBoost / LightGBM dataset XGBoost (Bool): If XGBoost is to", "is to be implemented \"\"\" y = dtrain.get_label() elements = np.log(np.cosh(yhat - y))", "return 'LogCosh', float(np.sum(elements) / len(y)) else: return 'LogCosh', float(np.sum(elements) / len(y)), False return", "\"\"\" y = dtrain.get_label() elements = np.log(np.cosh(yhat - y)) if XGBoost: return 'LogCosh',", "if using XGBoost. We assume LightGBM as default use. Note that you should", "be implemented \"\"\" y = dtrain.get_label() elements = ((y - yhat) / y)", "Error. All input labels are required to be greater than -1. yhat: Predictions", "assume LightGBM as default use. Note that you should also set `maximize=False` in", "= np.log(np.cosh(yhat - y)) if XGBoost: return 'LogCosh', float(np.sum(elements) / len(y)) else: return", "Set to True if using XGBoost. We assume LightGBM as default use. Note", "you should also set `maximize=False` in the XGBoost train function \"\"\" def RMSPE(yhat,", "- y)) if XGBoost: return 'LogCosh', float(np.sum(elements) / len(y)) else: return 'LogCosh', float(np.sum(elements)", "set `maximize=False` in the XGBoost train function \"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\"", "\"\"\" Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to Mean Absolute Error.", "function \"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All", "as an alternative to Mean Absolute Error. Args: XGBoost (Bool): Set to True", "y)) if XGBoost: return 'LogCosh', float(np.sum(elements) / len(y)) else: return 'LogCosh', float(np.sum(elements) /", "len(y)), False return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root Mean Squared Percentage", "are required to be greater than -1. 
yhat: Predictions dtrain: The XGBoost /", "Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to Mean Absolute Error. Args:", "'LogCosh', float(np.sum(elements) / len(y)), False return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root", "XGBoost is to be implemented \"\"\" y = dtrain.get_label() elements = np.log(np.cosh(yhat -", "/ LightGBM dataset XGBoost (Bool): If XGBoost is to be implemented \"\"\" y", "the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to Mean Absolute Error. Args: XGBoost", "elements = ((y - yhat) / y) ** 2 if XGBoost: return 'RMSPE',", "True if using XGBoost. We assume LightGBM as default use. Note that you", "also set `maximize=False` in the XGBoost train function \"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost):", "The corresponding Loss function is Squared Percentage Error. Args: XGBoost (Bool): Set to", "use. Note that you should also set `maximize=False` in the XGBoost train function", "than -1. yhat: Predictions dtrain: The XGBoost / LightGBM dataset XGBoost (Bool): If", "alternative to Mean Absolute Error. Args: XGBoost (Bool): Set to True if using", "Squared Log Error. All input labels are required to be greater than -1.", "= dtrain.get_label() elements = np.log(np.cosh(yhat - y)) if XGBoost: return 'LogCosh', float(np.sum(elements) /", "corresponding Loss function is Squared Percentage Error. Args: XGBoost (Bool): Set to True", "default use. 
Note that you should also set `maximize=False` in the XGBoost train", "`maximize=False` in the XGBoost train function \"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root", "'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))), False return RMSPE", "set `maximize=False` in the XGBoost train function \"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\"", "import numpy as np def LogCoshMetric(XGBoost=False): \"\"\" Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as", "(Bool): If XGBoost is to be implemented \"\"\" y = dtrain.get_label() elements =", "Log Error. All input labels are required to be greater than -1. yhat:", "XGBoost / LightGBM dataset XGBoost (Bool): If XGBoost is to be implemented \"\"\"", "required to be greater than -1. yhat: Predictions dtrain: The XGBoost / LightGBM", "that you should also set `maximize=False` in the XGBoost train function \"\"\" def", "Loss function is Squared Percentage Error. Args: XGBoost (Bool): Set to True if", "yhat: Predictions dtrain: The XGBoost / LightGBM dataset XGBoost (Bool): If XGBoost is", "Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function is Squared Percentage Error.", "Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to Mean Absolute Error. Args: XGBoost (Bool): Set", "the XGBoost train function \"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared", "input labels are required to be greater than -1. 
yhat: Predictions dtrain: The", "\"\"\" Calculates the Root Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function", "= ((y - yhat) / y) ** 2 if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements)", "should also set `maximize=False` in the XGBoost train function \"\"\" def log_cosh_error(yhat, dtrain,", "the Root Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function is Squared", "The XGBoost / LightGBM dataset XGBoost (Bool): If XGBoost is to be implemented", "return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))), False return", "dtrain.get_label() elements = np.log(np.cosh(yhat - y)) if XGBoost: return 'LogCosh', float(np.sum(elements) / len(y))", "to be greater than -1. yhat: Predictions dtrain: The XGBoost / LightGBM dataset", "be implemented \"\"\" y = dtrain.get_label() elements = np.log(np.cosh(yhat - y)) if XGBoost:", "y = dtrain.get_label() elements = ((y - yhat) / y) ** 2 if", "RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss", "labels are required to be greater than -1. yhat: Predictions dtrain: The XGBoost", "Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function is Squared Percentage Error. Args: XGBoost (Bool):", "LogCoshMetric(XGBoost=False): \"\"\" Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to Mean Absolute", "dtrain: The XGBoost / LightGBM dataset XGBoost (Bool): If XGBoost is to be", "implemented \"\"\" y = dtrain.get_label() elements = ((y - yhat) / y) **", "using XGBoost. We assume LightGBM as default use. 
Note that you should also", "\"\"\" Root Mean Squared Log Error. All input labels are required to be", "if XGBoost: return 'LogCosh', float(np.sum(elements) / len(y)) else: return 'LogCosh', float(np.sum(elements) / len(y)),", "Absolute Error. Args: XGBoost (Bool): Set to True if using XGBoost. We assume", "log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The", "LightGBM as default use. Note that you should also set `maximize=False` in the", "def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All input labels", "yhat) / y) ** 2 if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else:", "Percentage Error. Args: XGBoost (Bool): Set to True if using XGBoost. We assume", "to be implemented \"\"\" y = dtrain.get_label() elements = np.log(np.cosh(yhat - y)) if", "Mean Absolute Error. Args: XGBoost (Bool): Set to True if using XGBoost. We", "LightGBM dataset XGBoost (Bool): If XGBoost is to be implemented \"\"\" y =", "dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All input labels are required", "XGBoost (Bool): If XGBoost is to be implemented \"\"\" y = dtrain.get_label() elements", "Calculates the Root Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function is", "def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All input labels", "def LogCoshMetric(XGBoost=False): \"\"\" Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to Mean", "implemented \"\"\" y = dtrain.get_label() elements = np.log(np.cosh(yhat - y)) if XGBoost: return", "function is Squared Percentage Error. 
Args: XGBoost (Bool): Set to True if using", "\"\"\" y = dtrain.get_label() elements = ((y - yhat) / y) ** 2", "XGBoost train function \"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log", "np def LogCoshMetric(XGBoost=False): \"\"\" Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to", "be greater than -1. yhat: Predictions dtrain: The XGBoost / LightGBM dataset XGBoost", "Note that you should also set `maximize=False` in the XGBoost train function \"\"\"", "(Bool): Set to True if using XGBoost. We assume LightGBM as default use.", "function \"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All", "float(np.sum(elements) / len(y)) else: return 'LogCosh', float(np.sum(elements) / len(y)), False return log_cosh_error def", "y = dtrain.get_label() elements = np.log(np.cosh(yhat - y)) if XGBoost: return 'LogCosh', float(np.sum(elements)", "XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))), False", "We assume LightGBM as default use. Note that you should also set `maximize=False`", "return 'LogCosh', float(np.sum(elements) / len(y)), False return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates the", "Root Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function is Squared Percentage", "is Squared Percentage Error. Args: XGBoost (Bool): Set to True if using XGBoost.", "RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All input labels are", "XGBoost (Bool): Set to True if using XGBoost. 
We assume LightGBM as default", "else: return 'LogCosh', float(np.sum(elements) / len(y)), False return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates", "def RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root Mean Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding", "XGBoost: return 'LogCosh', float(np.sum(elements) / len(y)) else: return 'LogCosh', float(np.sum(elements) / len(y)), False", "Mean Squared Log Error. All input labels are required to be greater than", "len(y)) else: return 'LogCosh', float(np.sum(elements) / len(y)), False return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\"", "2 if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else: return 'RMSPE', float(np.sqrt(np.sum(elements) /", "Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function is Squared Percentage Error. Args: XGBoost", "/ len(y)), False return log_cosh_error def RMSPEMetric(XGBoost=False): \"\"\" Calculates the Root Mean Squared", "Root Mean Squared Log Error. All input labels are required to be greater", "np.log(np.cosh(yhat - y)) if XGBoost: return 'LogCosh', float(np.sum(elements) / len(y)) else: return 'LogCosh',", "in the XGBoost train function \"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean", "-1. yhat: Predictions dtrain: The XGBoost / LightGBM dataset XGBoost (Bool): If XGBoost", "the XGBoost train function \"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared", "to Mean Absolute Error. Args: XGBoost (Bool): Set to True if using XGBoost.", "`maximize=False` in the XGBoost train function \"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root", "If XGBoost is to be implemented \"\"\" y = dtrain.get_label() elements = ((y", "greater than -1. 
yhat: Predictions dtrain: The XGBoost / LightGBM dataset XGBoost (Bool):", "Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to Mean Absolute Error. Args: XGBoost (Bool): Set to", "((y - yhat) / y) ** 2 if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) /", "<filename>bokbokbok/eval_metrics/regression/regression_eval_metrics.py import numpy as np def LogCoshMetric(XGBoost=False): \"\"\" Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym)", "[Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative to Mean Absolute Error. Args: XGBoost (Bool):", "'LogCosh', float(np.sum(elements) / len(y)) else: return 'LogCosh', float(np.sum(elements) / len(y)), False return log_cosh_error", "y) ** 2 if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else: return 'RMSPE',", "you should also set `maximize=False` in the XGBoost train function \"\"\" def log_cosh_error(yhat,", "to be implemented \"\"\" y = dtrain.get_label() elements = ((y - yhat) /", "- yhat) / y) ** 2 if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y)))", "to True if using XGBoost. We assume LightGBM as default use. Note that", "\"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error. All input", "/ len(y)) else: return 'LogCosh', float(np.sum(elements) / len(y)), False return log_cosh_error def RMSPEMetric(XGBoost=False):", "Squared Percentage Error: https://www.kaggle.com/c/optiver-realized-volatility-prediction/overview/evaluation The corresponding Loss function is Squared Percentage Error. Args:", "Squared Percentage Error. Args: XGBoost (Bool): Set to True if using XGBoost. 
We", "in the XGBoost train function \"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean", "dataset XGBoost (Bool): If XGBoost is to be implemented \"\"\" y = dtrain.get_label()", "= dtrain.get_label() elements = ((y - yhat) / y) ** 2 if XGBoost:", "as np def LogCoshMetric(XGBoost=False): \"\"\" Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an alternative", "is to be implemented \"\"\" y = dtrain.get_label() elements = ((y - yhat)", "Error. Args: XGBoost (Bool): Set to True if using XGBoost. We assume LightGBM", "train function \"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error.", "Args: XGBoost (Bool): Set to True if using XGBoost. We assume LightGBM as", "also set `maximize=False` in the XGBoost train function \"\"\" def log_cosh_error(yhat, dtrain, XGBoost=XGBoost):", "/ y) ** 2 if XGBoost: return 'RMSPE', float(np.sqrt(np.sum(elements) / len(y))) else: return", "an alternative to Mean Absolute Error. Args: XGBoost (Bool): Set to True if", "train function \"\"\" def RMSPE(yhat, dtrain, XGBoost=XGBoost): \"\"\" Root Mean Squared Log Error.", "numpy as np def LogCoshMetric(XGBoost=False): \"\"\" Calculates the [Log Cosh Error](https://openreview.net/pdf?id=rkglvsC9Ym) as an", "elements = np.log(np.cosh(yhat - y)) if XGBoost: return 'LogCosh', float(np.sum(elements) / len(y)) else:" ]
[ "word2vec_model Type: str Description: Path of word2vec trained model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin'", "parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'):", "required user inputs in the class attributes. Attributes ---------- 1. word2vec_model Type: str", "self.cluster_overlap = True self.word_vector_dim = 300 self.representative_word_vector = 'average' def config_reader(self): ''' This", "parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim') if parser.get('Input Variables', 'representative_word_vector'): self.representative_word_vector", "Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim =", "Value: True 6. word_vector_dim Type: int Description: Dimension of word vectors. Default Value:", "<NAME> \"\"\" #!/usr/bin/python from ConfigParser import ConfigParser class ConfigParse(): ''' This class reads", "config_reader(self): ''' This method parses the config file and read the variables defined", "read the variables defined by the user in the config.ini file. The values", "Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path =", "str Description: Path of input text file containing sentences to be clustered. Default", "output clusters are to be kept. Default Value: output_clusters 5. cluster_overlap Type: bool", "will have same sentence. Default Value: True 6. 
word_vector_dim Type: int Description: Dimension", "representative sentence of each cluster is to be computed using \"add\" or \"average\".", "'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float Description: Threshold value to be used for clustering", "of word2vec trained model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float Description:", "Description: Specify whether the representative sentence of each cluster is to be computed", "__init__(self): ''' This method declares the class attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold", "config.ini file. The values of the variables are then set in the corresponding", "parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path')", "This method declares the class attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80", "inputs in the class attributes. Attributes ---------- 1. word2vec_model Type: str Description: Path", "set to False, then no two clusters will have same sentence. Default Value:", "self.representative_word_vector = 'average' def config_reader(self): ''' This method parses the config file and", "float Description: Threshold value to be used for clustering Default Value: 0.80 3.", "corresponding class attributes. ''' parser = ConfigParser() # Read config.ini parser.read('config.ini') # Read", "300 self.representative_word_vector = 'average' def config_reader(self): ''' This method parses the config file", "1. word2vec_model Type: str Description: Path of word2vec trained model file. 
Default Value:", "config.ini parser.read('config.ini') # Read input variables for the code if parser.get('Input Variables','word2vec_model'): self.word2vec_model", "Value: None 4. output_dir_path Type: str Description: Path of directory where output clusters", "of directory where output clusters are to be kept. Default Value: output_clusters 5.", "This method parses the config file and read the variables defined by the", "class attributes. ''' parser = ConfigParser() # Read config.ini parser.read('config.ini') # Read input", "Type: str Description: Path of input text file containing sentences to be clustered.", "Default Value: 300 7. representative_word_vector Type: str Description: Specify whether the representative sentence", "None self.output_dir_path = './output_clusters' self.cluster_overlap = True self.word_vector_dim = 300 self.representative_word_vector = 'average'", "in the class attributes. Attributes ---------- 1. word2vec_model Type: str Description: Path of", "threshold Type: float Description: Threshold value to be used for clustering Default Value:", "the config file and read the variables defined by the user in the", "= parser.get('Input Variables', 'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap')", "method declares the class attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path", "\"add\" or \"average\". Default Value: average ''' def __init__(self): ''' This method declares", "parser.get('Input Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input Variables', 'output_dir_path') if parser.get('Input", "the class attributes. Attributes ---------- 1. word2vec_model Type: str Description: Path of word2vec", "in the config.ini file. 
The values of the variables are then set in", "two clusters will have same sentence. Default Value: True 6. word_vector_dim Type: int", "sentences to be clustered. Default Value: None 4. output_dir_path Type: str Description: Path", "whether the representative sentence of each cluster is to be computed using \"add\"", "= 300 self.representative_word_vector = 'average' def config_reader(self): ''' This method parses the config", "of the variables are then set in the corresponding class attributes. ''' parser", "= ConfigParser() # Read config.ini parser.read('config.ini') # Read input variables for the code", "input variables for the code if parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model') if", "self.threshold = parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path') if parser.get('Input", "average ''' def __init__(self): ''' This method declares the class attributes. ''' self.word2vec_model", "file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float Description: Threshold value to be", "Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float Description: Threshold value to be used", "input_file_path Type: str Description: Path of input text file containing sentences to be", "\"average\". Default Value: average ''' def __init__(self): ''' This method declares the class", "= True self.word_vector_dim = 300 self.representative_word_vector = 'average' def config_reader(self): ''' This method", "False, then no two clusters will have same sentence. 
Default Value: True 6.", "self.output_dir_path = './output_clusters' self.cluster_overlap = True self.word_vector_dim = 300 self.representative_word_vector = 'average' def", "of each cluster is to be computed using \"add\" or \"average\". Default Value:", "Type: int Description: Dimension of word vectors. Default Value: 300 7. representative_word_vector Type:", "''' This method declares the class attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold =", "Value: 300 7. representative_word_vector Type: str Description: Specify whether the representative sentence of", "int Description: Dimension of word vectors. Default Value: 300 7. representative_word_vector Type: str", "the variables are then set in the corresponding class attributes. ''' parser =", "to be kept. Default Value: output_clusters 5. cluster_overlap Type: bool Description: If set", "True 6. word_vector_dim Type: int Description: Dimension of word vectors. Default Value: 300", "self.input_file_path = parser.get('Input Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input Variables', 'output_dir_path')", "3. input_file_path Type: str Description: Path of input text file containing sentences to", "variables defined by the user in the config.ini file. The values of the", "Description: Path of directory where output clusters are to be kept. Default Value:", "parser.get('Input Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input Variables', 'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap", "Read input variables for the code if parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model')", "to False, then no two clusters will have same sentence. 
Default Value: True", "parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold')", "attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path = None self.output_dir_path =", "the representative sentence of each cluster is to be computed using \"add\" or", "if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input Variables', 'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'):", "= parser.getint('Input Variables', 'word_vector_dim') if parser.get('Input Variables', 'representative_word_vector'): self.representative_word_vector = parser.get('Input Variables', 'representative_word_vector')", "Type: bool Description: If set to False, then no two clusters will have", "class ConfigParse(): ''' This class reads config.ini file and sets the required user", "model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float Description: Threshold value to", "file containing sentences to be clustered. Default Value: None 4. output_dir_path Type: str", "to be used for clustering Default Value: 0.80 3. input_file_path Type: str Description:", "of word vectors. Default Value: 300 7. 
representative_word_vector Type: str Description: Specify whether", "self.output_dir_path = parser.get('Input Variables', 'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables',", "if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'):", "parser = ConfigParser() # Read config.ini parser.read('config.ini') # Read input variables for the", "each cluster is to be computed using \"add\" or \"average\". Default Value: average", "clustering Default Value: 0.80 3. input_file_path Type: str Description: Path of input text", "then set in the corresponding class attributes. ''' parser = ConfigParser() # Read", "self.word_vector_dim = 300 self.representative_word_vector = 'average' def config_reader(self): ''' This method parses the", "class attributes. Attributes ---------- 1. word2vec_model Type: str Description: Path of word2vec trained", "have same sentence. Default Value: True 6. word_vector_dim Type: int Description: Dimension of", "# Read input variables for the code if parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input", "''' parser = ConfigParser() # Read config.ini parser.read('config.ini') # Read input variables for", "'./output_clusters' self.cluster_overlap = True self.word_vector_dim = 300 self.representative_word_vector = 'average' def config_reader(self): '''", "cluster is to be computed using \"add\" or \"average\". Default Value: average '''", "'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input", "file. 
The values of the variables are then set in the corresponding class", "#!/usr/bin/python from ConfigParser import ConfigParser class ConfigParse(): ''' This class reads config.ini file", "Reader @author: <NAME> \"\"\" #!/usr/bin/python from ConfigParser import ConfigParser class ConfigParse(): ''' This", "str Description: Path of directory where output clusters are to be kept. Default", "parser.get('Input Variables', 'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap') if", "input text file containing sentences to be clustered. Default Value: None 4. output_dir_path", "self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim') if parser.get('Input Variables', 'representative_word_vector'): self.representative_word_vector = parser.get('Input Variables',", "self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path = None self.output_dir_path = './output_clusters' self.cluster_overlap", "@author: <NAME> \"\"\" #!/usr/bin/python from ConfigParser import ConfigParser class ConfigParse(): ''' This class", "str Description: Specify whether the representative sentence of each cluster is to be", "the variables defined by the user in the config.ini file. The values of", "= parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'):", "vectors. Default Value: 300 7. representative_word_vector Type: str Description: Specify whether the representative", "clusters will have same sentence. Default Value: True 6. 
word_vector_dim Type: int Description:", "ConfigParser() # Read config.ini parser.read('config.ini') # Read input variables for the code if", "''' This method parses the config file and read the variables defined by", "to be computed using \"add\" or \"average\". Default Value: average ''' def __init__(self):", "= 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path = None self.output_dir_path = './output_clusters' self.cluster_overlap =", "the config.ini file. The values of the variables are then set in the", "parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path", "= parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path') if parser.get('Input Variables',", "Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input Variables',", "self.word2vec_model = parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold') if parser.get('Input", "containing sentences to be clustered. Default Value: None 4. output_dir_path Type: str Description:", "config file and read the variables defined by the user in the config.ini", "'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim') if parser.get('Input Variables',", "be clustered. Default Value: None 4. output_dir_path Type: str Description: Path of directory", "Value: output_clusters 5. 
cluster_overlap Type: bool Description: If set to False, then no", "of input text file containing sentences to be clustered. Default Value: None 4.", "Path of input text file containing sentences to be clustered. Default Value: None", "Value: average ''' def __init__(self): ''' This method declares the class attributes. '''", "Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim') if parser.get('Input Variables', 'representative_word_vector'): self.representative_word_vector =", "file and sets the required user inputs in the class attributes. Attributes ----------", "sentence of each cluster is to be computed using \"add\" or \"average\". Default", "output_clusters 5. cluster_overlap Type: bool Description: If set to False, then no two", "\"\"\" #!/usr/bin/python from ConfigParser import ConfigParser class ConfigParse(): ''' This class reads config.ini", "Path of word2vec trained model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float", "Threshold value to be used for clustering Default Value: 0.80 3. input_file_path Type:", "file and read the variables defined by the user in the config.ini file.", "be kept. Default Value: output_clusters 5. cluster_overlap Type: bool Description: If set to", "4. output_dir_path Type: str Description: Path of directory where output clusters are to", "Default Value: average ''' def __init__(self): ''' This method declares the class attributes.", "Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input Variables', 'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap =", "the user in the config.ini file. The values of the variables are then", "This class reads config.ini file and sets the required user inputs in the", "Dimension of word vectors. Default Value: 300 7. 
representative_word_vector Type: str Description: Specify", "ConfigParser import ConfigParser class ConfigParse(): ''' This class reads config.ini file and sets", "def __init__(self): ''' This method declares the class attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin'", "if parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path =", "''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path = None self.output_dir_path = './output_clusters'", "class reads config.ini file and sets the required user inputs in the class", "clustered. Default Value: None 4. output_dir_path Type: str Description: Path of directory where", "method parses the config file and read the variables defined by the user", "if parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input", "300 7. representative_word_vector Type: str Description: Specify whether the representative sentence of each", "declares the class attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path =", "for the code if parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'):", "\"\"\" Config Reader @author: <NAME> \"\"\" #!/usr/bin/python from ConfigParser import ConfigParser class ConfigParse():", "5. 
cluster_overlap Type: bool Description: If set to False, then no two clusters", "parser.read('config.ini') # Read input variables for the code if parser.get('Input Variables','word2vec_model'): self.word2vec_model =", "Value: 0.80 3. input_file_path Type: str Description: Path of input text file containing", "'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path = None self.output_dir_path = './output_clusters' self.cluster_overlap = True", "in the corresponding class attributes. ''' parser = ConfigParser() # Read config.ini parser.read('config.ini')", "''' This class reads config.ini file and sets the required user inputs in", "same sentence. Default Value: True 6. word_vector_dim Type: int Description: Dimension of word", "output_dir_path Type: str Description: Path of directory where output clusters are to be", "Read config.ini parser.read('config.ini') # Read input variables for the code if parser.get('Input Variables','word2vec_model'):", "parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim", "---------- 1. word2vec_model Type: str Description: Path of word2vec trained model file. Default", "import ConfigParser class ConfigParse(): ''' This class reads config.ini file and sets the", "'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim') if parser.get('Input Variables', 'representative_word_vector'): self.representative_word_vector = parser.get('Input", "from ConfigParser import ConfigParser class ConfigParse(): ''' This class reads config.ini file and", "using \"add\" or \"average\". 
Default Value: average ''' def __init__(self): ''' This method", "= './output_clusters' self.cluster_overlap = True self.word_vector_dim = 300 self.representative_word_vector = 'average' def config_reader(self):", "Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path') if", "Description: If set to False, then no two clusters will have same sentence.", "the corresponding class attributes. ''' parser = ConfigParser() # Read config.ini parser.read('config.ini') #", "sets the required user inputs in the class attributes. Attributes ---------- 1. word2vec_model", "the required user inputs in the class attributes. Attributes ---------- 1. word2vec_model Type:", "for clustering Default Value: 0.80 3. input_file_path Type: str Description: Path of input", "defined by the user in the config.ini file. The values of the variables", "kept. Default Value: output_clusters 5. cluster_overlap Type: bool Description: If set to False,", "then no two clusters will have same sentence. Default Value: True 6. word_vector_dim", "7. representative_word_vector Type: str Description: Specify whether the representative sentence of each cluster", "str Description: Path of word2vec trained model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold", "Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float Description: Threshold value to be used for", "True self.word_vector_dim = 300 self.representative_word_vector = 'average' def config_reader(self): ''' This method parses", "and read the variables defined by the user in the config.ini file. 
The", "Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold') if", "and sets the required user inputs in the class attributes. Attributes ---------- 1.", "sentence. Default Value: True 6. word_vector_dim Type: int Description: Dimension of word vectors.", "set in the corresponding class attributes. ''' parser = ConfigParser() # Read config.ini", "class attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path = None self.output_dir_path", "6. word_vector_dim Type: int Description: Dimension of word vectors. Default Value: 300 7.", "Description: Dimension of word vectors. Default Value: 300 7. representative_word_vector Type: str Description:", "variables for the code if parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model') if parser.get('Input", "bool Description: If set to False, then no two clusters will have same", "The values of the variables are then set in the corresponding class attributes.", "= None self.output_dir_path = './output_clusters' self.cluster_overlap = True self.word_vector_dim = 300 self.representative_word_vector =", "'average' def config_reader(self): ''' This method parses the config file and read the", "variables are then set in the corresponding class attributes. ''' parser = ConfigParser()", "value to be used for clustering Default Value: 0.80 3. input_file_path Type: str", "are to be kept. Default Value: output_clusters 5. cluster_overlap Type: bool Description: If", "Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input Variables', 'output_dir_path') if parser.get('Input Variables',", "Default Value: None 4. 
output_dir_path Type: str Description: Path of directory where output", "parses the config file and read the variables defined by the user in", "Description: Path of word2vec trained model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type:", "parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input", "config.ini file and sets the required user inputs in the class attributes. Attributes", "Default Value: True 6. word_vector_dim Type: int Description: Dimension of word vectors. Default", "directory where output clusters are to be kept. Default Value: output_clusters 5. cluster_overlap", "the class attributes. ''' self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' self.threshold = 0.80 self.input_file_path = None", "self.threshold = 0.80 self.input_file_path = None self.output_dir_path = './output_clusters' self.cluster_overlap = True self.word_vector_dim", "attributes. Attributes ---------- 1. word2vec_model Type: str Description: Path of word2vec trained model", "values of the variables are then set in the corresponding class attributes. '''", "Variables', 'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim') if parser.get('Input", "attributes. ''' parser = ConfigParser() # Read config.ini parser.read('config.ini') # Read input variables", "where output clusters are to be kept. Default Value: output_clusters 5. cluster_overlap Type:", "Variables', 'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input", "Default Value: output_clusters 5. 
cluster_overlap Type: bool Description: If set to False, then", "self.input_file_path = None self.output_dir_path = './output_clusters' self.cluster_overlap = True self.word_vector_dim = 300 self.representative_word_vector", "0.80 3. input_file_path Type: str Description: Path of input text file containing sentences", "parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim') if", "Description: Threshold value to be used for clustering Default Value: 0.80 3. input_file_path", "word vectors. Default Value: 300 7. representative_word_vector Type: str Description: Specify whether the", "if parser.get('Input Variables','threshold'): self.threshold = parser.getfloat('Input Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input", "Attributes ---------- 1. word2vec_model Type: str Description: Path of word2vec trained model file.", "representative_word_vector Type: str Description: Specify whether the representative sentence of each cluster is", "2. threshold Type: float Description: Threshold value to be used for clustering Default", "= 0.80 self.input_file_path = None self.output_dir_path = './output_clusters' self.cluster_overlap = True self.word_vector_dim =", "Config Reader @author: <NAME> \"\"\" #!/usr/bin/python from ConfigParser import ConfigParser class ConfigParse(): '''", "= parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim')", "'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input Variables',", "user inputs in the class attributes. Attributes ---------- 1. word2vec_model Type: str Description:", "to be clustered. 
Default Value: None 4. output_dir_path Type: str Description: Path of", "# Read config.ini parser.read('config.ini') # Read input variables for the code if parser.get('Input", "the code if parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold", "or \"average\". Default Value: average ''' def __init__(self): ''' This method declares the", "self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap') if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables',", "'output_dir_path'): self.output_dir_path = parser.get('Input Variables', 'output_dir_path') if parser.get('Input Variables', 'cluster_overlap'): self.cluster_overlap = parser.getboolean('Input", "user in the config.ini file. The values of the variables are then set", "code if parser.get('Input Variables','word2vec_model'): self.word2vec_model = parser.get('Input Variables','word2vec_model') if parser.get('Input Variables','threshold'): self.threshold =", "word_vector_dim Type: int Description: Dimension of word vectors. Default Value: 300 7. representative_word_vector", "text file containing sentences to be clustered. Default Value: None 4. output_dir_path Type:", "Type: str Description: Specify whether the representative sentence of each cluster is to", "Type: str Description: Path of word2vec trained model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2.", "Path of directory where output clusters are to be kept. Default Value: output_clusters", "be used for clustering Default Value: 0.80 3. input_file_path Type: str Description: Path", "no two clusters will have same sentence. Default Value: True 6. 
word_vector_dim Type:", "= parser.get('Input Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path = parser.get('Input Variables', 'output_dir_path') if", "= 'average' def config_reader(self): ''' This method parses the config file and read", "be computed using \"add\" or \"average\". Default Value: average ''' def __init__(self): '''", "reads config.ini file and sets the required user inputs in the class attributes.", "''' def __init__(self): ''' This method declares the class attributes. ''' self.word2vec_model =", "Variables','threshold') if parser.get('Input Variables','input_file_path'): self.input_file_path = parser.get('Input Variables','input_file_path') if parser.get('Input Variables', 'output_dir_path'): self.output_dir_path", "Default Value: 0.80 3. input_file_path Type: str Description: Path of input text file", "Specify whether the representative sentence of each cluster is to be computed using", "is to be computed using \"add\" or \"average\". Default Value: average ''' def", "0.80 self.input_file_path = None self.output_dir_path = './output_clusters' self.cluster_overlap = True self.word_vector_dim = 300", "used for clustering Default Value: 0.80 3. input_file_path Type: str Description: Path of", "clusters are to be kept. Default Value: output_clusters 5. cluster_overlap Type: bool Description:", "are then set in the corresponding class attributes. ''' parser = ConfigParser() #", "ConfigParse(): ''' This class reads config.ini file and sets the required user inputs", "cluster_overlap Type: bool Description: If set to False, then no two clusters will", "trained model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float Description: Threshold value", "If set to False, then no two clusters will have same sentence. Default", "computed using \"add\" or \"average\". 
Default Value: average ''' def __init__(self): ''' This", "ConfigParser class ConfigParse(): ''' This class reads config.ini file and sets the required", "word2vec trained model file. Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin' 2. threshold Type: float Description: Threshold", "def config_reader(self): ''' This method parses the config file and read the variables", "if parser.get('Input Variables', 'word_vector_dim'): self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim') if parser.get('Input Variables', 'representative_word_vector'):", "None 4. output_dir_path Type: str Description: Path of directory where output clusters are", "by the user in the config.ini file. The values of the variables are", "Description: Path of input text file containing sentences to be clustered. Default Value:", "Type: float Description: Threshold value to be used for clustering Default Value: 0.80", "Type: str Description: Path of directory where output clusters are to be kept." ]
[ "All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc import", "from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc import (center_of_mass, flip_tensor, generate_coordinate,", "all_reduce_dict, allreduce_grads, reduce_mean) from .misc import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap) __all__", "import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply,", "mask2ndarray, multi_apply, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap', 'mask2ndarray', 'flip_tensor',", "multi_apply, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',", "unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict', 'center_of_mass',", "allreduce_grads, reduce_mean) from .misc import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap) __all__ =", "reduce_mean) from .misc import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap) __all__ = [", "reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc import (center_of_mass, flip_tensor,", "(DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap)", "(c) OpenMMLab. All rights reserved. 
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from", "from .misc import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap) __all__ = [ 'allreduce_grads',", "= [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict', 'center_of_mass', 'generate_coordinate' ]", "import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean',", "# Copyright (c) OpenMMLab. All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,", "flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap',", "__all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict', 'center_of_mass', 'generate_coordinate'", ".misc import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook',", ".dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc import (center_of_mass, flip_tensor, generate_coordinate, mask2ndarray,", "generate_coordinate, mask2ndarray, multi_apply, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap', 'mask2ndarray',", "OpenMMLab. All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc", "Copyright (c) OpenMMLab. All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean)", "rights reserved. 
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean) from .misc import (center_of_mass,", "(center_of_mass, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply'," ]
[ "to optimize. If 'verbose' is True, then everytime a transformation is made, it", "SimpleImputer( missing_values, strategy, fill_value, verbose, copy, add_indicator) def fit(self, X, y=None): return self", "and column_values upon creating the object, or call 'new_merge' method with those parameters.", "a indicator for automatic dataframe modeling for best predictions later on. show_pipeline() <-", "self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y) class Imputer(Transformer, TransformerMixin): \"\"\" Imputer object It is a", "(labels). optimization (str) is the method used to optimize. If 'verbose' is True,", "objects, it does not inherit Transformer class, as it doesn't need to transform", "cases just to stay friendly with sklearn module. \"\"\" def __init__(self, verbose=False): self._pipes", "transform(X, y). This is advised in most cases just to stay friendly with", "performs all transformations (from all pipes) on the dataframe, chooses the most meaningful", "it takes dataframe as an input, which is transformed into np.ndarray, fed into", "column names that will be encoded. Alternatively you can set them with set_columns", "creates a new pipe (ordered), pipe_set is expected to be a tuple of", "It is probably useful only for pipelines, as you can easily achieve the", "def show_pipeline(self): out = [] for name, _ in self._pipes: if self._activated[name]: out.append(name)", "you can easily achieve the same result with basic pandas operations. 
Unlike other", "self._activated[name] = True def fit(self, X, y=None): return self def transform(self, X, y=None):", "'onehot': values = self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names() features = [feature[3:]", "self._encoder.get_feature_names() features = [feature[3:] for feature in features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y)", "= True def fit(self, X, y=None): return self def transform(self, X, y=None): for", "- 'onehot' \"\"\" def __init__(self, columns, encoder='onehot', encoder_params=[]): self._columns = columns self._merger =", "== 'onehot': values = self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names() features =", "values).fit_transform(X.drop(self._columns, axis=1), y) class Imputer(Transformer, TransformerMixin): \"\"\" Imputer object It is a wrapper", "upon creating the object, or call 'new_merge' method with those parameters. If X", "always_active[, True]) <- creates a new pipe (ordered), pipe_set is expected to be", "less features though) as sklearn.Pipeline . Methods: __init__(verbose[, False]) <- if 'verbose' is", "== 'corr_': threshold = int(opt[5:]) / 100 corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters =", "transform(self, X, y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException if self._encoder_type == 'onehot':", "verbose=False): Pipesystem.__init__(self, verbose) self._target = optimize_for self._optimization = optimization self._best_parameters = [] def", "the dataframe. Methods: __init__(name, function, parameters) <- 'name' is the label of new", "= parameters def new_attribute(self, name, function, parameters): self.name = name self.function = function", "to stay friendly with sklearn module. 
\"\"\" def __init__(self, verbose=False): self._pipes = []", "specify column names that will be encoded. Alternatively you can set them with", ", it uses one of the optimiztion methods to determine the most promising", "name, pipe in self._pipes: if self._activated[name] == False: continue if self._verbose: print(f'> pushing", "self.parameters = parameters def new_attribute(self, name, function, parameters): self.name = name self.function =", "= int(opt[5:]) / 100 corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters = [name for name", "function upon which the values will be created, 'parameters' is a list of", "\"\"\" def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False): self._imputer = SimpleImputer( missing_values,", "will not be a part of returned dataframe. Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[,", "specified encoder _encoder . Possible encodings: - 'onehot' \"\"\" def __init__(self, columns, encoder='onehot',", "X[self.name] = self.function(*parameters) return X class Pipesystem(TransformerMixin): \"\"\" Pipesystem object It works (it", "verbose, copy, add_indicator) def fit(self, X, y=None): return self def transform(self, X, y=None):", "transform(self, X, y=None): X = Pipesystem.transform(self, X, y) opt = getattr(self, '_optimization', 'corr_20')", "around sklearn.impute.SimpleImputer, all it does, is that it takes dataframe as an input,", "'_optimization', 'corr_20') if opt[:5] == 'corr_': threshold = int(opt[5:]) / 100 corr_table =", "\\'{name}\\' with {pipe}') X = pipe.fit_transform(X) return X def show_pipeline(self): out = []", "= SimpleImputer( missing_values, strategy, fill_value, verbose, copy, add_indicator) def fit(self, X, y=None): return", "pipe (ordered), pipe_set is expected to be a tuple of name and object", "features without actually training a model. 
One way of optimization (and currently, the", "as an input, which is transformed into np.ndarray, fed into actual SimpleImputer object,", "pd.DataFrame): raise NotADataFrameException if self._encoder_type == 'onehot': values = self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values)", "sklearn module. \"\"\" def __init__(self, optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose) self._target = optimize_for", "with the same exact columns. \"\"\" def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True,", "X, y=None): return self def transform(self, X, y=None): if not isinstance(X, pd.DataFrame): raise", "transform dataframe to array or vice-versa. You can specify column_names and column_values upon", "parameters def fit(self, X, y=None): return self def transform(self, X, y=None): parameters =", "them with get_columns method. It is used to encode categorical attributes of the", "optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose) self._target = optimize_for self._optimization = optimization self._best_parameters =", "NotADataFrameException, Transformer class Merger(TransformerMixin): \"\"\" Merger Object It is used to merge dataframes", "dataframe. Methods: __init__(name, function, parameters) <- 'name' is the label of new column,", "does not inherit Transformer class, as it doesn't need to transform dataframe to", "fit_transform(X, y) <- combined fit(X, y) and transform(X, y). This is advised in", "be a indicator for automatic dataframe modeling for best predictions later on. show_pipeline()", "to stay friendly with sklearn module. 
\"\"\" def __init__(self, optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self,", "show_pipeline(self): out = [] for name, _ in self._pipes: if self._activated[name]: out.append(name) return", "if not value: self._disable_pipe(name) def _disable_pipe(self, name): self._activated[name] = False class OptimizedPipesystem(Pipesystem): \"\"\"", "if opt[:5] == 'corr_': threshold = int(opt[5:]) / 100 corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict()", "stay friendly with sklearn module. \"\"\" def __init__(self, name, function, parameters): self.name =", "for value, name, _ in zip(array, self._pipes): if not value: self._disable_pipe(name) def _disable_pipe(self,", "vice-versa. You can specify column_names and column_values upon creating the object, or call", "self._pipes): if not value: self._disable_pipe(name) def _disable_pipe(self, name): self._activated[name] = False class OptimizedPipesystem(Pipesystem):", "is probably useful only for pipelines, as you can easily achieve the same", "everytime a transformation is made, it will print out the information about it.", "from sklearn.impute import SimpleImputer from .helpers import encoded_array_to_df_compatible_array from .base import ShapeException, NotADataFrameException,", "optimization[, 'corr_20'], verbose[, False]) <- optimize_for (str) are the target columns (labels). optimization", "encoder if self._encoder_type == 'onehot': self._encoder = OneHotEncoder(*encoder_params) def fit(self, X, y=None): return", "columns. 
\"\"\" def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False): self._imputer = SimpleImputer(", "transform(X, y) <- performs all transformations (from all pipes) on the dataframe, chooses", "OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object Enhanced rdfs.Pipesystem , it uses one of the optimiztion", "return self def transform(self, X, y=None): for name, pipe in self._pipes: if self._activated[name]", "= cols_names self._cols_values = cols_values def fit(self, X, y=None): return self def transform(self,", "if isinstance(parameter, str): parameter = X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters) return X class", "performs all transformations (from all pipes) on the dataframe and returns it. fit_transform(X,", "def __init__(self, cols_names=None, cols_values=None): self._cols_names = cols_names self._cols_values = cols_values def fit(self, X,", "dataframe. fit_transform(X, y) <- combined fit(X, y) and transform(X, y). This is advised", "attribute) on the dataframe and returns it. fit_transform(X, y) <- combined fit(X, y)", "or display them with get_columns method. It is used to encode categorical attributes", "performs the transformation (adds new attribute) on the dataframe and returns it. fit_transform(X,", "the dataframe. fit_transform(X, y) <- combined fit(X, y) and transform(X, y). This is", "a function upon which the values will be created, 'parameters' is a list", "dataframe, raises an exception. \"\"\" def __init__(self, cols_names=None, cols_values=None): self._cols_names = cols_names self._cols_values", "= parameters def fit(self, X, y=None): return self def transform(self, X, y=None): parameters", "sklearn module. \"\"\" def __init__(self, name, function, parameters): self.name = name self.function =", "returns the dataframe. fit_transform(X, y) <- combined fit(X, y) and transform(X, y). 
class AttributeAdder(TransformerMixin):
    """
    AttributeAdder object
    It is used to add new columns (features) to the dataframe.
    Methods:
    __init__(name, function, parameters) <- 'name' is the label of the new
        column, 'function' is the function the values are built with,
        'parameters' is a list of column names (str) and/or constant
        parameters.
    new_attribute(name, function, parameters) <- ...
    fit(X, y) <- returns itself.
    transform(X, y) <- performs the transformation (adds the new attribute)
        on the dataframe and returns it.
    fit_transform(X, y) <- combined fit(X, y) and transform(X, y).  This is
        advised in most cases just to stay friendly with the sklearn module.
    """

    def __init__(self, name, function, parameters):
        self.name = name
        self.function = function
        self.parameters = parameters

    def new_attribute(self, name, function, parameters):
        # Re-target the adder at a different column/function/parameter set.
        self.name = name
        self.function = function
        self.parameters = parameters

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        # Strings are looked up as column names; anything else is passed
        # through as a constant argument.
        args = [X[param] if isinstance(param, str) else param
                for param in self.parameters]
        X[self.name] = self.function(*args)
        return X
fit(X,", "y=None): X = Pipesystem.transform(self, X, y) opt = getattr(self, '_optimization', 'corr_20') if opt[:5]", "fit(self, X, y=None): return self def transform(self, X, y=None): if not isinstance(X, pd.DataFrame):", "is that it takes dataframe as an input, which is transformed into np.ndarray,", "new pipe (ordered), pipe_set is expected to be a tuple of name and", "import SimpleImputer from .helpers import encoded_array_to_df_compatible_array from .base import ShapeException, NotADataFrameException, Transformer class", "with those parameters. If X parameter of transformation is not a dataframe, raises", "and/or constant parameters. new_attribute(name, function, parameters) <- ... fit(X, y) <- returns itself.", "show_pipeline() <- returns an ordered list with all current pipes. fit(X, y) <-", "dataframe to array or vice-versa. You can specify column_names and column_values upon creating", "It is used to add new columns (features) to the dataframe. Methods: __init__(name,", "parameters = [] for parameter in self.parameters: if isinstance(parameter, str): parameter = X[parameter]", "of name and object ( in that order ). always_active does not have", "list of column names (str) and/or constant parameters. new_attribute(name, function, parameters) <- ...", "on. show_pipeline() <- returns an ordered list with all current pipes. fit(X, y)", "with basic pandas operations. Unlike other objects, it does not inherit Transformer class,", "= values return X def new_merge(self, cols_names, cols_values): self._cols_names = cols_names self._cols_values =", "the dataframe and returns it. fit_transform(X, y) <- combined fit(X, y) and transform(X,", "return self def transform(self, X, y=None): parameters = [] for parameter in self.parameters:", "print out the information about it. new_pipe(pipe_set, always_active[, True]) <- creates a new", "part of returned dataframe. 
Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <- optimize_for (str)", "used to merge dataframes with given columns. It is probably useful only for", "(str) is the method used to optimize. If 'verbose' is True, then everytime", "return self class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object Upon creation, you should specify", "used to optimize. If 'verbose' is True, then everytime a transformation is made,", "dataframe, chooses the most meaningful features and returns the dataframe. fit_transform(X, y) <-", "new_pipe(self, pipe_set, always_active=True): name, pipe = pipe_set self._pipes.append((name, pipe)) self._activated[name] = True def", "= pipe.fit_transform(X) return X def show_pipeline(self): out = [] for name, _ in", "and returns it. fit_transform(X, y) <- combined fit(X, y) and transform(X, y). This", "optimize. If 'verbose' is True, then everytime a transformation is made, it will", "pd.DataFrame): self._columns = X.columns X_tr = self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr) return X_tr else:", "to be a indicator for automatic dataframe modeling for best predictions later on.", "raises an exception. \"\"\" def __init__(self, cols_names=None, cols_values=None): self._cols_names = cols_names self._cols_values =", "encoder_params=[]): self._columns = columns self._merger = Merger() self._encoder_type = encoder if self._encoder_type ==", "values in zip(self._cols_names, self._cols_values): X[name] = values return X def new_merge(self, cols_names, cols_values):", "verbose=False): self._pipes = [] self._activated = {} self._verbose = verbose def new_pipe(self, pipe_set,", "object It is a wrapper around sklearn.impute.SimpleImputer, all it does, is that it", "in zip(self._cols_names, self._cols_values): X[name] = values return X def new_merge(self, cols_names, cols_values): self._cols_names", "same result with basic pandas operations. 
Unlike other objects, it does not inherit", "categorical attributes of the dataframe. It contains it's merger _merger , as well", "a new pipe (ordered), pipe_set is expected to be a tuple of name", "without actually training a model. One way of optimization (and currently, the only", "[] self._activated = {} self._verbose = verbose def new_pipe(self, pipe_set, always_active=True): name, pipe", "Pipesystem.transform(self, X, y) opt = getattr(self, '_optimization', 'corr_20') if opt[:5] == 'corr_': threshold", "_encoder . Possible encodings: - 'onehot' \"\"\" def __init__(self, columns, encoder='onehot', encoder_params=[]): self._columns", "to the dataframe. Methods: __init__(name, function, parameters) <- 'name' is the label of", "expected to be a tuple of name and object ( in that order", "will be encoded. Alternatively you can set them with set_columns method, or display", "... fit(X, y) <- returns itself. transform(X, y) <- performs the transformation (adds", "out.append(name) return out def _activate_array(self, array): for value, name, _ in zip(array, self._pipes):", "sklearn.impute.SimpleImputer, all it does, is that it takes dataframe as an input, which", "it uses one of the optimiztion methods to determine the most promising features", "the same exact columns. \"\"\" def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False):", "can specify column_names and column_values upon creating the object, or call 'new_merge' method", "X, y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException for name, values in zip(self._cols_names,", "X_tr = self._to_df(X_tr) return X_tr else: return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder", "for parameter in self.parameters: if isinstance(parameter, str): parameter = X[parameter] parameters.append(parameter) X[self.name] =", "at this moment. 
It is expected for it to be a indicator for", "correlation. Upon object creation, specify optimization parameter to 'corr_<int>', the integer will be", "cols_values=None): self._cols_names = cols_names self._cols_values = cols_values def fit(self, X, y=None): return self", "y) <- performs the transformation (adds new attribute) on the dataframe and returns", "function self.parameters = parameters def fit(self, X, y=None): return self def transform(self, X,", "__init__(self, cols_names=None, cols_values=None): self._cols_names = cols_names self._cols_values = cols_values def fit(self, X, y=None):", "column_values upon creating the object, or call 'new_merge' method with those parameters. If", "zip(self._cols_names, self._cols_values): X[name] = values return X def new_merge(self, cols_names, cols_values): self._cols_names =", "not isinstance(X, pd.DataFrame): raise NotADataFrameException for name, values in zip(self._cols_names, self._cols_values): X[name] =", "return X def new_merge(self, cols_names, cols_values): self._cols_names = cols_names self._cols_values = cols_values return", "def transform(self, X, y=None): for name, pipe in self._pipes: if self._activated[name] == False:", "feature that is less significant than that, will not be a part of", "y) class Imputer(Transformer, TransformerMixin): \"\"\" Imputer object It is a wrapper around sklearn.impute.SimpleImputer,", "of transformation is not a dataframe, raises an exception. \"\"\" def __init__(self, cols_names=None,", "<- returns itself. transform(X, y) <- performs the transformation (adds new attribute) on", "a part of returned dataframe. Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <- optimize_for", "always_active does not have any functionality at this moment. It is expected for", "SimpleImputer from .helpers import encoded_array_to_df_compatible_array from .base import ShapeException, NotADataFrameException, Transformer class Merger(TransformerMixin):", "that order ). 
always_active does not have any functionality at this moment. It", "you should specify column names that will be encoded. Alternatively you can set", "\"\"\" AttributeAdder object It is used to add new columns (features) to the", "name, _ in self._pipes: if self._activated[name]: out.append(name) return out def _activate_array(self, array): for", "to add new columns (features) to the dataframe. Methods: __init__(name, function, parameters) <-", "features = self._encoder.get_feature_names() features = [feature[3:] for feature in features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns,", "(it has less features though) as sklearn.Pipeline . Methods: __init__(verbose[, False]) <- if", "True, then everytime a transformation is made, it will print out the information", "features = [feature[3:] for feature in features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y) class", "with all current pipes. fit(X, y) <- returns itself. transform(X, y) <- performs", "Enhanced rdfs.Pipesystem , it uses one of the optimiztion methods to determine the", "should specify column names that will be encoded. 
Alternatively you can set them", "'corr_<int>', the integer will be the percent rate from 0 to 100 and", "pipe)) self._activated[name] = True def fit(self, X, y=None): return self def transform(self, X,", "Imputer object It is a wrapper around sklearn.impute.SimpleImputer, all it does, is that", "pushing through \\'{name}\\' with {pipe}') X = pipe.fit_transform(X) return X def show_pipeline(self): out", "100 corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters = [name for name in corr_table if", "\"\"\" def __init__(self, cols_names=None, cols_values=None): self._cols_names = cols_names self._cols_values = cols_values def fit(self,", "expected for it to be a indicator for automatic dataframe modeling for best", "returns an ordered list with all current pipes. fit(X, y) <- returns itself.", "transformations (from all pipes) on the dataframe, chooses the most meaningful features and", "from sklearn.base import TransformerMixin from sklearn.preprocessing import OneHotEncoder from sklearn.impute import SimpleImputer from", "will be created, 'parameters' is a list of column names (str) and/or constant", "column_names and column_values upon creating the object, or call 'new_merge' method with those", "(features) to the dataframe. Methods: __init__(name, function, parameters) <- 'name' is the label", "merger _merger , as well as specified encoder _encoder . Possible encodings: -", "y=None): return self def transform(self, X, y=None): for name, pipe in self._pipes: if", "is a function upon which the values will be created, 'parameters' is a", "Merger Object It is used to merge dataframes with given columns. 
class Merger(TransformerMixin):
    """
    Merger Object
    It is used to merge dataframes with given columns.
    It is probably useful only for pipelines, as you can easily achieve the
    same result with basic pandas operations.
    Unlike other objects, it does not inherit the Transformer class, as it
    doesn't need to transform a dataframe to an array or vice-versa.
    You can specify cols_names and cols_values upon creating the object, or
    call the 'new_merge' method with those parameters.
    If the X parameter of the transformation is not a dataframe, raises an
    exception.
    """

    def __init__(self, cols_names=None, cols_values=None):
        self._cols_names = cols_names
        self._cols_values = cols_values

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        if not isinstance(X, pd.DataFrame):
            raise NotADataFrameException
        # Nothing configured yet (default construction): merging nothing is
        # a no-op rather than a TypeError from zip(None, None).
        if self._cols_names is None or self._cols_values is None:
            return X
        for name, values in zip(self._cols_names, self._cols_values):
            X[name] = values
        return X

    def new_merge(self, cols_names, cols_values):
        self._cols_names = cols_names
        self._cols_values = cols_values
        return self
Upon object", "X[name] = values return X def new_merge(self, cols_names, cols_values): self._cols_names = cols_names self._cols_values", "\"\"\" def __init__(self, columns, encoder='onehot', encoder_params=[]): self._columns = columns self._merger = Merger() self._encoder_type", "dataframe modeling for best predictions later on. show_pipeline() <- returns an ordered list", "object creation, specify optimization parameter to 'corr_<int>', the integer will be the percent", "creating the object, or call 'new_merge' method with those parameters. If X parameter", "in self._pipes: if self._activated[name]: out.append(name) return out def _activate_array(self, array): for value, name,", "information about it. new_pipe(pipe_set, always_active[, True]) <- creates a new pipe (ordered), pipe_set", "well as specified encoder _encoder . Possible encodings: - 'onehot' \"\"\" def __init__(self,", "def fit(self, X, y=None): return self def transform(self, X, y=None): if not isinstance(X,", "opt = getattr(self, '_optimization', 'corr_20') if opt[:5] == 'corr_': threshold = int(opt[5:]) /", "transform(self, X, y=None): if isinstance(X, pd.DataFrame): self._columns = X.columns X_tr = self._imputer.fit_transform(self._to_array(X)) X_tr", "( in that order ). always_active does not have any functionality at this", "the most meaningful features and returns the dataframe. fit_transform(X, y) <- combined fit(X,", "numpy as np from sklearn.base import TransformerMixin from sklearn.preprocessing import OneHotEncoder from sklearn.impute", "function, parameters): self.name = name self.function = function self.parameters = parameters def new_attribute(self,", "the percent rate from 0 to 100 and will act like a filter,", "uses one of the optimiztion methods to determine the most promising features without", "is expected for it to be a indicator for automatic dataframe modeling for", "optimize_for (str) are the target columns (labels). 
optimization (str) is the method used", "self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names() features = [feature[3:] for feature in", "itself. transform(X, y) <- performs all transformations (from all pipes) on the dataframe", "is returned as a dataframe, with the same exact columns. \"\"\" def __init__(self,", "self._cols_values): X[name] = values return X def new_merge(self, cols_names, cols_values): self._cols_names = cols_names", "Pipesystem(TransformerMixin): \"\"\" Pipesystem object It works (it has less features though) as sklearn.Pipeline", "columns, encoder='onehot', encoder_params=[]): self._columns = columns self._merger = Merger() self._encoder_type = encoder if", "one of the optimiztion methods to determine the most promising features without actually", "if self._verbose: print(f'> pushing through \\'{name}\\' with {pipe}') X = pipe.fit_transform(X) return X", "encoded. Alternatively you can set them with set_columns method, or display them with", "self._best_parameters = [name for name in corr_table if abs(corr_table[name]) >= threshold] return X[self._best_parameters]", "'verbose' is True, then everytime a transformation is made, it will print out", "transformation is made, it will print out the information about it. new_pipe(pipe_set, always_active[,", "advised in most cases just to stay friendly with sklearn module. \"\"\" def", "= cols_values def fit(self, X, y=None): return self def transform(self, X, y=None): if", "can easily achieve the same result with basic pandas operations. Unlike other objects,", "returned as a dataframe, with the same exact columns. \"\"\" def __init__(self, missing_values=np.nan,", "= X.columns X_tr = self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr) return X_tr else: return self._imputer.fit_transform(X,", "friendly with sklearn module. 
class Pipesystem(TransformerMixin):
    """
    Pipesystem object
    It works (it has fewer features though) like sklearn.Pipeline .
    Methods:
    __init__(verbose[, False]) <- if 'verbose' is True, then every time a
        transformation is made, it will print out information about it.
    new_pipe(pipe_set, always_active[, True]) <- creates a new pipe (ordered);
        pipe_set is expected to be a tuple of name and object (in that order).
        always_active does not have any functionality at this moment; it is
        expected to become an indicator for automatic dataframe modeling for
        best predictions later on.
    show_pipeline() <- returns an ordered list with all currently active pipes.
    fit(X, y) <- returns itself.
    transform(X, y) <- performs all transformations (from all active pipes) on
        the dataframe and returns it.
    fit_transform(X, y) <- combined fit(X, y) and transform(X, y).  This is
        advised in most cases just to stay friendly with the sklearn module.
    """

    def __init__(self, verbose=False):
        self._pipes = []      # ordered list of (name, transformer) tuples
        self._activated = {}  # name -> bool: whether the pipe runs
        self._verbose = verbose

    def new_pipe(self, pipe_set, always_active=True):
        name, pipe = pipe_set
        self._pipes.append((name, pipe))
        self._activated[name] = True

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        for name, pipe in self._pipes:
            if not self._activated[name]:
                continue
            if self._verbose:
                print(f'> pushing through \'{name}\' with {pipe}')
            X = pipe.fit_transform(X)
        return X

    def show_pipeline(self):
        out = []
        for name, _ in self._pipes:
            if self._activated[name]:
                out.append(name)
        return out

    def _activate_array(self, array):
        # BUG FIX: the original `for value, name, _ in zip(array, self._pipes)`
        # raised ValueError, because zip yields 2-tuples (value, (name, pipe)).
        for value, (name, _) in zip(array, self._pipes):
            if not value:
                self._disable_pipe(name)

    def _disable_pipe(self, name):
        self._activated[name] = False
class CategoryEncoder(Transformer, TransformerMixin):
    """
    CategoryEncoder object
    Upon creation, you should specify the column names that will be encoded.
    Alternatively you can set them with the set_columns method, or display
    them with the get_columns method.
    It is used to encode categorical attributes of the dataframe.
    It contains its merger _merger , as well as the specified encoder
    _encoder .
    Possible encodings:
    - 'onehot'
    """

    def __init__(self, columns, encoder='onehot', encoder_params=None):
        # BUG FIX: the default used to be a mutable list ([]), which is a
        # single object shared by every instance built with the default;
        # use the None sentinel instead.
        if encoder_params is None:
            encoder_params = []
        self._columns = columns
        self._merger = Merger()
        self._encoder_type = encoder
        if self._encoder_type == 'onehot':
            self._encoder = OneHotEncoder(*encoder_params)

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        if not isinstance(X, pd.DataFrame):
            raise NotADataFrameException
        if self._encoder_type == 'onehot':
            values = self._encoder.fit_transform(X[self._columns]).toarray()
            values = encoded_array_to_df_compatible_array(values)
            # NOTE(review): feature[3:] strips prefixes like 'x0_'; with ten
            # or more encoded columns ('x10_') this leaves a stray character
            # in the name -- confirm the expected column count.
            features = self._encoder.get_feature_names()
            features = [feature[3:] for feature in features]
            return self._merger.new_merge(features, values).fit_transform(
                X.drop(self._columns, axis=1), y)
Unlike other objects, it does not inherit Transformer class, as it", "from .base import ShapeException, NotADataFrameException, Transformer class Merger(TransformerMixin): \"\"\" Merger Object It is", "(and currently, the only one implemented) is correlation. Upon object creation, specify optimization", "combined fit(X, y) and transform(X, y). This is advised in most cases just", "object, or call 'new_merge' method with those parameters. If X parameter of transformation", "pipe_set self._pipes.append((name, pipe)) self._activated[name] = True def fit(self, X, y=None): return self def", "that is less significant than that, will not be a part of returned", "X = pipe.fit_transform(X) return X def show_pipeline(self): out = [] for name, _", "Merger() self._encoder_type = encoder if self._encoder_type == 'onehot': self._encoder = OneHotEncoder(*encoder_params) def fit(self,", "method, or display them with get_columns method. It is used to encode categorical", "You can specify column_names and column_values upon creating the object, or call 'new_merge'", "threshold = int(opt[5:]) / 100 corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters = [name for", "names that will be encoded. Alternatively you can set them with set_columns method,", "self._to_df(X_tr) return X_tr else: return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It", "= OneHotEncoder(*encoder_params) def fit(self, X, y=None): return self def transform(self, X, y=None): if", "achieve the same result with basic pandas operations. Unlike other objects, it does", "__init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False): self._imputer = SimpleImputer( missing_values, strategy, fill_value,", "indicator for automatic dataframe modeling for best predictions later on. 
show_pipeline() <- returns", "most cases just to stay friendly with sklearn module. \"\"\" def __init__(self, verbose=False):", ". Possible encodings: - 'onehot' \"\"\" def __init__(self, columns, encoder='onehot', encoder_params=[]): self._columns =", "itself. transform(X, y) <- performs the transformation (adds new attribute) on the dataframe", "a wrapper around sklearn.impute.SimpleImputer, all it does, is that it takes dataframe as", "the information about it. new_pipe(pipe_set, always_active[, True]) <- creates a new pipe (ordered),", "module. \"\"\" def __init__(self, verbose=False): self._pipes = [] self._activated = {} self._verbose =", "encodings: - 'onehot' \"\"\" def __init__(self, columns, encoder='onehot', encoder_params=[]): self._columns = columns self._merger", "'onehot': self._encoder = OneHotEncoder(*encoder_params) def fit(self, X, y=None): return self def transform(self, X,", "X def new_merge(self, cols_names, cols_values): self._cols_names = cols_names self._cols_values = cols_values return self", "transformation is not a dataframe, raises an exception. \"\"\" def __init__(self, cols_names=None, cols_values=None):", "y). This is advised in most cases just to stay friendly with sklearn", "though) as sklearn.Pipeline . Methods: __init__(verbose[, False]) <- if 'verbose' is True, then", "returned dataframe. Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <- optimize_for (str) are the", "Pipesystem object It works (it has less features though) as sklearn.Pipeline . Methods:", "X parameter of transformation is not a dataframe, raises an exception. 
\"\"\" def", "<- performs the transformation (adds new attribute) on the dataframe and returns it.", "import pandas as pd import numpy as np from sklearn.base import TransformerMixin from", "X, y=None): return self def transform(self, X, y=None): parameters = [] for parameter", "cols_names=None, cols_values=None): self._cols_names = cols_names self._cols_values = cols_values def fit(self, X, y=None): return", "the method used to optimize. If 'verbose' is True, then everytime a transformation", "is a list of column names (str) and/or constant parameters. new_attribute(name, function, parameters)", "is used to add new columns (features) to the dataframe. Methods: __init__(name, function,", "parameters. new_attribute(name, function, parameters) <- ... fit(X, y) <- returns itself. transform(X, y)", "def __init__(self, columns, encoder='onehot', encoder_params=[]): self._columns = columns self._merger = Merger() self._encoder_type =", "is a wrapper around sklearn.impute.SimpleImputer, all it does, is that it takes dataframe", "__init__(self, name, function, parameters): self.name = name self.function = function self.parameters = parameters", "modeling for best predictions later on. show_pipeline() <- returns an ordered list with", "\"\"\" def __init__(self, verbose=False): self._pipes = [] self._activated = {} self._verbose = verbose", "for name, pipe in self._pipes: if self._activated[name] == False: continue if self._verbose: print(f'>", "most promising features without actually training a model. One way of optimization (and", "<- returns itself. 
transform(X, y) <- performs all transformations (from all pipes) on", "cols_names, cols_values): self._cols_names = cols_names self._cols_values = cols_values return self class CategoryEncoder(Transformer, TransformerMixin):", "corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters = [name for name in corr_table if abs(corr_table[name])", "def fit(self, X, y=None): return self def transform(self, X, y=None): for name, pipe", "<- optimize_for (str) are the target columns (labels). optimization (str) is the method", "module. \"\"\" def __init__(self, optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose) self._target = optimize_for self._optimization", "Transformer class, as it doesn't need to transform dataframe to array or vice-versa.", "name, function, parameters): self.name = name self.function = function self.parameters = parameters def", "then everytime a transformation is made, it will print out the information about", "X, y=None): for name, pipe in self._pipes: if self._activated[name] == False: continue if", "verbose def new_pipe(self, pipe_set, always_active=True): name, pipe = pipe_set self._pipes.append((name, pipe)) self._activated[name] =", "as a dataframe, with the same exact columns. \"\"\" def __init__(self, missing_values=np.nan, strategy='mean',", "add_indicator=False): self._imputer = SimpleImputer( missing_values, strategy, fill_value, verbose, copy, add_indicator) def fit(self, X,", "that will be encoded. Alternatively you can set them with set_columns method, or", "(str) are the target columns (labels). optimization (str) is the method used to", "into np.ndarray, fed into actual SimpleImputer object, and the result is returned as", "basic pandas operations. 
Unlike other objects, it does not inherit Transformer class, as", "every feature that is less significant than that, will not be a part", "self._pipes: if self._activated[name] == False: continue if self._verbose: print(f'> pushing through \\'{name}\\' with", "self._target = optimize_for self._optimization = optimization self._best_parameters = [] def transform(self, X, y=None):", "def fit(self, X, y=None): return self def transform(self, X, y=None): if isinstance(X, pd.DataFrame):", "y=None): return self def transform(self, X, y=None): parameters = [] for parameter in", "wrapper around sklearn.impute.SimpleImputer, all it does, is that it takes dataframe as an", "verbose=0, copy=True, add_indicator=False): self._imputer = SimpleImputer( missing_values, strategy, fill_value, verbose, copy, add_indicator) def", "contains it's merger _merger , as well as specified encoder _encoder . Possible", "y) <- returns itself. transform(X, y) <- performs all transformations (from all pipes)", "and returns the dataframe. fit_transform(X, y) <- combined fit(X, y) and transform(X, y).", "in that order ). always_active does not have any functionality at this moment.", "an input, which is transformed into np.ndarray, fed into actual SimpleImputer object, and", "<- performs all transformations (from all pipes) on the dataframe and returns it.", "self.name = name self.function = function self.parameters = parameters def fit(self, X, y=None):", "as specified encoder _encoder . Possible encodings: - 'onehot' \"\"\" def __init__(self, columns,", "False class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object Enhanced rdfs.Pipesystem , it uses one of", "upon which the values will be created, 'parameters' is a list of column", "set_columns method, or display them with get_columns method. 
It is used to encode", "class Merger(TransformerMixin): \"\"\" Merger Object It is used to merge dataframes with given", "object Enhanced rdfs.Pipesystem , it uses one of the optimiztion methods to determine", "_merger , as well as specified encoder _encoder . Possible encodings: - 'onehot'", "<- performs all transformations (from all pipes) on the dataframe, chooses the most", "y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException for name, values in zip(self._cols_names, self._cols_values):", "currently, the only one implemented) is correlation. Upon object creation, specify optimization parameter", "fit(self, X, y=None): return self def transform(self, X, y=None): if isinstance(X, pd.DataFrame): self._columns", "X, y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException if self._encoder_type == 'onehot': values", "sklearn.base import TransformerMixin from sklearn.preprocessing import OneHotEncoder from sklearn.impute import SimpleImputer from .helpers", "given columns. It is probably useful only for pipelines, as you can easily", "those parameters. If X parameter of transformation is not a dataframe, raises an", "class, as it doesn't need to transform dataframe to array or vice-versa. You", "probably useful only for pipelines, as you can easily achieve the same result", "y=None): parameters = [] for parameter in self.parameters: if isinstance(parameter, str): parameter =", "self._activated[name] == False: continue if self._verbose: print(f'> pushing through \\'{name}\\' with {pipe}') X", "(str) and/or constant parameters. new_attribute(name, function, parameters) <- ... fit(X, y) <- returns", "ordered list with all current pipes. fit(X, y) <- returns itself. transform(X, y)", "operations. 
Unlike other objects, it does not inherit Transformer class, as it doesn't", "self._verbose: print(f'> pushing through \\'{name}\\' with {pipe}') X = pipe.fit_transform(X) return X def", "_ in self._pipes: if self._activated[name]: out.append(name) return out def _activate_array(self, array): for value,", "out def _activate_array(self, array): for value, name, _ in zip(array, self._pipes): if not", "merge dataframes with given columns. It is probably useful only for pipelines, as", "as you can easily achieve the same result with basic pandas operations. Unlike", "cols_values def fit(self, X, y=None): return self def transform(self, X, y=None): if not", "parameter = X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters) return X class Pipesystem(TransformerMixin): \"\"\" Pipesystem", "pipes) on the dataframe and returns it. fit_transform(X, y) <- combined fit(X, y)", "{} self._verbose = verbose def new_pipe(self, pipe_set, always_active=True): name, pipe = pipe_set self._pipes.append((name,", "is not a dataframe, raises an exception. \"\"\" def __init__(self, cols_names=None, cols_values=None): self._cols_names", "object Upon creation, you should specify column names that will be encoded. Alternatively", "X.columns X_tr = self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr) return X_tr else: return self._imputer.fit_transform(X, y)", "a list of column names (str) and/or constant parameters. new_attribute(name, function, parameters) <-", "the dataframe. It contains it's merger _merger , as well as specified encoder", "easily achieve the same result with basic pandas operations. 
Unlike other objects, it", "class Pipesystem(TransformerMixin): \"\"\" Pipesystem object It works (it has less features though) as", "import encoded_array_to_df_compatible_array from .base import ShapeException, NotADataFrameException, Transformer class Merger(TransformerMixin): \"\"\" Merger Object", "y) <- performs all transformations (from all pipes) on the dataframe, chooses the", "def transform(self, X, y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException for name, values", "name and object ( in that order ). always_active does not have any", "cols_names self._cols_values = cols_values def fit(self, X, y=None): return self def transform(self, X,", "isinstance(X, pd.DataFrame): raise NotADataFrameException for name, values in zip(self._cols_names, self._cols_values): X[name] = values", "TransformerMixin): \"\"\" CategoryEncoder object Upon creation, you should specify column names that will", "y=None): if isinstance(X, pd.DataFrame): self._columns = X.columns X_tr = self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr)", "a filter, every feature that is less significant than that, will not be", "in self.parameters: if isinstance(parameter, str): parameter = X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters) return", "self def transform(self, X, y=None): for name, pipe in self._pipes: if self._activated[name] ==", "__init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <- optimize_for (str) are the target columns (labels).", "self._cols_names = cols_names self._cols_values = cols_values def fit(self, X, y=None): return self def", "pipe_set is expected to be a tuple of name and object ( in", "Merger(TransformerMixin): \"\"\" Merger Object It is used to merge dataframes with given columns.", "self._verbose = verbose def new_pipe(self, pipe_set, always_active=True): name, pipe = pipe_set self._pipes.append((name, pipe))", "parameters): self.name = name self.function = function 
self.parameters = parameters def new_attribute(self, name,", "is correlation. Upon object creation, specify optimization parameter to 'corr_<int>', the integer will", "continue if self._verbose: print(f'> pushing through \\'{name}\\' with {pipe}') X = pipe.fit_transform(X) return", "y) <- combined fit(X, y) and transform(X, y). This is advised in most", "with sklearn module. \"\"\" def __init__(self, optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose) self._target =", "the only one implemented) is correlation. Upon object creation, specify optimization parameter to", "cols_values return self class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object Upon creation, you should", "with {pipe}') X = pipe.fit_transform(X) return X def show_pipeline(self): out = [] for", "def new_attribute(self, name, function, parameters): self.name = name self.function = function self.parameters =", "it doesn't need to transform dataframe to array or vice-versa. You can specify", "like a filter, every feature that is less significant than that, will not", "class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object Enhanced rdfs.Pipesystem , it uses one of the", "This is advised in most cases just to stay friendly with sklearn module.", "if self._encoder_type == 'onehot': values = self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names()", "of optimization (and currently, the only one implemented) is correlation. Upon object creation,", "pipe_set, always_active=True): name, pipe = pipe_set self._pipes.append((name, pipe)) self._activated[name] = True def fit(self,", "0 to 100 and will act like a filter, every feature that is", "of returned dataframe. Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <- optimize_for (str) are", "new attribute) on the dataframe and returns it. 
fit_transform(X, y) <- combined fit(X,", "sklearn.preprocessing import OneHotEncoder from sklearn.impute import SimpleImputer from .helpers import encoded_array_to_df_compatible_array from .base", "to array or vice-versa. You can specify column_names and column_values upon creating the", "values = self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names() features = [feature[3:] for", "out = [] for name, _ in self._pipes: if self._activated[name]: out.append(name) return out", "function, parameters) <- 'name' is the label of new column, 'function' is a", "be encoded. Alternatively you can set them with set_columns method, or display them", "def _activate_array(self, array): for value, name, _ in zip(array, self._pipes): if not value:", "be a tuple of name and object ( in that order ). always_active", ".helpers import encoded_array_to_df_compatible_array from .base import ShapeException, NotADataFrameException, Transformer class Merger(TransformerMixin): \"\"\" Merger", "new columns (features) to the dataframe. Methods: __init__(name, function, parameters) <- 'name' is", "self._pipes: if self._activated[name]: out.append(name) return out def _activate_array(self, array): for value, name, _", "Upon object creation, specify optimization parameter to 'corr_<int>', the integer will be the", "order ). always_active does not have any functionality at this moment. 
It is", "__init__(name, function, parameters) <- 'name' is the label of new column, 'function' is", "self._activated[name]: out.append(name) return out def _activate_array(self, array): for value, name, _ in zip(array,", "return self def transform(self, X, y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException for", "def transform(self, X, y=None): parameters = [] for parameter in self.parameters: if isinstance(parameter,", "which the values will be created, 'parameters' is a list of column names", "self._encoder = OneHotEncoder(*encoder_params) def fit(self, X, y=None): return self def transform(self, X, y=None):", "are the target columns (labels). optimization (str) is the method used to optimize.", "if isinstance(X, pd.DataFrame): self._columns = X.columns X_tr = self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr) return", "new_pipe(pipe_set, always_active[, True]) <- creates a new pipe (ordered), pipe_set is expected to", "is used to encode categorical attributes of the dataframe. It contains it's merger", "automatic dataframe modeling for best predictions later on. show_pipeline() <- returns an ordered", "= self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr) return X_tr else: return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin):", "other objects, it does not inherit Transformer class, as it doesn't need to", "for feature in features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y) class Imputer(Transformer, TransformerMixin): \"\"\"", "array): for value, name, _ in zip(array, self._pipes): if not value: self._disable_pipe(name) def", "= getattr(self, '_optimization', 'corr_20') if opt[:5] == 'corr_': threshold = int(opt[5:]) / 100", "y) and transform(X, y). This is advised in most cases just to stay", "for best predictions later on. 
show_pipeline() <- returns an ordered list with all", "encoder='onehot', encoder_params=[]): self._columns = columns self._merger = Merger() self._encoder_type = encoder if self._encoder_type", "self._best_parameters = [] def transform(self, X, y=None): X = Pipesystem.transform(self, X, y) opt", "y=None): return self def transform(self, X, y=None): if isinstance(X, pd.DataFrame): self._columns = X.columns", "features and returns the dataframe. fit_transform(X, y) <- combined fit(X, y) and transform(X,", "getattr(self, '_optimization', 'corr_20') if opt[:5] == 'corr_': threshold = int(opt[5:]) / 100 corr_table", "pipelines, as you can easily achieve the same result with basic pandas operations.", "return self def transform(self, X, y=None): if isinstance(X, pd.DataFrame): self._columns = X.columns X_tr", "[] for parameter in self.parameters: if isinstance(parameter, str): parameter = X[parameter] parameters.append(parameter) X[self.name]", "np.ndarray, fed into actual SimpleImputer object, and the result is returned as a", "fed into actual SimpleImputer object, and the result is returned as a dataframe,", "implemented) is correlation. Upon object creation, specify optimization parameter to 'corr_<int>', the integer", "to merge dataframes with given columns. It is probably useful only for pipelines,", "X_tr else: return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It is used", "import OneHotEncoder from sklearn.impute import SimpleImputer from .helpers import encoded_array_to_df_compatible_array from .base import", "[] def transform(self, X, y=None): X = Pipesystem.transform(self, X, y) opt = getattr(self,", "pipe.fit_transform(X) return X def show_pipeline(self): out = [] for name, _ in self._pipes:", "_activate_array(self, array): for value, name, _ in zip(array, self._pipes): if not value: self._disable_pipe(name)", "not a dataframe, raises an exception. 
\"\"\" def __init__(self, cols_names=None, cols_values=None): self._cols_names =", "self._merger = Merger() self._encoder_type = encoder if self._encoder_type == 'onehot': self._encoder = OneHotEncoder(*encoder_params)", "is made, it will print out the information about it. new_pipe(pipe_set, always_active[, True])", "not inherit Transformer class, as it doesn't need to transform dataframe to array", "is expected to be a tuple of name and object ( in that", "just to stay friendly with sklearn module. \"\"\" def __init__(self, verbose=False): self._pipes =", "opt[:5] == 'corr_': threshold = int(opt[5:]) / 100 corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters", "sklearn module. \"\"\" def __init__(self, verbose=False): self._pipes = [] self._activated = {} self._verbose", "features though) as sklearn.Pipeline . Methods: __init__(verbose[, False]) <- if 'verbose' is True,", "sklearn.impute import SimpleImputer from .helpers import encoded_array_to_df_compatible_array from .base import ShapeException, NotADataFrameException, Transformer", "\"\"\" Pipesystem object It works (it has less features though) as sklearn.Pipeline .", "fit(X, y) <- returns itself. transform(X, y) <- performs all transformations (from all", "False: continue if self._verbose: print(f'> pushing through \\'{name}\\' with {pipe}') X = pipe.fit_transform(X)", "cols_names self._cols_values = cols_values return self class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object Upon", "\"\"\" Imputer object It is a wrapper around sklearn.impute.SimpleImputer, all it does, is", "return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It is used to add", "doesn't need to transform dataframe to array or vice-versa. 
You can specify column_names", "of new column, 'function' is a function upon which the values will be", "OptimizedPipesystem object Enhanced rdfs.Pipesystem , it uses one of the optimiztion methods to", "if 'verbose' is True, then everytime a transformation is made, it will print", "parameters) <- 'name' is the label of new column, 'function' is a function", "current pipes. fit(X, y) <- returns itself. transform(X, y) <- performs all transformations", "not be a part of returned dataframe. Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[, False])", "axis=1), y) class Imputer(Transformer, TransformerMixin): \"\"\" Imputer object It is a wrapper around", "cases just to stay friendly with sklearn module. \"\"\" def __init__(self, name, function,", "all pipes) on the dataframe and returns it. fit_transform(X, y) <- combined fit(X,", "self class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object Upon creation, you should specify column", "encoded_array_to_df_compatible_array from .base import ShapeException, NotADataFrameException, Transformer class Merger(TransformerMixin): \"\"\" Merger Object It", "self._encoder_type = encoder if self._encoder_type == 'onehot': self._encoder = OneHotEncoder(*encoder_params) def fit(self, X,", "label of new column, 'function' is a function upon which the values will", "function, parameters): self.name = name self.function = function self.parameters = parameters def fit(self,", "self._disable_pipe(name) def _disable_pipe(self, name): self._activated[name] = False class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object Enhanced", "that, will not be a part of returned dataframe. Methods: __init__(optimize_for, optimization[, 'corr_20'],", "most cases just to stay friendly with sklearn module. \"\"\" def __init__(self, optimize_for,", "best predictions later on. 
show_pipeline() <- returns an ordered list with all current", "into actual SimpleImputer object, and the result is returned as a dataframe, with", "return X def show_pipeline(self): out = [] for name, _ in self._pipes: if", "{pipe}') X = pipe.fit_transform(X) return X def show_pipeline(self): out = [] for name,", "add new columns (features) to the dataframe. Methods: __init__(name, function, parameters) <- 'name'", "set them with set_columns method, or display them with get_columns method. It is", "optimiztion methods to determine the most promising features without actually training a model.", "features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y) class Imputer(Transformer, TransformerMixin): \"\"\" Imputer object It", "is used to merge dataframes with given columns. It is probably useful only", "X, y=None): return self def transform(self, X, y=None): for name, pipe in self._pipes:", "not isinstance(X, pd.DataFrame): raise NotADataFrameException if self._encoder_type == 'onehot': values = self._encoder.fit_transform(X[self._columns]).toarray() values", "= function self.parameters = parameters def fit(self, X, y=None): return self def transform(self,", "transform(self, X, y=None): parameters = [] for parameter in self.parameters: if isinstance(parameter, str):", "is less significant than that, will not be a part of returned dataframe.", "Transformer class Merger(TransformerMixin): \"\"\" Merger Object It is used to merge dataframes with", "= encoder if self._encoder_type == 'onehot': self._encoder = OneHotEncoder(*encoder_params) def fit(self, X, y=None):", "call 'new_merge' method with those parameters. If X parameter of transformation is not", "returns itself. transform(X, y) <- performs the transformation (adds new attribute) on the", "friendly with sklearn module. 
\"\"\" def __init__(self, name, function, parameters): self.name = name", "def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False): self._imputer = SimpleImputer( missing_values, strategy,", "= [feature[3:] for feature in features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y) class Imputer(Transformer,", "<- ... fit(X, y) <- returns itself. transform(X, y) <- performs the transformation", "determine the most promising features without actually training a model. One way of", "False]) <- optimize_for (str) are the target columns (labels). optimization (str) is the", "def new_merge(self, cols_names, cols_values): self._cols_names = cols_names self._cols_values = cols_values return self class", "transform(self, X, y=None): for name, pipe in self._pipes: if self._activated[name] == False: continue", "= {} self._verbose = verbose def new_pipe(self, pipe_set, always_active=True): name, pipe = pipe_set", "Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <- optimize_for (str) are the target columns", "(adds new attribute) on the dataframe and returns it. fit_transform(X, y) <- combined", "raise NotADataFrameException for name, values in zip(self._cols_names, self._cols_values): X[name] = values return X", "It contains it's merger _merger , as well as specified encoder _encoder .", "it. new_pipe(pipe_set, always_active[, True]) <- creates a new pipe (ordered), pipe_set is expected", "self._activated = {} self._verbose = verbose def new_pipe(self, pipe_set, always_active=True): name, pipe =", "'parameters' is a list of column names (str) and/or constant parameters. 
new_attribute(name, function,", "Methods: __init__(verbose[, False]) <- if 'verbose' is True, then everytime a transformation is", "verbose) self._target = optimize_for self._optimization = optimization self._best_parameters = [] def transform(self, X,", "add_indicator) def fit(self, X, y=None): return self def transform(self, X, y=None): if isinstance(X,", "transformations (from all pipes) on the dataframe and returns it. fit_transform(X, y) <-", "is the method used to optimize. If 'verbose' is True, then everytime a", "with set_columns method, or display them with get_columns method. It is used to", "can set them with set_columns method, or display them with get_columns method. It", "object, and the result is returned as a dataframe, with the same exact", "self.parameters: if isinstance(parameter, str): parameter = X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters) return X", "to be a tuple of name and object ( in that order ).", "all pipes) on the dataframe, chooses the most meaningful features and returns the", "from 0 to 100 and will act like a filter, every feature that", "an exception. \"\"\" def __init__(self, cols_names=None, cols_values=None): self._cols_names = cols_names self._cols_values = cols_values", "\"\"\" OptimizedPipesystem object Enhanced rdfs.Pipesystem , it uses one of the optimiztion methods", "_ in zip(array, self._pipes): if not value: self._disable_pipe(name) def _disable_pipe(self, name): self._activated[name] =", "self def transform(self, X, y=None): parameters = [] for parameter in self.parameters: if", "transform(X, y) <- performs the transformation (adds new attribute) on the dataframe and", "parameter to 'corr_<int>', the integer will be the percent rate from 0 to", "self def transform(self, X, y=None): if isinstance(X, pd.DataFrame): self._columns = X.columns X_tr =", "out the information about it. 
new_pipe(pipe_set, always_active[, True]) <- creates a new pipe", "self.parameters = parameters def fit(self, X, y=None): return self def transform(self, X, y=None):", "encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names() features = [feature[3:] for feature in features] return self._merger.new_merge(features,", "cases just to stay friendly with sklearn module. \"\"\" def __init__(self, optimize_for, optimization='corr_20',", "a model. One way of optimization (and currently, the only one implemented) is", "name, values in zip(self._cols_names, self._cols_values): X[name] = values return X def new_merge(self, cols_names,", "new column, 'function' is a function upon which the values will be created,", "for automatic dataframe modeling for best predictions later on. show_pipeline() <- returns an", "of the optimiztion methods to determine the most promising features without actually training", "the optimiztion methods to determine the most promising features without actually training a", "= self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names() features = [feature[3:] for feature", "self.name = name self.function = function self.parameters = parameters def new_attribute(self, name, function,", "always_active=True): name, pipe = pipe_set self._pipes.append((name, pipe)) self._activated[name] = True def fit(self, X,", "dataframe. Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <- optimize_for (str) are the target", "missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False): self._imputer = SimpleImputer( missing_values, strategy, fill_value, verbose,", "100 and will act like a filter, every feature that is less significant", "It is used to merge dataframes with given columns. 
It is probably useful", "else: return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It is used to", "TransformerMixin): \"\"\" Imputer object It is a wrapper around sklearn.impute.SimpleImputer, all it does,", "X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters) return X class Pipesystem(TransformerMixin): \"\"\" Pipesystem object It", "import ShapeException, NotADataFrameException, Transformer class Merger(TransformerMixin): \"\"\" Merger Object It is used to", "== False: continue if self._verbose: print(f'> pushing through \\'{name}\\' with {pipe}') X =", "'onehot' \"\"\" def __init__(self, columns, encoder='onehot', encoder_params=[]): self._columns = columns self._merger = Merger()", "with sklearn module. \"\"\" def __init__(self, name, function, parameters): self.name = name self.function", "the result is returned as a dataframe, with the same exact columns. \"\"\"", "it does not inherit Transformer class, as it doesn't need to transform dataframe", "to encode categorical attributes of the dataframe. It contains it's merger _merger ,", ", as well as specified encoder _encoder . 
Possible encodings: - 'onehot' \"\"\"", "X class Pipesystem(TransformerMixin): \"\"\" Pipesystem object It works (it has less features though)", "= verbose def new_pipe(self, pipe_set, always_active=True): name, pipe = pipe_set self._pipes.append((name, pipe)) self._activated[name]", "= False class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object Enhanced rdfs.Pipesystem , it uses one", "the integer will be the percent rate from 0 to 100 and will", "= Merger() self._encoder_type = encoder if self._encoder_type == 'onehot': self._encoder = OneHotEncoder(*encoder_params) def", "Methods: __init__(name, function, parameters) <- 'name' is the label of new column, 'function'", "percent rate from 0 to 100 and will act like a filter, every", "/ 100 corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters = [name for name in corr_table", "significant than that, will not be a part of returned dataframe. Methods: __init__(optimize_for,", "Unlike other objects, it does not inherit Transformer class, as it doesn't need", "self._cols_names = cols_names self._cols_values = cols_values return self class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder", "False]) <- if 'verbose' is True, then everytime a transformation is made, it", "self._cols_values = cols_values return self class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object Upon creation,", "If 'verbose' is True, then everytime a transformation is made, it will print", "is advised in most cases just to stay friendly with sklearn module. \"\"\"", ".base import ShapeException, NotADataFrameException, Transformer class Merger(TransformerMixin): \"\"\" Merger Object It is used", "an ordered list with all current pipes. fit(X, y) <- returns itself. transform(X,", "dataframes with given columns. 
It is probably useful only for pipelines, as you", "strategy, fill_value, verbose, copy, add_indicator) def fit(self, X, y=None): return self def transform(self,", "AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It is used to add new columns (features) to", "class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object Upon creation, you should specify column names", "the dataframe, chooses the most meaningful features and returns the dataframe. fit_transform(X, y)", "get_columns method. It is used to encode categorical attributes of the dataframe. It", "Imputer(Transformer, TransformerMixin): \"\"\" Imputer object It is a wrapper around sklearn.impute.SimpleImputer, all it", "of the dataframe. It contains it's merger _merger , as well as specified", "constant parameters. new_attribute(name, function, parameters) <- ... fit(X, y) <- returns itself. transform(X,", "of column names (str) and/or constant parameters. new_attribute(name, function, parameters) <- ... fit(X,", "or call 'new_merge' method with those parameters. If X parameter of transformation is", "it to be a indicator for automatic dataframe modeling for best predictions later", "__init__(self, optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose) self._target = optimize_for self._optimization = optimization self._best_parameters", "which is transformed into np.ndarray, fed into actual SimpleImputer object, and the result", "as well as specified encoder _encoder . Possible encodings: - 'onehot' \"\"\" def", "def __init__(self, verbose=False): self._pipes = [] self._activated = {} self._verbose = verbose def", "be the percent rate from 0 to 100 and will act like a", "encoder _encoder . 
Possible encodings: - 'onehot' \"\"\" def __init__(self, columns, encoder='onehot', encoder_params=[]):", "= name self.function = function self.parameters = parameters def new_attribute(self, name, function, parameters):", "optimization self._best_parameters = [] def transform(self, X, y=None): X = Pipesystem.transform(self, X, y)", "CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object Upon creation, you should specify column names that", "actual SimpleImputer object, and the result is returned as a dataframe, with the", "columns (labels). optimization (str) is the method used to optimize. If 'verbose' is", "class Imputer(Transformer, TransformerMixin): \"\"\" Imputer object It is a wrapper around sklearn.impute.SimpleImputer, all", "self.function = function self.parameters = parameters def new_attribute(self, name, function, parameters): self.name =", "= pipe_set self._pipes.append((name, pipe)) self._activated[name] = True def fit(self, X, y=None): return self", "and will act like a filter, every feature that is less significant than", "self._encoder_type == 'onehot': values = self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names() features", "= name self.function = function self.parameters = parameters def fit(self, X, y=None): return", "the most promising features without actually training a model. One way of optimization", "self.function(*parameters) return X class Pipesystem(TransformerMixin): \"\"\" Pipesystem object It works (it has less", "values = encoded_array_to_df_compatible_array(values) features = self._encoder.get_feature_names() features = [feature[3:] for feature in features]", "creation, you should specify column names that will be encoded. Alternatively you can", "method with those parameters. 
If X parameter of transformation is not a dataframe,", "return X_tr else: return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It is", "promising features without actually training a model. One way of optimization (and currently,", "in zip(array, self._pipes): if not value: self._disable_pipe(name) def _disable_pipe(self, name): self._activated[name] = False", "name, pipe = pipe_set self._pipes.append((name, pipe)) self._activated[name] = True def fit(self, X, y=None):", "pipe = pipe_set self._pipes.append((name, pipe)) self._activated[name] = True def fit(self, X, y=None): return", "pd.DataFrame): raise NotADataFrameException for name, values in zip(self._cols_names, self._cols_values): X[name] = values return", "fill_value=None, verbose=0, copy=True, add_indicator=False): self._imputer = SimpleImputer( missing_values, strategy, fill_value, verbose, copy, add_indicator)", "will act like a filter, every feature that is less significant than that,", "on the dataframe, chooses the most meaningful features and returns the dataframe. fit_transform(X,", "same exact columns. \"\"\" def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False): self._imputer", "X, y=None): X = Pipesystem.transform(self, X, y) opt = getattr(self, '_optimization', 'corr_20') if", "parameter in self.parameters: if isinstance(parameter, str): parameter = X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters)", "sklearn.Pipeline . Methods: __init__(verbose[, False]) <- if 'verbose' is True, then everytime a", "just to stay friendly with sklearn module. \"\"\" def __init__(self, name, function, parameters):", "parameters.append(parameter) X[self.name] = self.function(*parameters) return X class Pipesystem(TransformerMixin): \"\"\" Pipesystem object It works", "does not have any functionality at this moment. 
It is expected for it", "with get_columns method. It is used to encode categorical attributes of the dataframe.", "self._columns = columns self._merger = Merger() self._encoder_type = encoder if self._encoder_type == 'onehot':", "chooses the most meaningful features and returns the dataframe. fit_transform(X, y) <- combined", "for name, _ in self._pipes: if self._activated[name]: out.append(name) return out def _activate_array(self, array):", "[] for name, _ in self._pipes: if self._activated[name]: out.append(name) return out def _activate_array(self,", "name self.function = function self.parameters = parameters def fit(self, X, y=None): return self", "= self._to_df(X_tr) return X_tr else: return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object", "parameters. If X parameter of transformation is not a dataframe, raises an exception.", "def transform(self, X, y=None): X = Pipesystem.transform(self, X, y) opt = getattr(self, '_optimization',", "isinstance(parameter, str): parameter = X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters) return X class Pipesystem(TransformerMixin):", "np from sklearn.base import TransformerMixin from sklearn.preprocessing import OneHotEncoder from sklearn.impute import SimpleImputer", "One way of optimization (and currently, the only one implemented) is correlation. Upon", "encode categorical attributes of the dataframe. It contains it's merger _merger , as", "X, y=None): parameters = [] for parameter in self.parameters: if isinstance(parameter, str): parameter", "the values will be created, 'parameters' is a list of column names (str)", "all transformations (from all pipes) on the dataframe, chooses the most meaningful features", "y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException if self._encoder_type == 'onehot': values =", "will print out the information about it. 
new_pipe(pipe_set, always_active[, True]) <- creates a", "isinstance(X, pd.DataFrame): self._columns = X.columns X_tr = self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr) return X_tr", "= columns self._merger = Merger() self._encoder_type = encoder if self._encoder_type == 'onehot': self._encoder", "returns itself. transform(X, y) <- performs all transformations (from all pipes) on the", "True def fit(self, X, y=None): return self def transform(self, X, y=None): for name,", "most meaningful features and returns the dataframe. fit_transform(X, y) <- combined fit(X, y)", "specify column_names and column_values upon creating the object, or call 'new_merge' method with", "meaningful features and returns the dataframe. fit_transform(X, y) <- combined fit(X, y) and", "pipes) on the dataframe, chooses the most meaningful features and returns the dataframe.", "self._pipes.append((name, pipe)) self._activated[name] = True def fit(self, X, y=None): return self def transform(self,", "copy=True, add_indicator=False): self._imputer = SimpleImputer( missing_values, strategy, fill_value, verbose, copy, add_indicator) def fit(self,", "as sklearn.Pipeline . Methods: __init__(verbose[, False]) <- if 'verbose' is True, then everytime", "to transform dataframe to array or vice-versa. You can specify column_names and column_values", "transformed into np.ndarray, fed into actual SimpleImputer object, and the result is returned", "if self._activated[name] == False: continue if self._verbose: print(f'> pushing through \\'{name}\\' with {pipe}')", "and object ( in that order ). always_active does not have any functionality", "y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It is used to add new columns", "<- if 'verbose' is True, then everytime a transformation is made, it will", "the target columns (labels). optimization (str) is the method used to optimize. 
If", "for name, values in zip(self._cols_names, self._cols_values): X[name] = values return X def new_merge(self,", "optimization parameter to 'corr_<int>', the integer will be the percent rate from 0", "NotADataFrameException if self._encoder_type == 'onehot': values = self._encoder.fit_transform(X[self._columns]).toarray() values = encoded_array_to_df_compatible_array(values) features =", "pipes. fit(X, y) <- returns itself. transform(X, y) <- performs all transformations (from", "dataframe, with the same exact columns. \"\"\" def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0,", "methods to determine the most promising features without actually training a model. One", "self._cols_values = cols_values def fit(self, X, y=None): return self def transform(self, X, y=None):", "(ordered), pipe_set is expected to be a tuple of name and object (", "def transform(self, X, y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException if self._encoder_type ==", "It is expected for it to be a indicator for automatic dataframe modeling", "self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr) return X_tr else: return self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\"", "name): self._activated[name] = False class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object Enhanced rdfs.Pipesystem , it", "= X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters = [name for name in corr_table if abs(corr_table[name]) >=", "It is a wrapper around sklearn.impute.SimpleImputer, all it does, is that it takes", "It works (it has less features though) as sklearn.Pipeline . 
Methods: __init__(verbose[, False])", "self._columns = X.columns X_tr = self._imputer.fit_transform(self._to_array(X)) X_tr = self._to_df(X_tr) return X_tr else: return", "function self.parameters = parameters def new_attribute(self, name, function, parameters): self.name = name self.function", ". Methods: __init__(verbose[, False]) <- if 'verbose' is True, then everytime a transformation", "print(f'> pushing through \\'{name}\\' with {pipe}') X = pipe.fit_transform(X) return X def show_pipeline(self):", "zip(array, self._pipes): if not value: self._disable_pipe(name) def _disable_pipe(self, name): self._activated[name] = False class", "exact columns. \"\"\" def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True, add_indicator=False): self._imputer =", "columns self._merger = Merger() self._encoder_type = encoder if self._encoder_type == 'onehot': self._encoder =", "moment. It is expected for it to be a indicator for automatic dataframe", "column, 'function' is a function upon which the values will be created, 'parameters'", "class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It is used to add new columns (features)", "__init__(self, verbose=False): self._pipes = [] self._activated = {} self._verbose = verbose def new_pipe(self,", "the transformation (adds new attribute) on the dataframe and returns it. fit_transform(X, y)", "<- combined fit(X, y) and transform(X, y). 
This is advised in most cases", "cols_values): self._cols_names = cols_names self._cols_values = cols_values return self class CategoryEncoder(Transformer, TransformerMixin): \"\"\"", "self._imputer.fit_transform(X, y) class AttributeAdder(TransformerMixin): \"\"\" AttributeAdder object It is used to add new", "value: self._disable_pipe(name) def _disable_pipe(self, name): self._activated[name] = False class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object", "in features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y) class Imputer(Transformer, TransformerMixin): \"\"\" Imputer object", "rate from 0 to 100 and will act like a filter, every feature", "verbose[, False]) <- optimize_for (str) are the target columns (labels). optimization (str) is", "Pipesystem.__init__(self, verbose) self._target = optimize_for self._optimization = optimization self._best_parameters = [] def transform(self,", "all current pipes. fit(X, y) <- returns itself. transform(X, y) <- performs all", "= function self.parameters = parameters def new_attribute(self, name, function, parameters): self.name = name", "to 'corr_<int>', the integer will be the percent rate from 0 to 100", "to stay friendly with sklearn module. \"\"\" def __init__(self, name, function, parameters): self.name", "the object, or call 'new_merge' method with those parameters. 
If X parameter of", "def new_pipe(self, pipe_set, always_active=True): name, pipe = pipe_set self._pipes.append((name, pipe)) self._activated[name] = True", "'function' is a function upon which the values will be created, 'parameters' is", "optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose) self._target = optimize_for self._optimization = optimization self._best_parameters = []", "self.function = function self.parameters = parameters def fit(self, X, y=None): return self def", "= [] def transform(self, X, y=None): X = Pipesystem.transform(self, X, y) opt =", "function, parameters) <- ... fit(X, y) <- returns itself. transform(X, y) <- performs", "through \\'{name}\\' with {pipe}') X = pipe.fit_transform(X) return X def show_pipeline(self): out =", "dataframe as an input, which is transformed into np.ndarray, fed into actual SimpleImputer", "input, which is transformed into np.ndarray, fed into actual SimpleImputer object, and the", "not have any functionality at this moment. It is expected for it to", "(from all pipes) on the dataframe, chooses the most meaningful features and returns", "predictions later on. show_pipeline() <- returns an ordered list with all current pipes.", "= cols_values return self class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object Upon creation, you", "fit(X, y) <- returns itself. transform(X, y) <- performs the transformation (adds new", "if self._activated[name]: out.append(name) return out def _activate_array(self, array): for value, name, _ in", "_disable_pipe(self, name): self._activated[name] = False class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object Enhanced rdfs.Pipesystem ,", "all it does, is that it takes dataframe as an input, which is", "parameters): self.name = name self.function = function self.parameters = parameters def fit(self, X,", "stay friendly with sklearn module. 
\"\"\" def __init__(self, verbose=False): self._pipes = [] self._activated", "not value: self._disable_pipe(name) def _disable_pipe(self, name): self._activated[name] = False class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem", "self._imputer = SimpleImputer( missing_values, strategy, fill_value, verbose, copy, add_indicator) def fit(self, X, y=None):", "str): parameter = X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters) return X class Pipesystem(TransformerMixin): \"\"\"", "all transformations (from all pipes) on the dataframe and returns it. fit_transform(X, y)", "to 100 and will act like a filter, every feature that is less", "). always_active does not have any functionality at this moment. It is expected", "it will print out the information about it. new_pipe(pipe_set, always_active[, True]) <- creates", "return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y) class Imputer(Transformer, TransformerMixin): \"\"\" Imputer object It is", "model. One way of optimization (and currently, the only one implemented) is correlation.", "transformation (adds new attribute) on the dataframe and returns it. fit_transform(X, y) <-", "returns it. fit_transform(X, y) <- combined fit(X, y) and transform(X, y). This is", "specify optimization parameter to 'corr_<int>', the integer will be the percent rate from", "int(opt[5:]) / 100 corr_table = X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters = [name for name in", "them with set_columns method, or display them with get_columns method. It is used", "import TransformerMixin from sklearn.preprocessing import OneHotEncoder from sklearn.impute import SimpleImputer from .helpers import", "works (it has less features though) as sklearn.Pipeline . 
Methods: __init__(verbose[, False]) <-", "<- 'name' is the label of new column, 'function' is a function upon", "it does, is that it takes dataframe as an input, which is transformed", "module. \"\"\" def __init__(self, name, function, parameters): self.name = name self.function = function", "training a model. One way of optimization (and currently, the only one implemented)", "new_attribute(self, name, function, parameters): self.name = name self.function = function self.parameters = parameters", "= X[parameter] parameters.append(parameter) X[self.name] = self.function(*parameters) return X class Pipesystem(TransformerMixin): \"\"\" Pipesystem object", "isinstance(X, pd.DataFrame): raise NotADataFrameException if self._encoder_type == 'onehot': values = self._encoder.fit_transform(X[self._columns]).toarray() values =", "= cols_names self._cols_values = cols_values return self class CategoryEncoder(Transformer, TransformerMixin): \"\"\" CategoryEncoder object", "one implemented) is correlation. Upon object creation, specify optimization parameter to 'corr_<int>', the", "= self._encoder.get_feature_names() features = [feature[3:] for feature in features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1),", "made, it will print out the information about it. new_pipe(pipe_set, always_active[, True]) <-", "tuple of name and object ( in that order ). 
always_active does not", "'corr_20') if opt[:5] == 'corr_': threshold = int(opt[5:]) / 100 corr_table = X.corr()[getattr(self,", "= optimize_for self._optimization = optimization self._best_parameters = [] def transform(self, X, y=None): X", "def __init__(self, name, function, parameters): self.name = name self.function = function self.parameters =", "[feature[3:] for feature in features] return self._merger.new_merge(features, values).fit_transform(X.drop(self._columns, axis=1), y) class Imputer(Transformer, TransformerMixin):", "\"\"\" CategoryEncoder object Upon creation, you should specify column names that will be", "True]) <- creates a new pipe (ordered), pipe_set is expected to be a", "X, y=None): return self def transform(self, X, y=None): if isinstance(X, pd.DataFrame): self._columns =", "friendly with sklearn module. \"\"\" def __init__(self, optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose) self._target", "it. fit_transform(X, y) <- combined fit(X, y) and transform(X, y). This is advised", "a tuple of name and object ( in that order ). always_active does", "method. It is used to encode categorical attributes of the dataframe. It contains", "return out def _activate_array(self, array): for value, name, _ in zip(array, self._pipes): if", "def fit(self, X, y=None): return self def transform(self, X, y=None): parameters = []", "only one implemented) is correlation. 
Upon object creation, specify optimization parameter to 'corr_<int>',", "return self def transform(self, X, y=None): if not isinstance(X, pd.DataFrame): raise NotADataFrameException if", "NotADataFrameException for name, values in zip(self._cols_names, self._cols_values): X[name] = values return X def", "Possible encodings: - 'onehot' \"\"\" def __init__(self, columns, encoder='onehot', encoder_params=[]): self._columns = columns", "def __init__(self, optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose) self._target = optimize_for self._optimization = optimization", "act like a filter, every feature that is less significant than that, will", "this moment. It is expected for it to be a indicator for automatic", "result with basic pandas operations. Unlike other objects, it does not inherit Transformer", "columns. It is probably useful only for pipelines, as you can easily achieve", "def transform(self, X, y=None): if isinstance(X, pd.DataFrame): self._columns = X.columns X_tr = self._imputer.fit_transform(self._to_array(X))", "= self.function(*parameters) return X class Pipesystem(TransformerMixin): \"\"\" Pipesystem object It works (it has", "column names (str) and/or constant parameters. new_attribute(name, function, parameters) <- ... fit(X, y)", "a dataframe, raises an exception. \"\"\" def __init__(self, cols_names=None, cols_values=None): self._cols_names = cols_names", "for pipelines, as you can easily achieve the same result with basic pandas", "self._optimization = optimization self._best_parameters = [] def transform(self, X, y=None): X = Pipesystem.transform(self,", "pandas as pd import numpy as np from sklearn.base import TransformerMixin from sklearn.preprocessing", "OneHotEncoder from sklearn.impute import SimpleImputer from .helpers import encoded_array_to_df_compatible_array from .base import ShapeException,", "object It works (it has less features though) as sklearn.Pipeline . 
Methods: __init__(verbose[,", "X.corr()[getattr(self, '_target')].sort_values(ascending=False).to_dict() self._best_parameters = [name for name in corr_table if abs(corr_table[name]) >= threshold]", "have any functionality at this moment. It is expected for it to be", "functionality at this moment. It is expected for it to be a indicator", "y) <- returns itself. transform(X, y) <- performs the transformation (adds new attribute)", "the same result with basic pandas operations. Unlike other objects, it does not", "less significant than that, will not be a part of returned dataframe. Methods:", "only for pipelines, as you can easily achieve the same result with basic", "= [] for parameter in self.parameters: if isinstance(parameter, str): parameter = X[parameter] parameters.append(parameter)", "X, y) opt = getattr(self, '_optimization', 'corr_20') if opt[:5] == 'corr_': threshold =", "'name' is the label of new column, 'function' is a function upon which", "be a part of returned dataframe. Methods: __init__(optimize_for, optimization[, 'corr_20'], verbose[, False]) <-", "stay friendly with sklearn module. \"\"\" def __init__(self, optimize_for, optimization='corr_20', verbose=False): Pipesystem.__init__(self, verbose)", "in self._pipes: if self._activated[name] == False: continue if self._verbose: print(f'> pushing through \\'{name}\\'", "filter, every feature that is less significant than that, will not be a", "def _disable_pipe(self, name): self._activated[name] = False class OptimizedPipesystem(Pipesystem): \"\"\" OptimizedPipesystem object Enhanced rdfs.Pipesystem", "pipe in self._pipes: if self._activated[name] == False: continue if self._verbose: print(f'> pushing through", "it's merger _merger , as well as specified encoder _encoder . Possible encodings:" ]
[ "0: alignment = read.alignment base = alignment.seq[read.query_position] if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except:", "in self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list = self.merge_read_list()", "'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += SNV.feature +'\\n' # output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count,", "elif v < 0: CB4856 += 1 return N2, CB4856 def cal_Binom_p(self): self.merge_N2_read_list()", "in gene.SNV_list: # SNV_info = '' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature", "self.N2_read_list = list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV in self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list", "merge_CB4856_read_list(self): for SNV in self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self):", "'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def __init__(self, gene_name, public_name): self.gene_name = gene_name", "chrom = 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position = int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try:", "= int(N2_position)-1 CB4856_position = int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in pileups:", "line.split()[8:10] if gene_name == 'NA': continue if gene_name not in gene_name_list: gene_object =", "0: N2 += 1 elif v < 0: CB4856 += 1 return N2,", "read in 
SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1 return read_list def merge_N2_read_list(self): for SNV", "print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5] feature = line.split()[6] SNV_object =", "pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = [] gene_list = [] for line in f: if", "[] for line in f: if line.startswith('#'): continue i+=1 if i%500==0: print(str(i)) chrom,", "i+=1 if i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5] feature =", "# binom_p = gene.cal_Binom_p() # for SNV in gene.SNV_list: # SNV_info = ''", "continue if gene_name not in gene_name_list: gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom", "feature self.N2_read_list = [] self.CB4856_read_list = [] def add_N2_read(self, read_name): if read_name not", "= [] def add_N2_read(self, read_name): if read_name not in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self,", "self.N2_read_list += SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV in self.SNV_list: self.CB4856_read_list", "N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position = N2_position self.CB4856_position = CB4856_position self.N2_base = N2_base", "= read.alignment base = alignment.seq[read.query_position] if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for", "= pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = [] gene_list = [] for", "for column in pileups: if column.pos == CB4856_position: break for read in column.pileups:", "try: output_f = open(args.output_file,'w') except: output_f = open('snp_info_verification_output','w') f = 
open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 =", "binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return binom_p class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature):", "in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to", "N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5] feature = line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name", "+= str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += SNV.feature +'\\n' # output_file.write(SNV_info) #", "line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10] if gene_name == 'NA': continue", "gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene in gene_list:", "gene_name self.public_name = public_name self.SNV_list = [] self.N2_read_list = [] self.CB4856_read_list = []", "N2_position = int(N2_position)-1 CB4856_position = int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in", "read_list.items(): if v > 0: N2 += 1 elif v < 0: CB4856", "self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam')", "= alignment.seq[read.query_position] if base == 
N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try:", "read_list[read]+=1 except: read_list[read]=1 for SNV in self.SNV_list: for read in SNV.CB4856_read_list: try: read_list[read]-=1", "column in pileups: if column.pos == CB4856_position: break for read in column.pileups: if", "read in SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1 for SNV in self.SNV_list: for read", "= 0 CB4856 = 0 for k,v in read_list.items(): if v > 0:", "str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type = str,help='reference file", "type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam')", "gene_name_list = [] gene_list = [] for line in f: if line.startswith('#'): continue", "pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in pileups: if column.pos == N2_position: break", "try: read_list[read]-=1 except: read_list[read]=-1 return read_list def merge_N2_read_list(self): for SNV in self.SNV_list: self.N2_read_list", "def add_CB4856_read(self, read_name): if read_name not in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/'", "= gene.cal_Binom_p() # for SNV in gene.SNV_list: # SNV_info = '' # SNV_info", "for SNV in self.SNV_list: for read in SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1 for", "alignment.seq[read.query_position] if base == N2_nucleotide: 
SNV_object.add_N2_read(alignment.qname) except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for", "str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += SNV.feature +'\\n' # output_file.write(SNV_info) # output_file.close()", "bam file mapped to') args = parser.parse_args() # pdb.set_trace() try: output_f = open(args.output_file,'w')", "# 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += SNV.feature +'\\n' # output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n')", "= N2_base self.CB4856_base = CB4856_base self.feature = feature self.N2_read_list = [] self.CB4856_read_list =", "# SNV_info += SNV.feature +'\\n' # output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n')", "in read_list.items(): if v > 0: N2 += 1 elif v < 0:", "SNV in self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list =", "pileups: if column.pos == N2_position: break for read in column.pileups: if not read.is_del", "pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in pileups: if column.pos == CB4856_position:", "1 elif v < 0: CB4856 += 1 return N2, CB4856 def cal_Binom_p(self):", "CB4856_position: break for read in column.pileups: if not read.is_del and not read.is_refskip and", "gene in gene_list: # file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ 
gene.public_name+'.csv' # output_file =", "# SNV_info = '' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature # SNV_info", "SNV_object.add_N2_read(alignment.qname) except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in pileups: if column.pos", "0: alignment = read.alignment base = alignment.seq[read.query_position] if base == N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except:", "# for SNV in gene.SNV_list: # SNV_info = '' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+','", "alignment.seq[read.query_position] if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object in gene_list: if", "SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10] if gene_name == 'NA': continue if gene_name not", "= argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type =", "'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def __init__(self,", "public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position = int(CB4856_position)-1 pileups", "in pileups: if column.pos == N2_position: break for read in column.pileups: if not", "<filename>Scripts/allele_specific_expression/assign_each_read_to_each_allele_of_each_gene.py import subprocess,argparse,os from Bio import SeqIO import pysam import pdb import scipy.stats", "read.is_del and not 
read.is_refskip and read.alignment.mapq != 0: alignment = read.alignment base =", "[] self.CB4856_read_list = [] def add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list = {}", "parser.add_argument('-o_1','--output_file', help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type = str,help='reference file the", "CB4856_base self.feature = feature self.N2_read_list = [] self.CB4856_read_list = [] def add_N2_read(self, read_name):", "'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def __init__(self, gene_name, public_name): self.gene_name =", "class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position = N2_position self.CB4856_position =", "= line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10] if gene_name == 'NA':", "base = alignment.seq[read.query_position] if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object in", "read.alignment base = alignment.seq[read.query_position] if base == N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass pileups =", "'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom =", "CB4856_position, CB4856_nucleotide = line.split()[0:5] feature = line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name =", "if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object in gene_list: if 
gene_object.gene_name", "self.binom_p = binom_p return binom_p class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom", "= {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X',", "def merge_read_list(self): read_list = {} for SNV in self.SNV_list: for read in SNV.N2_read_list:", "= gene_name self.public_name = public_name self.SNV_list = [] self.N2_read_list = [] self.CB4856_read_list =", "if gene_name not in gene_name_list: gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom =", "= alignment.seq[read.query_position] if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object in gene_list:", "== CB4856_position: break for read in column.pileups: if not read.is_del and not read.is_refskip", "'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV':", "= CB4856_position self.N2_base = N2_base self.CB4856_base = CB4856_base self.feature = feature self.N2_read_list =", "# output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene in gene_list: # file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' +", "gene(): def __init__(self, gene_name, public_name): self.gene_name = gene_name self.public_name = public_name self.SNV_list =", "+= str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ #", "type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', 
help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type =", "{} for SNV in self.SNV_list: for read in SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1", "'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V',", "read_list = self.merge_read_list() N2 = 0 CB4856 = 0 for k,v in read_list.items():", "= chrom self.N2_position = N2_position self.CB4856_position = CB4856_position self.N2_base = N2_base self.CB4856_base =", "gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position", "== N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in pileups:", "SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+", "+':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+',' #", "nucleotides=['A','T','C','G'] class gene(): def __init__(self, gene_name, public_name): self.gene_name = gene_name self.public_name = public_name", "break for read in column.pileups: if not read.is_del and not read.is_refskip and read.alignment.mapq", "'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class", "= SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10] if gene_name == 
'NA': continue if gene_name", "output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene in gene_list: N2_count,CB4856_count =", "pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in pileups: if column.pos == CB4856_position: break", "self.CB4856_read_list = [] def add_N2_read(self, read_name): if read_name not in self.N2_read_list: self.N2_read_list.append(read_name) def", "self.N2_read_list = [] self.CB4856_read_list = [] def add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list", "def add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list = {} for SNV in self.SNV_list:", "from Bio import SeqIO import pysam import pdb import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I':", "self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if read_name not in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser()", "parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type", "pysam import pdb import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|',", "= '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv' # output_file = open(file_name,'w') # binom_p = 
gene.cal_Binom_p()", "= {} for SNV in self.SNV_list: for read in SNV.N2_read_list: try: read_list[read]+=1 except:", "pdb.set_trace() try: output_f = open(args.output_file,'w') except: output_f = open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2", "def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position = N2_position self.CB4856_position = CB4856_position self.N2_base", "= line.split()[8:10] if gene_name == 'NA': continue if gene_name not in gene_name_list: gene_object", "file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type = str,help='reference file the bam file", "N2 += 1 elif v < 0: CB4856 += 1 return N2, CB4856", "f: if line.startswith('#'): continue i+=1 if i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide", "except: output_f = open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856)", "str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type", "gene_name not in gene_name_list: gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom", "for read in SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1 return read_list def merge_N2_read_list(self): for", "!= 0: alignment = read.alignment base = alignment.seq[read.query_position] if base == N2_nucleotide: SNV_object.add_N2_read(alignment.qname)", "def 
add_N2_read(self, read_name): if read_name not in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if", "{'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA':", "SNV in self.SNV_list: for read in SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1 return read_list", "in gene_name_list: gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position =", "gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position = int(CB4856_position)-1 pileups =", "CB4856 = 0 for k,v in read_list.items(): if v > 0: N2 +=", "in column.pileups: if not read.is_del and not read.is_refskip and read.alignment.mapq != 0: alignment", "[] def add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list = {} for SNV in", "gene_list = [] for line in f: if line.startswith('#'): continue i+=1 if i%500==0:", "# for gene in gene_list: # file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv' #", "i=0 gene_name_list = [] gene_list = [] for line in f: if line.startswith('#'):", "[] self.CB4856_read_list = [] def add_N2_read(self, read_name): if read_name not in self.N2_read_list: self.N2_read_list.append(read_name)", "cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p", "# output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') 
output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene in gene_list: N2_count,CB4856_count", "SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position = N2_position self.CB4856_position = CB4856_position", "'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def __init__(self, gene_name, public_name): self.gene_name = gene_name self.public_name", "= '' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+','", "if read_name not in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if read_name not in", "if gene_name == 'NA': continue if gene_name not in gene_name_list: gene_object = gene(gene_name,", "in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if read_name not in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser", "try: read_list[read]+=1 except: read_list[read]=1 for SNV in self.SNV_list: for read in SNV.CB4856_read_list: try:", "= self.merge_read_list() N2 = 0 CB4856 = 0 for k,v in read_list.items(): if", "+'\\n' # output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene in gene_list:", "N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X':", "0 CB4856 = 0 for k,v in read_list.items(): if v > 0: N2", "in gene_list: # file_name = 
'/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv' # output_file = open(file_name,'w')", "add_N2_read(self, read_name): if read_name not in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if read_name", "for SNV in self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list", "open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = [] gene_list =", "= [] self.N2_read_list = [] self.CB4856_read_list = [] def add_SNV(self, SNV): self.SNV_list.append(SNV) def", "+= SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ #", "line.split()[0:5] feature = line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10] if gene_name", "def __init__(self, gene_name, public_name): self.gene_name = gene_name self.public_name = public_name self.SNV_list = []", "to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') #", "= str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') 
parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230',", "SNV_info += str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+", "read in column.pileups: if not read.is_del and not read.is_refskip and read.alignment.mapq != 0:", "in self.SNV_list: for read in SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1 for SNV in", "self.CB4856_base = CB4856_base self.feature = feature self.N2_read_list = [] self.CB4856_read_list = [] def", "base = alignment.seq[read.query_position] if base == N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1)", "read_name): if read_name not in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type", "chrom self.N2_position = N2_position self.CB4856_position = CB4856_position self.N2_base = N2_base self.CB4856_base = CB4856_base", "self.N2_position = N2_position self.CB4856_position = CB4856_position self.N2_base = N2_base self.CB4856_base = CB4856_base self.feature", "for column in pileups: if column.pos == N2_position: break for read in column.pileups:", "list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV in self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list))", "gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene in gene_list: # file_name", "merge_read_list(self): read_list = {} for SNV in self.SNV_list: for read in SNV.N2_read_list: try:", "def merge_N2_read_list(self): for SNV in self.SNV_list: 
self.N2_read_list += SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list)) def", "args = parser.parse_args() # pdb.set_trace() try: output_f = open(args.output_file,'w') except: output_f = open('snp_info_verification_output','w')", "self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list = self.merge_read_list() N2 = 0 CB4856 =", "output_f = open(args.output_file,'w') except: output_f = open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2)", "not in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped", "'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I',", "read_list[read]=1 for SNV in self.SNV_list: for read in SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1", "+= SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV in self.SNV_list: self.CB4856_read_list +=", "help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type = str,help='reference file the bam", "= str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type = str,help='reference", "'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 
'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def", "self.N2_read_list = [] self.CB4856_read_list = [] def add_N2_read(self, read_name): if read_name not in", "for line in f: if line.startswith('#'): continue i+=1 if i%500==0: print(str(i)) chrom, N2_position,", "summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type = str,help='reference file the bam file mapped to') args", "N2_count = len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return", "= scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return binom_p class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom", "parser.add_argument('-r','--reference_files', type = str,help='reference file the bam file mapped to') args = parser.parse_args()", "'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def __init__(self, gene_name, public_name): self.gene_name = gene_name self.public_name =", "binom_p return binom_p class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position =", "= CB4856_base self.feature = feature self.N2_read_list = [] self.CB4856_read_list = [] def add_N2_read(self,", "self.chrom = chrom self.N2_position = N2_position self.CB4856_position = CB4856_position self.N2_base = N2_base self.CB4856_base", "= open(args.output_file,'w') except: output_f = open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856", "Bio import SeqIO import pysam import pdb import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|',", "column.pileups: if not read.is_del and not read.is_refskip and read.alignment.mapq != 0: alignment =", 
"gene.SNV_list: # SNV_info = '' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature #", "line in f: if line.startswith('#'): continue i+=1 if i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide,", "import subprocess,argparse,os from Bio import SeqIO import pysam import pdb import scipy.stats N2_CB4856_chrom", "> 0: N2 += 1 elif v < 0: CB4856 += 1 return", "SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list = self.merge_read_list() N2 = 0 CB4856", "bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = [] gene_list = []", "'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def __init__(self, gene_name,", "gene_name, public_name): self.gene_name = gene_name self.public_name = public_name self.SNV_list = [] self.N2_read_list =", "v < 0: CB4856 += 1 return N2, CB4856 def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list()", "SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1 for SNV in self.SNV_list: for read in SNV.CB4856_read_list:", "str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output file of gene", "N2 = 0 CB4856 = 0 for k,v in read_list.items(): if v >", "gene.public_name+'.csv' # output_file = open(file_name,'w') # binom_p = gene.cal_Binom_p() # for SNV in", "gene.cal_Binom_p() # for SNV in gene.SNV_list: # SNV_info = '' # SNV_info +=", "def merge_CB4856_read_list(self): for SNV in self.SNV_list: self.CB4856_read_list += 
SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def", "file the bam file mapped to') args = parser.parse_args() # pdb.set_trace() try: output_f", "# SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+',' # #", "the bam file mapped to') args = parser.parse_args() # pdb.set_trace() try: output_f =", "output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene in gene_list: N2_count,CB4856_count = gene.count_N2_CB4856_reads() output_f.write(gene.gene_name+','+gene.public_name+','+str(N2_count)+','+str(CB4856_count)+'\\n') output_f.close() f.close()", "N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in pileups: if", "= bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in pileups: if column.pos == CB4856_position: break for", "= [] for line in f: if line.startswith('#'): continue i+=1 if i%500==0: print(str(i))", "binom_p = gene.cal_Binom_p() # for SNV in gene.SNV_list: # SNV_info = '' #", "for read in column.pileups: if not read.is_del and not read.is_refskip and read.alignment.mapq !=", "!= 0: alignment = read.alignment base = alignment.seq[read.query_position] if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname)", "# output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene in gene_list: N2_count,CB4856_count = gene.count_N2_CB4856_reads() 
output_f.write(gene.gene_name+','+gene.public_name+','+str(N2_count)+','+str(CB4856_count)+'\\n') output_f.close()", "self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p", "# SNV_info += str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += SNV.feature +'\\n' #", "N2_position self.CB4856_position = CB4856_position self.N2_base = N2_base self.CB4856_base = CB4856_base self.feature = feature", "== 'NA': continue if gene_name not in gene_name_list: gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name)", "== gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene in gene_list: #", "= 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position = int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for", "read_name not in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if read_name not in self.CB4856_read_list:", "self.SNV_list = [] self.N2_read_list = [] self.CB4856_read_list = [] def add_SNV(self, SNV): self.SNV_list.append(SNV)", "self.public_name = public_name self.SNV_list = [] self.N2_read_list = [] self.CB4856_read_list = [] def", "f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = []", "column.pos == CB4856_position: break for read in column.pileups: if not read.is_del and not", "except: pass for gene_object in gene_list: if gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object) #", "# # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += 
SNV.feature +'\\n' # output_file.write(SNV_info) # output_file.close() #", "SNV.feature +'\\n' # output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene in", "in f: if line.startswith('#'): continue i+=1 if i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position,", "gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene in gene_list: # file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/'", "'/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv' # output_file = open(file_name,'w') # binom_p = gene.cal_Binom_p() #", "self.merge_read_list() N2 = 0 CB4856 = 0 for k,v in read_list.items(): if v", "SNV_info = '' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature # SNV_info +=", "= read.alignment base = alignment.seq[read.query_position] if base == N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass pileups", "not read.is_del and not read.is_refskip and read.alignment.mapq != 0: alignment = read.alignment base", "':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += SNV.feature +'\\n'", "break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene in gene_list: # file_name =", "# output_file = open(file_name,'w') # binom_p = gene.cal_Binom_p() # for SNV in gene.SNV_list:", "int(N2_position)-1 CB4856_position = 
int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in pileups: if", "'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I':", "if read_name not in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type =", "mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1')", "self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p =", "return binom_p class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position = N2_position", "self.SNV_list: for read in SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1 for SNV in self.SNV_list:", "SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1 return read_list def merge_N2_read_list(self): for SNV in self.SNV_list:", "= 0 for k,v in read_list.items(): if v > 0: N2 += 1", "== N2_position: break for read in column.pileups: if not read.is_del and not read.is_refskip", "+= 1 elif v < 0: CB4856 += 1 return N2, CB4856 def", "file mapped to') args = parser.parse_args() # pdb.set_trace() try: output_f = open(args.output_file,'w') 
except:", "pass for gene_object in gene_list: if gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count,", "in gene_list: if gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for", "N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X':", "in SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1 return read_list def merge_N2_read_list(self): for SNV in", "subprocess,argparse,os from Bio import SeqIO import pysam import pdb import scipy.stats N2_CB4856_chrom =", "str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info", "and not read.is_refskip and read.alignment.mapq != 0: alignment = read.alignment base = alignment.seq[read.query_position]", "+= SNV.feature +'\\n' # output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene", "parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type", "# output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') 
output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene in gene_list: N2_count,CB4856_count = gene.count_N2_CB4856_reads()", "bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = [] gene_list = [] for line in", "SeqIO import pysam import pdb import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|',", "merge_N2_read_list(self): for SNV in self.SNV_list: self.N2_read_list += SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list)) def merge_CB4856_read_list(self):", "type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output file", "__init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position = N2_position self.CB4856_position = CB4856_position self.N2_base =", "v > 0: N2 += 1 elif v < 0: CB4856 += 1", "gene_name == 'NA': continue if gene_name not in gene_name_list: gene_object = gene(gene_name, public_gene_name)", "add_CB4856_read(self, read_name): if read_name not in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2',", "CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object in gene_list: if gene_object.gene_name == gene_name: break", "SNV_info += SNV.feature +'\\n' # output_file.write(SNV_info) # output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') 
output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for", "mapped to') args = parser.parse_args() # pdb.set_trace() try: output_f = open(args.output_file,'w') except: output_f", "open(args.output_file,'w') except: output_f = open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 =", "SNV in gene.SNV_list: # SNV_info = '' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # #", "import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V':", "line.startswith('#'): continue i+=1 if i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5]", "count_N2_CB4856_reads(self): read_list = self.merge_read_list() N2 = 0 CB4856 = 0 for k,v in", "'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def __init__(self, gene_name, public_name):", "to') args = parser.parse_args() # pdb.set_trace() try: output_f = open(args.output_file,'w') except: output_f =", "read_name): if read_name not in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if read_name not", "+ gene.gene_name+'_'+ gene.public_name+'.csv' # output_file = open(file_name,'w') # binom_p = gene.cal_Binom_p() # for", "= binom_p return binom_p class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position", "return read_list def merge_N2_read_list(self): for SNV in self.SNV_list: self.N2_read_list += SNV.N2_read_list self.N2_read_list =", "'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 
'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G']", "< 0: CB4856 += 1 return N2, CB4856 def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count", "= open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = [] gene_list", "gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position = int(CB4856_position)-1", "read.alignment.mapq != 0: alignment = read.alignment base = alignment.seq[read.query_position] if base == N2_nucleotide:", "'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'}", "self.SNV_list.append(SNV) def merge_read_list(self): read_list = {} for SNV in self.SNV_list: for read in", "__init__(self, gene_name, public_name): self.gene_name = gene_name self.public_name = public_name self.SNV_list = [] self.N2_read_list", "N2, CB4856 def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p", "CB4856_position self.N2_base = N2_base self.CB4856_base = CB4856_base self.feature = feature self.N2_read_list = []", "column.pos == N2_position: break for read in column.pileups: if not read.is_del and not", "self.CB4856_position = CB4856_position self.N2_base = N2_base self.CB4856_base = CB4856_base self.feature = feature self.N2_read_list", "'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 
'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom", "= gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position =", "def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count)", "CB4856_specific_read_count,binom_p\\n') # for gene in gene_list: # file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv'", "import pdb import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV':", "argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file", "continue i+=1 if i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5] feature", "HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped", "for SNV in self.SNV_list: for read in SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1 return", "not in gene_name_list: gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position", "= public_name self.SNV_list = [] self.N2_read_list = [] self.CB4856_read_list = [] def 
add_SNV(self,", "read_list def merge_N2_read_list(self): for SNV in self.SNV_list: self.N2_read_list += SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list))", "'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position = int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column", "base == N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in", "+= SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list = self.merge_read_list() N2 = 0", "if base == N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column", "# # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info +=", "= [] self.CB4856_read_list = [] def add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list =", "'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II':", "= len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return binom_p class SNV(): def", "'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III':", "N2_position: break for read in column.pileups: if not read.is_del and not read.is_refskip and", "# # +':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+',' 
# # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info +=", "i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5] feature = line.split()[6] SNV_object", "return N2, CB4856 def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list)", "read.alignment.mapq != 0: alignment = read.alignment base = alignment.seq[read.query_position] if base == CB4856_nucleotide:", "gene.gene_name+'_'+ gene.public_name+'.csv' # output_file = open(file_name,'w') # binom_p = gene.cal_Binom_p() # for SNV", "= str,help='reference file the bam file mapped to') args = parser.parse_args() # pdb.set_trace()", "N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5] feature = line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name,", "{'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA':", "SNV_info += str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += SNV.feature +'\\n' # output_file.write(SNV_info)", "not in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if read_name not in self.CB4856_read_list: self.CB4856_read_list.append(read_name)", "int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in pileups: if column.pos == N2_position:", "pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = [] gene_list = [] for line", "type = str,help='reference file the bam 
file mapped to') args = parser.parse_args() #", "to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856')", "bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in pileups: if column.pos == CB4856_position: break for read", "in SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1 for SNV in self.SNV_list: for read in", "and read.alignment.mapq != 0: alignment = read.alignment base = alignment.seq[read.query_position] if base ==", "= pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list = [] gene_list = [] for line in f:", "CB4856_nucleotide = line.split()[0:5] feature = line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10]", "except: read_list[read]=-1 return read_list def merge_N2_read_list(self): for SNV in self.SNV_list: self.N2_read_list += SNV.N2_read_list", "'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'}", "import SeqIO import pysam import pdb import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II':", "parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = 
str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output", "'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V':", "str,help='reference file the bam file mapped to') args = parser.parse_args() # pdb.set_trace() try:", "self.SNV_list: self.N2_read_list += SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV in self.SNV_list:", "read_list[read]-=1 except: read_list[read]=-1 return read_list def merge_N2_read_list(self): for SNV in self.SNV_list: self.N2_read_list +=", "bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in pileups: if column.pos == N2_position: break for read", "alignment = read.alignment base = alignment.seq[read.query_position] if base == N2_nucleotide: SNV_object.add_N2_read(alignment.qname) except: pass", "SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list = {} for SNV in self.SNV_list: for read", "self.feature = feature self.N2_read_list = [] self.CB4856_read_list = [] def add_N2_read(self, read_name): if", "def count_N2_CB4856_reads(self): read_list = self.merge_read_list() N2 = 0 CB4856 = 0 for k,v", "self.gene_name = gene_name self.public_name = public_name self.SNV_list = [] self.N2_read_list = [] self.CB4856_read_list", "scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|',", "gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1 CB4856_position = int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1)", "0: CB4856 += 1 return N2, CB4856 def 
cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count =", "for gene_object in gene_list: if gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n')", "+= 1 return N2, CB4856 def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count", "SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV in self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list", "binom_p class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom = chrom self.N2_position = N2_position self.CB4856_position", "len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return binom_p class", "1 return N2, CB4856 def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count =", "= feature self.N2_read_list = [] self.CB4856_read_list = [] def add_N2_read(self, read_name): if read_name", "= bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in pileups: if column.pos == N2_position: break for", "gene_list: if gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene", "in self.SNV_list: for read in SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1 return read_list def", "= str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') 
parser.add_argument('-o_1','--output_file', help='output file of", "output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene in gene_list: # file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+", "[] gene_list = [] for line in f: if line.startswith('#'): continue i+=1 if", "[] def add_N2_read(self, read_name): if read_name not in self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name):", "output_f = open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0", "try: for column in pileups: if column.pos == N2_position: break for read in", "if column.pos == CB4856_position: break for read in column.pileups: if not read.is_del and", "read.alignment base = alignment.seq[read.query_position] if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object", "= open(file_name,'w') # binom_p = gene.cal_Binom_p() # for SNV in gene.SNV_list: # SNV_info", "except: pass pileups = bamfile_CB4856.pileup(N2_CB4856_chrom[chrom],CB4856_position,CB4856_position+1) try: for column in pileups: if column.pos ==", "parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to", "gene_object in gene_list: if gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') #", "CB4856 += 1 return N2, CB4856 def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list)", "= list(set(self.CB4856_read_list)) def 
count_N2_CB4856_reads(self): read_list = self.merge_read_list() N2 = 0 CB4856 = 0", "open(file_name,'w') # binom_p = gene.cal_Binom_p() # for SNV in gene.SNV_list: # SNV_info =", "not read.is_refskip and read.alignment.mapq != 0: alignment = read.alignment base = alignment.seq[read.query_position] if", "SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object in gene_list: if gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object)", "CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file', help='output file of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files',", "# ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+',' # # 'CB4856_base:'+SNV.CB4856_base+':CB4856_specific_read_count:'+ # SNV_info += SNV.feature", "if i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5] feature = line.split()[6]", "# +':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+','", "if not read.is_del and not read.is_refskip and read.alignment.mapq != 0: alignment = read.alignment", "= list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV in self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list =", "of gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type = str,help='reference file the bam file mapped", "self.N2_read_list: self.N2_read_list.append(read_name) def add_CB4856_read(self, read_name): if read_name not in 
self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser =", "CB4856_position = int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in pileups: if column.pos", "'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV',", "column in pileups: if column.pos == N2_position: break for read in column.pileups: if", "pdb import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|',", "= {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III': 'gi|809001817|gb|CM003208.1|', 'CHROMOSOME_IV': 'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|',", "= int(CB4856_position)-1 pileups = bamfile_N2.pileup(chrom,N2_position,N2_position+1) try: for column in pileups: if column.pos ==", "'NA': continue if gene_name not in gene_name_list: gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object)", "= line.split()[0:5] feature = line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10] if", "if gene_object.gene_name == gene_name: break gene_object.add_SNV(SNV_object) # output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count,binom_p\\n') # for gene in", "if line.startswith('#'): continue i+=1 if i%500==0: print(str(i)) chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide =", "parser.parse_args() # pdb.set_trace() try: output_f = open(args.output_file,'w') except: output_f = 
open('snp_info_verification_output','w') f =", "self.CB4856_read_list = [] def add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list = {} for", "read_list[read]=-1 return read_list def merge_N2_read_list(self): for SNV in self.SNV_list: self.N2_read_list += SNV.N2_read_list self.N2_read_list", "for read in SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1 for SNV in self.SNV_list: for", "base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object in gene_list: if gene_object.gene_name ==", "output_file = open(file_name,'w') # binom_p = gene.cal_Binom_p() # for SNV in gene.SNV_list: #", "N2_base self.CB4856_base = CB4856_base self.feature = feature self.N2_read_list = [] self.CB4856_read_list = []", "public_name self.SNV_list = [] self.N2_read_list = [] self.CB4856_read_list = [] def add_SNV(self, SNV):", "self.N2_base = N2_base self.CB4856_base = CB4856_base self.feature = feature self.N2_read_list = [] self.CB4856_read_list", "import pysam import pdb import scipy.stats N2_CB4856_chrom = {'CHROMOSOME_I': 'gi|809001836|gb|CM003206.1|', 'CHROMOSOME_II': 'gi|809001828|gb|CM003207.1|', 'CHROMOSOME_III':", "gene_name, public_gene_name = line.split()[8:10] if gene_name == 'NA': continue if gene_name not in", "'' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+',' #", "file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv' # output_file = open(file_name,'w') # binom_p =", "k,v in read_list.items(): if v > 0: N2 += 1 elif v <", "except: read_list[read]=1 for SNV in self.SNV_list: for read in SNV.CB4856_read_list: try: read_list[read]-=1 except:", "self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list = 
self.merge_read_list() N2 =", "public_gene_name = line.split()[8:10] if gene_name == 'NA': continue if gene_name not in gene_name_list:", "if v > 0: N2 += 1 elif v < 0: CB4856 +=", "# SNV_info += str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info += str(len(SNV.CB4856_read_list))+',' # #", "'gi|809001806|gb|CM003209.1|', 'CHROMOSOME_V': 'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II',", "gene_name_list: gene_object = gene(gene_name, public_gene_name) gene_name_list.append(gene_name) gene_list.append(gene_object) chrom = 'CHROMOSOME_'+chrom N2_position = int(N2_position)-1", "try: for column in pileups: if column.pos == CB4856_position: break for read in", "SNV in self.SNV_list: for read in SNV.N2_read_list: try: read_list[read]+=1 except: read_list[read]=1 for SNV", "# parser.add_argument('-r','--reference_files', type = str,help='reference file the bam file mapped to') args =", "in self.SNV_list: self.N2_read_list += SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV in", "gene_list: # file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv' # output_file = open(file_name,'w') #", "SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' # # +':feature:'+SNV.feature # SNV_info += str(len(SNV.N2_read_list))+',' # # ':N2_base:'+SNV.N2_base+':N2_specific_read_count:'+ # SNV_info", "'gi|809001797|gb|CM003210.1|', 'CHROMOSOME_X': 'gi|809001786|gb|CM003211.1|', 'CHROMOSOME_MtDNA': 'gi|809001771|gb|CM003212.1|'} N2_CB4856_chrom = {'CHROMOSOME_I': 'I', 'CHROMOSOME_II': 'II', 'CHROMOSOME_III': 'III',", "public_name): self.gene_name = gene_name self.public_name = public_name self.SNV_list = [] self.N2_read_list = []", 
"open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list =", "pileups: if column.pos == CB4856_position: break for read in column.pileups: if not read.is_del", "CB4856 def cal_Binom_p(self): self.merge_N2_read_list() self.merge_CB4856_read_list() N2_count = len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p =", "for SNV in gene.SNV_list: # SNV_info = '' # SNV_info += SNV.chrom+','+SNV.N2_position+','+SNV.N2_base+','+SNV.CB4856_position+','+SNV.CB4856_base+',' #", "SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10] if gene_name == 'NA': continue if", "list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list = self.merge_read_list() N2 = 0 CB4856 = 0 for", "read_name not in self.CB4856_read_list: self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file", "chrom, N2_position, N2_nucleotide, CB4856_position, CB4856_nucleotide = line.split()[0:5] feature = line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature)", "class gene(): def __init__(self, gene_name, public_name): self.gene_name = gene_name self.public_name = public_name self.SNV_list", "in pileups: if column.pos == CB4856_position: break for read in column.pileups: if not", "= parser.parse_args() # pdb.set_trace() try: output_f = open(args.output_file,'w') except: output_f = open('snp_info_verification_output','w') f", "feature = line.split()[6] SNV_object = SNV(chrom,N2_position,CB4856_position,N2_nucleotide,CB4856_nucleotide,feature) gene_name, public_gene_name = line.split()[8:10] if 
gene_name ==", "SNV in self.SNV_list: self.N2_read_list += SNV.N2_read_list self.N2_read_list = list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for SNV", "CB4856_count = len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return binom_p class SNV():", "self.CB4856_read_list.append(read_name) parser = argparse.ArgumentParser() HOME_DIR='/data/home/llong35/data/PM01/65hrs_7_2-40591052/' parser.add_argument('-b_N2','--bam_file_mapped_to_N2', type = str,help='bam_file mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856',", "self.SNV_list: self.CB4856_read_list += SNV.CB4856_read_list self.CB4856_read_list = list(set(self.CB4856_read_list)) def count_N2_CB4856_reads(self): read_list = self.merge_read_list() N2", "= open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r') bamfile_N2 = pysam.AlignmentFile(args.bam_file_mapped_to_N2) bamfile_CB4856 = pysam.AlignmentFile(args.bam_file_mapped_to_CB4856) i=0 gene_name_list", "= N2_position self.CB4856_position = CB4856_position self.N2_base = N2_base self.CB4856_base = CB4856_base self.feature =", "output_file.close() # output_f.write(gene.gene_name+','+gene.public_name+','+str(len(gene.N2_read_list))+','+str(len(gene.CB4856_read_list))+','+str(gene.binom_p)+'\\n') output_f.write('gene_name,public_name,N2_specific_read_count, CB4856_specific_read_count\\n') for gene in gene_list: N2_count,CB4856_count = gene.count_N2_CB4856_reads() output_f.write(gene.gene_name+','+gene.public_name+','+str(N2_count)+','+str(CB4856_count)+'\\n')", "for k,v in read_list.items(): if v > 0: N2 += 1 elif v", "len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return binom_p class SNV(): def __init__(self,chrom,", "scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return 
binom_p class SNV(): def __init__(self,chrom, N2_position,CB4856_position,N2_base,CB4856_base,feature): self.chrom =", "N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type = str,help='snp_annotation_file_between_CB4856_WS230',default='/data/home/llong35/data/CB4856_genome/SNV_N2_CB4856') parser.add_argument('-o_1','--output_file',", "if column.pos == N2_position: break for read in column.pileups: if not read.is_del and", "[] self.N2_read_list = [] self.CB4856_read_list = [] def add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self):", "add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list = {} for SNV in self.SNV_list: for", "'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene(): def __init__(self, gene_name, public_name): self.gene_name", "for gene in gene_list: # file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv' # output_file", "alignment = read.alignment base = alignment.seq[read.query_position] if base == CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass", "= [] self.CB4856_read_list = [] def add_N2_read(self, read_name): if read_name not in self.N2_read_list:", "# pdb.set_trace() try: output_f = open(args.output_file,'w') except: output_f = open('snp_info_verification_output','w') f = open(args.snp_annotation_file_between_CB4856_WS230,'r')", "self.SNV_list: for read in SNV.CB4856_read_list: try: read_list[read]-=1 except: read_list[read]=-1 return read_list def merge_N2_read_list(self):", "0 for k,v in read_list.items(): if v > 0: N2 += 1 elif", "for SNV in self.SNV_list: self.N2_read_list += SNV.N2_read_list self.N2_read_list = 
list(set(self.N2_read_list)) def merge_CB4856_read_list(self): for", "= [] def add_SNV(self, SNV): self.SNV_list.append(SNV) def merge_read_list(self): read_list = {} for SNV", "= len(self.N2_read_list) CB4856_count = len(self.CB4856_read_list) binom_p = scipy.stats.binom_test(N2_count,N2_count+CB4856_count) self.binom_p = binom_p return binom_p", "# file_name = '/Volumes/Lijiang_data/datasets/Tajima\\'s_D_region/Mar_14_data/' + gene.gene_name+'_'+ gene.public_name+'.csv' # output_file = open(file_name,'w') # binom_p", "mapped to N2',default=HOME_DIR+'65hrs-7-2_S1_L001.bam') parser.add_argument('-b_CB4856','--bam_file_mapped_to_CB4856', type = str,help='bam_file mapped to CB4856',default=HOME_DIR+'65hrs-7-2_S1_L001_R1_001.bam') parser.add_argument('-a','--snp_annotation_file_between_CB4856_WS230', type =", "'CHROMOSOME_III': 'III', 'CHROMOSOME_IV': 'IV', 'CHROMOSOME_V': 'V', 'CHROMOSOME_X': 'X', 'CHROMOSOME_MtDNA': 'MtDNA'} nucleotides=['A','T','C','G'] class gene():", "= [] gene_list = [] for line in f: if line.startswith('#'): continue i+=1", "read.is_refskip and read.alignment.mapq != 0: alignment = read.alignment base = alignment.seq[read.query_position] if base", "read_list = {} for SNV in self.SNV_list: for read in SNV.N2_read_list: try: read_list[read]+=1", "== CB4856_nucleotide: SNV_object.add_CB4856_read(alignment.qname) except: pass for gene_object in gene_list: if gene_object.gene_name == gene_name:", "gene summary',default='/data/home/llong35/data/AE_output/7_2_1') # parser.add_argument('-r','--reference_files', type = str,help='reference file the bam file mapped to')" ]
[ "numpy as np x1 = random.uniform(-1, 1) y1 = random.uniform(-1, 1) print(str(x1) +", "as np x1 = random.uniform(-1, 1) y1 = random.uniform(-1, 1) print(str(x1) + \"\\n\"", "import numpy as np x1 = random.uniform(-1, 1) y1 = random.uniform(-1, 1) print(str(x1)", "Created on Tue Feb 2 12:21:09 2021 @author: Mahmu \"\"\" import random import", "on Tue Feb 2 12:21:09 2021 @author: Mahmu \"\"\" import random import pylab", "\"\"\" import random import pylab import numpy as np x1 = random.uniform(-1, 1)", "random import pylab import numpy as np x1 = random.uniform(-1, 1) y1 =", "-*- \"\"\" Created on Tue Feb 2 12:21:09 2021 @author: Mahmu \"\"\" import", "import pylab import numpy as np x1 = random.uniform(-1, 1) y1 = random.uniform(-1,", "import random import pylab import numpy as np x1 = random.uniform(-1, 1) y1", "2021 @author: Mahmu \"\"\" import random import pylab import numpy as np x1", "coding: utf-8 -*- \"\"\" Created on Tue Feb 2 12:21:09 2021 @author: Mahmu", "pylab import numpy as np x1 = random.uniform(-1, 1) y1 = random.uniform(-1, 1)", "x1 = random.uniform(-1, 1) y1 = random.uniform(-1, 1) print(str(x1) + \"\\n\" + str(y1))", "utf-8 -*- \"\"\" Created on Tue Feb 2 12:21:09 2021 @author: Mahmu \"\"\"", "\"\"\" Created on Tue Feb 2 12:21:09 2021 @author: Mahmu \"\"\" import random", "@author: Mahmu \"\"\" import random import pylab import numpy as np x1 =", "-*- coding: utf-8 -*- \"\"\" Created on Tue Feb 2 12:21:09 2021 @author:", "2 12:21:09 2021 @author: Mahmu \"\"\" import random import pylab import numpy as", "Tue Feb 2 12:21:09 2021 @author: Mahmu \"\"\" import random import pylab import", "np x1 = random.uniform(-1, 1) y1 = random.uniform(-1, 1) print(str(x1) + \"\\n\" +", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Feb 2 12:21:09 2021", "Feb 2 12:21:09 2021 @author: Mahmu \"\"\" import random import pylab import numpy", "12:21:09 2021 @author: Mahmu \"\"\" import random import pylab import numpy as np", "Mahmu \"\"\" import random import pylab 
import numpy as np x1 = random.uniform(-1," ]
[ "for i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if __name__ == '__main__': app =", "do_quicksort(values, 0, len(values) - 1) def do_quicksort(values, start, end): \"\"\" Sort the indicated", "do_quicksort(values, start, lo - 1) do_quicksort(values, lo + 1, end) class App: def", "end while True: # Look down from hi for a value < divider.", "frame.pack(padx=5, pady=5, fill=tk.X) label = tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W)", "the whole array. do_quicksort(values, 0, len(values) - 1) def do_quicksort(values, start, end): \"\"\"", "time def quicksort(values): \"\"\" Use quicksort to sort the array.\"\"\" # Sort the", "width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3) frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH,", "= tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame,", "self.items[i - 1], f\"Item {i} ({self.items[i]}) is smaller than item {i-1} ({self.items[i-1]})\" def", "- 1) do_quicksort(values, lo + 1, end) class App: def kill_callback(self): self.window.destroy() def", "end): \"\"\" Sort the indicated part of the array.\"\"\" # If the list", "lo + 1, end) class App: def kill_callback(self): self.window.destroy() def __init__(self): self.window =", "fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some keys. self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke()))", "the lower half. values[lo] = values[hi] # Look up from lo for a", "from lo for a value >= divider. lo += 1 while values[lo] <", "# Look up from lo for a value >= divider. 
lo += 1", "range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self): \"\"\" Sort the items.\"\"\" start_time = time.time()", "the array and # items >= divider to the end of the array.", "__init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5, pady=5,", "value we found to the lower half. values[lo] = values[hi] # Look up", "quicksort(self.items) elapsed_time = time.time() - start_time print(f\"{elapsed_time} seconds\") self.show_values() # Verify the sort.", "array.\"\"\" # If the list has no more than 1 element, it's sorted.", "out of the outer While loop. lo = hi values[hi] = divider break", "break out of the outer While loop. lo = hi values[hi] = divider", "end of the array. lo = start hi = end while True: #", "break # Move the value we found to the lower half. values[lo] =", "loop. lo = hi values[hi] = divider break # Move the value we", "= [] for i in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self): \"\"\" Sort", ">= hi: break if lo >= hi: # Put the divider here and", "lo = hi values[hi] = divider break # Move the value we found", "+= 1 while values[lo] < divider: lo += 1 if lo >= hi:", "pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some keys. self.window.bind('<Return>', (lambda e,", "in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if __name__ == '__main__': app = App() #", "smaller than item {i-1} ({self.items[i-1]})\" def show_values(self): \"\"\" Show up to 1000 values.\"\"\"", "Move items < divider to the front of the array and # items", "up from lo for a value >= divider. 
lo += 1 while values[lo]", "generate(self): \"\"\" Make random items.\"\"\" num_items = int(self.num_items_entry.get()) self.items = [] for i", "Sort the whole array. do_quicksort(values, 0, len(values) - 1) def do_quicksort(values, start, end):", "scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True)", "= hi values[hi] = divider break # Move the value we found to", "tkinter as tk import time def quicksort(values): \"\"\" Use quicksort to sort the", "the divider here and break out of the outer While loop. lo =", "Use quicksort to sort the array.\"\"\" # Sort the whole array. do_quicksort(values, 0,", "column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\")", "pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W)", "sort. for i in range(1, len(self.items)): assert self.items[i] >= self.items[i - 1], f\"Item", "tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5,", "# Look down from hi for a value < divider. while values[hi] >=", "break if lo >= hi: # Put the divider here and break out", "self.show_values() def sort(self): \"\"\" Sort the items.\"\"\" start_time = time.time() quicksort(self.items) elapsed_time =", "value < divider. 
while values[hi] >= divider: hi -= 1 if hi <=", ">= divider: hi -= 1 if hi <= lo: break if hi <=", "-= 1 if hi <= lo: break if hi <= lo: # Put", "for i in range(1, len(self.items)): assert self.items[i] >= self.items[i - 1], f\"Item {i}", "width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button = tk.Button(frame, width=8, text=\"Sort\", command=self.sort)", "and not the Python shell. self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\" Make random items.\"\"\"", "no more than 1 element, it's sorted. if start >= end: return #", "self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button =", "if hi <= lo: break if hi <= lo: # Put the divider", "items < divider to the front of the array and # items >=", "two halves. do_quicksort(values, start, lo - 1) do_quicksort(values, lo + 1, end) class", "divider: hi -= 1 if hi <= lo: break if hi <= lo:", "\"1000\") generate_button = tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button =", "in range(1, len(self.items)): assert self.items[i] >= self.items[i - 1], f\"Item {i} ({self.items[i]}) is", "hi <= lo: break if hi <= lo: # Put the divider here", "loop. values[lo] = divider break # Move the value we found to the", "out of the outer While loop. 
values[lo] = divider break # Move the", "1) do_quicksort(values, lo + 1, end) class App: def kill_callback(self): self.window.destroy() def __init__(self):", "e, button=generate_button: generate_button.invoke())) # Force focus so Alt+F4 closes this window and not", "time.time() quicksort(self.items) elapsed_time = time.time() - start_time print(f\"{elapsed_time} seconds\") self.show_values() # Verify the", "scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) #", "def quicksort(values): \"\"\" Use quicksort to sort the array.\"\"\" # Sort the whole", "self.items[i] >= self.items[i - 1], f\"Item {i} ({self.items[i]}) is smaller than item {i-1}", "\"\"\" Show up to 1000 values.\"\"\" self.listbox.delete(0, tk.END) for i in range(min(len(self.items), 1000)):", "tk import time def quicksort(values): \"\"\" Use quicksort to sort the array.\"\"\" #", "i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if __name__ == '__main__': app = App()", "tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some keys. self.window.bind('<Return>',", "Use the first item as the dividing item. divider = values[start] # Move", "1 if hi <= lo: break if hi <= lo: # Put the", "# Sort the whole array. do_quicksort(values, 0, len(values) - 1) def do_quicksort(values, start,", "self.items = [] for i in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self): \"\"\"", "# items >= divider to the end of the array. 
lo = start", "= tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label", ">= divider. lo += 1 while values[lo] < divider: lo += 1 if", "here and break out of the outer While loop. lo = hi values[hi]", "values[start] # Move items < divider to the front of the array and", "= values[hi] # Look up from lo for a value >= divider. lo", "half. values[hi] = values[lo] # Recursively sort the two halves. do_quicksort(values, start, lo", "the end of the array. lo = start hi = end while True:", "of the array and # items >= divider to the end of the", "While loop. lo = hi values[hi] = divider break # Move the value", "the value we found to the lower half. values[lo] = values[hi] # Look", "self.window.mainloop() def generate(self): \"\"\" Make random items.\"\"\" num_items = int(self.num_items_entry.get()) self.items = []", "values[lo] < divider: lo += 1 if lo >= hi: break if lo", "= tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button = tk.Button(frame, width=8,", "self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some keys. self.window.bind('<Return>', (lambda", "shell. 
self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\" Make random items.\"\"\" num_items = int(self.num_items_entry.get()) self.items", "range(1, len(self.items)): assert self.items[i] >= self.items[i - 1], f\"Item {i} ({self.items[i]}) is smaller", "Show up to 1000 values.\"\"\" self.listbox.delete(0, tk.END) for i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END,", "frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox", "closes this window and not the Python shell. self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\"", "the front of the array and # items >= divider to the end", "Look down from hi for a value < divider. while values[hi] >= divider:", "to the front of the array and # items >= divider to the", "# Recursively sort the two halves. do_quicksort(values, start, lo - 1) do_quicksort(values, lo", "divider. lo += 1 while values[lo] < divider: lo += 1 if lo", "we found to the upper half. values[hi] = values[lo] # Recursively sort the", "self.window.destroy() def __init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window)", "- start_time print(f\"{elapsed_time} seconds\") self.show_values() # Verify the sort. for i in range(1,", ">= self.items[i - 1], f\"Item {i} ({self.items[i]}) is smaller than item {i-1} ({self.items[i-1]})\"", "pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5,", "= values[lo] # Recursively sort the two halves. do_quicksort(values, start, lo - 1)", "the list has no more than 1 element, it's sorted. if start >=", "While loop. 
values[lo] = divider break # Move the value we found to", "than item {i-1} ({self.items[i-1]})\" def show_values(self): \"\"\" Show up to 1000 values.\"\"\" self.listbox.delete(0,", "this window and not the Python shell. self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\" Make", ">= end: return # Use the first item as the dividing item. divider", "array. do_quicksort(values, 0, len(values) - 1) def do_quicksort(values, start, end): \"\"\" Sort the", "[] for i in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self): \"\"\" Sort the", "0, len(values) - 1) def do_quicksort(values, start, end): \"\"\" Sort the indicated part", "Put the divider here and break out of the outer While loop. lo", "= tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12)", "# Move the value we found to the upper half. values[hi] = values[lo]", "is smaller than item {i-1} ({self.items[i-1]})\" def show_values(self): \"\"\" Show up to 1000", "= divider break # Move the value we found to the upper half.", "fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind", "class App: def kill_callback(self): self.window.destroy() def __init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback)", "divider: lo += 1 if lo >= hi: break if lo >= hi:", "fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP,", "lo: break if hi <= lo: # Put the divider here and break", "= tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) 
self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some keys.", "focus so Alt+F4 closes this window and not the Python shell. self.num_items_entry.focus_force() self.window.mainloop()", "pady=2, row=0, column=3) frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame)", "Move the value we found to the upper half. values[hi] = values[lo] #", "1 if lo >= hi: break if lo >= hi: # Put the", "found to the upper half. values[hi] = values[lo] # Recursively sort the two", "values[lo] # Recursively sort the two halves. do_quicksort(values, start, lo - 1) do_quicksort(values,", "break if hi <= lo: # Put the divider here and break out", "kill_callback(self): self.window.destroy() def __init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame =", "start >= end: return # Use the first item as the dividing item.", "first item as the dividing item. divider = values[start] # Move items <", "seconds\") self.show_values() # Verify the sort. for i in range(1, len(self.items)): assert self.items[i]", "width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame, width=8, text=\"Generate\",", "self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label = tk.Label(frame, text=\"# Items:\") label.grid(padx=5,", "element, it's sorted. if start >= end: return # Use the first item", "start, lo - 1) do_quicksort(values, lo + 1, end) class App: def kill_callback(self):", "Put the divider here and break out of the outer While loop. values[lo]", "assert self.items[i] >= self.items[i - 1], f\"Item {i} ({self.items[i]}) is smaller than item", "more than 1 element, it's sorted. 
if start >= end: return # Use", "column=3) frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y)", "sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2)", "= int(self.num_items_entry.get()) self.items = [] for i in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def", "= values[start] # Move items < divider to the front of the array", "< divider: lo += 1 if lo >= hi: break if lo >=", "+= 1 if lo >= hi: break if lo >= hi: # Put", "tk.END) for i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if __name__ == '__main__': app", "quicksort to sort the array.\"\"\" # Sort the whole array. do_quicksort(values, 0, len(values)", "1000 values.\"\"\" self.listbox.delete(0, tk.END) for i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if __name__", "len(values) - 1) def do_quicksort(values, start, end): \"\"\" Sort the indicated part of", "\"\"\" Make random items.\"\"\" num_items = int(self.num_items_entry.get()) self.items = [] for i in", "of the outer While loop. lo = hi values[hi] = divider break #", "the items.\"\"\" start_time = time.time() quicksort(self.items) elapsed_time = time.time() - start_time print(f\"{elapsed_time} seconds\")", "as tk import time def quicksort(values): \"\"\" Use quicksort to sort the array.\"\"\"", "as the dividing item. divider = values[start] # Move items < divider to", "found to the lower half. values[lo] = values[hi] # Look up from lo", ">= divider to the end of the array. lo = start hi =", "up to 1000 values.\"\"\" self.listbox.delete(0, tk.END) for i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i])", "a value < divider. 
while values[hi] >= divider: hi -= 1 if hi", "for a value >= divider. lo += 1 while values[lo] < divider: lo", "indicated part of the array.\"\"\" # If the list has no more than", "1) def do_quicksort(values, start, end): \"\"\" Sort the indicated part of the array.\"\"\"", "do_quicksort(values, lo + 1, end) class App: def kill_callback(self): self.window.destroy() def __init__(self): self.window", "Items:\") label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0,", "Recursively sort the two halves. do_quicksort(values, start, lo - 1) do_quicksort(values, lo +", "self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label = tk.Label(frame, text=\"#", "Alt+F4 closes this window and not the Python shell. self.num_items_entry.focus_force() self.window.mainloop() def generate(self):", "i in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self): \"\"\" Sort the items.\"\"\" start_time", "def __init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5,", "({self.items[i-1]})\" def show_values(self): \"\"\" Show up to 1000 values.\"\"\" self.listbox.delete(0, tk.END) for i", "the indicated part of the array.\"\"\" # If the list has no more", "sort(self): \"\"\" Sort the items.\"\"\" start_time = time.time() quicksort(self.items) elapsed_time = time.time() -", "upper half. values[hi] = values[lo] # Recursively sort the two halves. 
do_quicksort(values, start,", "divider to the front of the array and # items >= divider to", "def generate(self): \"\"\" Make random items.\"\"\" num_items = int(self.num_items_entry.get()) self.items = [] for", "# Put the divider here and break out of the outer While loop.", "hi <= lo: # Put the divider here and break out of the", "If the list has no more than 1 element, it's sorted. if start", "to the lower half. values[lo] = values[hi] # Look up from lo for", "len(self.items)): assert self.items[i] >= self.items[i - 1], f\"Item {i} ({self.items[i]}) is smaller than", "for a value < divider. while values[hi] >= divider: hi -= 1 if", "1 while values[lo] < divider: lo += 1 if lo >= hi: break", "pady=2, row=0, column=2) sort_button = tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3)", "= tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label = tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2, row=0,", "hi: # Put the divider here and break out of the outer While", "break out of the outer While loop. values[lo] = divider break # Move", "def kill_callback(self): self.window.destroy() def __init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame", "Look up from lo for a value >= divider. lo += 1 while", "array.\"\"\" # Sort the whole array. do_quicksort(values, 0, len(values) - 1) def do_quicksort(values,", "and break out of the outer While loop. values[lo] = divider break #", "lo += 1 if lo >= hi: break if lo >= hi: #", "Force focus so Alt+F4 closes this window and not the Python shell. 
self.num_items_entry.focus_force()", "text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3) frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True)", "= tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set)", "text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button = tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5,", "sort the array.\"\"\" # Sort the whole array. do_quicksort(values, 0, len(values) - 1)", "frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5,", "to the upper half. values[hi] = values[lo] # Recursively sort the two halves.", "array. lo = start hi = end while True: # Look down from", "to the end of the array. lo = start hi = end while", "of the array.\"\"\" # If the list has no more than 1 element,", "and break out of the outer While loop. lo = hi values[hi] =", "import tkinter as tk import time def quicksort(values): \"\"\" Use quicksort to sort", "generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button = tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0,", "< divider to the front of the array and # items >= divider", "1 element, it's sorted. if start >= end: return # Use the first", "lo += 1 while values[lo] < divider: lo += 1 if lo >=", "the divider here and break out of the outer While loop. values[lo] =", "= start hi = end while True: # Look down from hi for", "the upper half. values[hi] = values[lo] # Recursively sort the two halves. 
do_quicksort(values,", "values[lo] = values[hi] # Look up from lo for a value >= divider.", "# Move items < divider to the front of the array and #", "and # items >= divider to the end of the array. lo =", "True: # Look down from hi for a value < divider. while values[hi]", "{i} ({self.items[i]}) is smaller than item {i-1} ({self.items[i-1]})\" def show_values(self): \"\"\" Show up", "self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label = tk.Label(frame,", "lo: # Put the divider here and break out of the outer While", "down from hi for a value < divider. while values[hi] >= divider: hi", "the array.\"\"\" # Sort the whole array. do_quicksort(values, 0, len(values) - 1) def", "has no more than 1 element, it's sorted. if start >= end: return", "divider to the end of the array. lo = start hi = end", "here and break out of the outer While loop. values[lo] = divider break", "lo - 1) do_quicksort(values, lo + 1, end) class App: def kill_callback(self): self.window.destroy()", "end) class App: def kill_callback(self): self.window.destroy() def __init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\",", "row=0, column=2) sort_button = tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3) frame", "row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2,", "# Force focus so Alt+F4 closes this window and not the Python shell.", "if lo >= hi: # Put the divider here and break out of", "items >= divider to the end of the array. lo = start hi", "divider break # Move the value we found to the lower half. 
values[lo]", "values.\"\"\" self.listbox.delete(0, tk.END) for i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if __name__ ==", "Verify the sort. for i in range(1, len(self.items)): assert self.items[i] >= self.items[i -", "column=2) sort_button = tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3) frame =", "generate_button = tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button = tk.Button(frame,", "item. divider = values[start] # Move items < divider to the front of", "< divider. while values[hi] >= divider: hi -= 1 if hi <= lo:", "self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label = tk.Label(frame, text=\"# Items:\")", "side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some keys. self.window.bind('<Return>', (lambda e, button=generate_button:", "half. values[lo] = values[hi] # Look up from lo for a value >=", "outer While loop. values[lo] = divider break # Move the value we found", "= divider break # Move the value we found to the lower half.", "frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label = tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2,", "scrollbar.config(command=self.listbox.yview) # Bind some keys. self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke())) # Force focus", "print(f\"{elapsed_time} seconds\") self.show_values() # Verify the sort. 
for i in range(1, len(self.items)): assert", "while values[lo] < divider: lo += 1 if lo >= hi: break if", "tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label =", "quicksort(values): \"\"\" Use quicksort to sort the array.\"\"\" # Sort the whole array.", "expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH,", "random items.\"\"\" num_items = int(self.num_items_entry.get()) self.items = [] for i in range(num_items): self.items.append(random.randint(100000,", "divider = values[start] # Move items < divider to the front of the", "tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3) frame = tk.Frame(self.window) frame.pack(padx=5, pady=5,", "command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3) frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar", "tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X) label = tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2, row=0, column=0,", "value >= divider. lo += 1 while values[lo] < divider: lo += 1", "array and # items >= divider to the end of the array. lo", "label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1,", "whole array. do_quicksort(values, 0, len(values) - 1) def do_quicksort(values, start, end): \"\"\" Sort", "- 1) def do_quicksort(values, start, end): \"\"\" Sort the indicated part of the", "Python shell. self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\" Make random items.\"\"\" num_items = int(self.num_items_entry.get())", "sort the two halves. 
do_quicksort(values, start, lo - 1) do_quicksort(values, lo + 1,", "part of the array.\"\"\" # If the list has no more than 1", "06/python/quicksort_in_place.py import tkinter as tk import time def quicksort(values): \"\"\" Use quicksort to", "of the array. lo = start hi = end while True: # Look", "from hi for a value < divider. while values[hi] >= divider: hi -=", "tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview)", "a value >= divider. lo += 1 while values[lo] < divider: lo +=", "self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame, width=8, text=\"Generate\", command=self.generate)", "keys. self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke())) # Force focus so Alt+F4 closes this", "range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if __name__ == '__main__': app = App() # app.root.destroy()", "item {i-1} ({self.items[i-1]})\" def show_values(self): \"\"\" Show up to 1000 values.\"\"\" self.listbox.delete(0, tk.END)", "the dividing item. divider = values[start] # Move items < divider to the", "self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self): \"\"\" Sort the items.\"\"\" start_time = time.time() quicksort(self.items)", "hi for a value < divider. while values[hi] >= divider: hi -= 1", "values[hi] # Look up from lo for a value >= divider. lo +=", "return # Use the first item as the dividing item. divider = values[start]", "the first item as the dividing item. divider = values[start] # Move items", "outer While loop. lo = hi values[hi] = divider break # Move the", "while True: # Look down from hi for a value < divider. 
while", "= time.time() quicksort(self.items) elapsed_time = time.time() - start_time print(f\"{elapsed_time} seconds\") self.show_values() # Verify", "= tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox =", "tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame)", "# Move the value we found to the lower half. values[lo] = values[hi]", "self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some keys. self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke())) # Force", "the array.\"\"\" # If the list has no more than 1 element, it's", "value we found to the upper half. values[hi] = values[lo] # Recursively sort", "sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button", "- 1], f\"Item {i} ({self.items[i]}) is smaller than item {i-1} ({self.items[i-1]})\" def show_values(self):", "def do_quicksort(values, start, end): \"\"\" Sort the indicated part of the array.\"\"\" #", "do_quicksort(values, start, end): \"\"\" Sort the indicated part of the array.\"\"\" # If", "halves. do_quicksort(values, start, lo - 1) do_quicksort(values, lo + 1, end) class App:", "label = tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame,", "so Alt+F4 closes this window and not the Python shell. 
self.num_items_entry.focus_force() self.window.mainloop() def", "import time def quicksort(values): \"\"\" Use quicksort to sort the array.\"\"\" # Sort", "self.listbox.delete(0, tk.END) for i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if __name__ == '__main__':", "pady=5, fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT, fill=tk.Y) self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2,", "lo >= hi: # Put the divider here and break out of the", "to 1000 values.\"\"\" self.listbox.delete(0, tk.END) for i in range(min(len(self.items), 1000)): self.listbox.insert(tk.END, self.items[i]) if", "# If the list has no more than 1 element, it's sorted. if", "hi: break if lo >= hi: # Put the divider here and break", "def show_values(self): \"\"\" Show up to 1000 values.\"\"\" self.listbox.delete(0, tk.END) for i in", "some keys. self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke())) # Force focus so Alt+F4 closes", "row=0, column=3) frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar = tk.Scrollbar(frame) scrollbar.pack(side=tk.RIGHT,", "self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button", "App: def kill_callback(self): self.window.destroy() def __init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\")", "# Use the first item as the dividing item. divider = values[start] #", "Sort the items.\"\"\" start_time = time.time() quicksort(self.items) elapsed_time = time.time() - start_time print(f\"{elapsed_time}", "# Verify the sort. for i in range(1, len(self.items)): assert self.items[i] >= self.items[i", "it's sorted. 
if start >= end: return # Use the first item as", "front of the array and # items >= divider to the end of", "sort_button.grid(padx=5, pady=2, row=0, column=3) frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.BOTH, expand=True) scrollbar =", "of the outer While loop. values[lo] = divider break # Move the value", "elapsed_time = time.time() - start_time print(f\"{elapsed_time} seconds\") self.show_values() # Verify the sort. for", "than 1 element, it's sorted. if start >= end: return # Use the", "sorted. if start >= end: return # Use the first item as the", "num_items = int(self.num_items_entry.get()) self.items = [] for i in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values()", "end: return # Use the first item as the dividing item. divider =", "hi -= 1 if hi <= lo: break if hi <= lo: #", "int(self.num_items_entry.get()) self.items = [] for i in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self):", "def sort(self): \"\"\" Sort the items.\"\"\" start_time = time.time() quicksort(self.items) elapsed_time = time.time()", "break # Move the value we found to the upper half. values[hi] =", "lower half. 
values[lo] = values[hi] # Look up from lo for a value", "for i in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self): \"\"\" Sort the items.\"\"\"", "self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\" Make random items.\"\"\" num_items = int(self.num_items_entry.get()) self.items =", "column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0,", "tk.Button(frame, width=8, text=\"Generate\", command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button = tk.Button(frame, width=8, text=\"Sort\",", "values[hi] = values[lo] # Recursively sort the two halves. do_quicksort(values, start, lo -", "divider. while values[hi] >= divider: hi -= 1 if hi <= lo: break", "values[lo] = divider break # Move the value we found to the lower", "time.time() - start_time print(f\"{elapsed_time} seconds\") self.show_values() # Verify the sort. for i in", "items.\"\"\" num_items = int(self.num_items_entry.get()) self.items = [] for i in range(num_items): self.items.append(random.randint(100000, 999999))", "# Bind some keys. self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke())) # Force focus so", "\"\"\" Sort the items.\"\"\" start_time = time.time() quicksort(self.items) elapsed_time = time.time() - start_time", "<= lo: break if hi <= lo: # Put the divider here and", "divider here and break out of the outer While loop. lo = hi", "values[hi] = divider break # Move the value we found to the upper", "the Python shell. 
self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\" Make random items.\"\"\" num_items =", "self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke())) # Force focus so Alt+F4 closes this window", "start hi = end while True: # Look down from hi for a", "lo for a value >= divider. lo += 1 while values[lo] < divider:", "show_values(self): \"\"\" Show up to 1000 values.\"\"\" self.listbox.delete(0, tk.END) for i in range(min(len(self.items),", "lo = start hi = end while True: # Look down from hi", "hi = end while True: # Look down from hi for a value", "we found to the lower half. values[lo] = values[hi] # Look up from", "+ 1, end) class App: def kill_callback(self): self.window.destroy() def __init__(self): self.window = tk.Tk()", "button=generate_button: generate_button.invoke())) # Force focus so Alt+F4 closes this window and not the", "Move the value we found to the lower half. values[lo] = values[hi] #", "<reponame>bqmoreland/EASwift<filename>algs2e_python/Chapter 06/python/quicksort_in_place.py import tkinter as tk import time def quicksort(values): \"\"\" Use quicksort", "hi values[hi] = divider break # Move the value we found to the", "expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some keys. self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke())) #", "(lambda e, button=generate_button: generate_button.invoke())) # Force focus so Alt+F4 closes this window and", "item as the dividing item. divider = values[start] # Move items < divider", "start, end): \"\"\" Sort the indicated part of the array.\"\"\" # If the", "the outer While loop. 
values[lo] = divider break # Move the value we", "({self.items[i]}) is smaller than item {i-1} ({self.items[i-1]})\" def show_values(self): \"\"\" Show up to", "\"\"\" Sort the indicated part of the array.\"\"\" # If the list has", "1], f\"Item {i} ({self.items[i]}) is smaller than item {i-1} ({self.items[i-1]})\" def show_values(self): \"\"\"", "items.\"\"\" start_time = time.time() quicksort(self.items) elapsed_time = time.time() - start_time print(f\"{elapsed_time} seconds\") self.show_values()", "text=\"# Items:\") label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2,", "= time.time() - start_time print(f\"{elapsed_time} seconds\") self.show_values() # Verify the sort. for i", "to sort the array.\"\"\" # Sort the whole array. do_quicksort(values, 0, len(values) -", "the two halves. do_quicksort(values, start, lo - 1) do_quicksort(values, lo + 1, end)", "divider break # Move the value we found to the upper half. values[hi]", "if hi <= lo: # Put the divider here and break out of", "divider here and break out of the outer While loop. 
values[lo] = divider", "self.window = tk.Tk() self.window.title(\"quicksort_in_place\") self.window.protocol(\"WM_sort_WINDOW\", self.kill_callback) self.window.geometry(\"300x300\") frame = tk.Frame(self.window) frame.pack(padx=5, pady=5, fill=tk.X)", "generate_button.invoke())) # Force focus so Alt+F4 closes this window and not the Python", "lo >= hi: break if lo >= hi: # Put the divider here", "1, end) class App: def kill_callback(self): self.window.destroy() def __init__(self): self.window = tk.Tk() self.window.title(\"quicksort_in_place\")", "command=self.generate) generate_button.grid(padx=5, pady=2, row=0, column=2) sort_button = tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2,", "fill=tk.X) label = tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry =", "<= lo: # Put the divider here and break out of the outer", ">= hi: # Put the divider here and break out of the outer", "the sort. for i in range(1, len(self.items)): assert self.items[i] >= self.items[i - 1],", "Bind some keys. self.window.bind('<Return>', (lambda e, button=generate_button: generate_button.invoke())) # Force focus so Alt+F4", "dividing item. 
divider = values[start] # Move items < divider to the front", "while values[hi] >= divider: hi -= 1 if hi <= lo: break if", "pady=5, fill=tk.X) label = tk.Label(frame, text=\"# Items:\") label.grid(padx=5, pady=2, row=0, column=0, sticky=tk.W) self.num_items_entry", "if start >= end: return # Use the first item as the dividing", "row=0, column=0, sticky=tk.W) self.num_items_entry = tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0,", "999999)) self.show_values() def sort(self): \"\"\" Sort the items.\"\"\" start_time = time.time() quicksort(self.items) elapsed_time", "= tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3) frame = tk.Frame(self.window) frame.pack(padx=5,", "i in range(1, len(self.items)): assert self.items[i] >= self.items[i - 1], f\"Item {i} ({self.items[i]})", "{i-1} ({self.items[i-1]})\" def show_values(self): \"\"\" Show up to 1000 values.\"\"\" self.listbox.delete(0, tk.END) for", "the array. lo = start hi = end while True: # Look down", "the outer While loop. lo = hi values[hi] = divider break # Move", "window and not the Python shell. self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\" Make random", "= end while True: # Look down from hi for a value <", "the value we found to the upper half. 
values[hi] = values[lo] # Recursively", "self.listbox = tk.Listbox(frame) self.listbox.pack(padx=5, pady=2, side=tk.TOP, fill=tk.BOTH, expand=True) self.listbox.config(yscrollcommand=scrollbar.set) scrollbar.config(command=self.listbox.yview) # Bind some", "Make random items.\"\"\" num_items = int(self.num_items_entry.get()) self.items = [] for i in range(num_items):", "tk.Entry(frame, width=12) self.num_items_entry.grid(padx=5, pady=2, row=0, column=1, sticky=tk.W) self.num_items_entry.insert(0, \"1000\") generate_button = tk.Button(frame, width=8,", "sort_button = tk.Button(frame, width=8, text=\"Sort\", command=self.sort) sort_button.grid(padx=5, pady=2, row=0, column=3) frame = tk.Frame(self.window)", "f\"Item {i} ({self.items[i]}) is smaller than item {i-1} ({self.items[i-1]})\" def show_values(self): \"\"\" Show", "values[hi] >= divider: hi -= 1 if hi <= lo: break if hi", "start_time = time.time() quicksort(self.items) elapsed_time = time.time() - start_time print(f\"{elapsed_time} seconds\") self.show_values() #", "Sort the indicated part of the array.\"\"\" # If the list has no", "self.show_values() # Verify the sort. for i in range(1, len(self.items)): assert self.items[i] >=", "start_time print(f\"{elapsed_time} seconds\") self.show_values() # Verify the sort. for i in range(1, len(self.items)):", "in range(num_items): self.items.append(random.randint(100000, 999999)) self.show_values() def sort(self): \"\"\" Sort the items.\"\"\" start_time =", "\"\"\" Use quicksort to sort the array.\"\"\" # Sort the whole array. do_quicksort(values,", "not the Python shell. self.num_items_entry.focus_force() self.window.mainloop() def generate(self): \"\"\" Make random items.\"\"\" num_items", "if lo >= hi: break if lo >= hi: # Put the divider", "list has no more than 1 element, it's sorted. if start >= end:" ]
[ "= False intents.emojis = False intents.typing = False intents.integrations = False intents.webhooks =", "json = { \"kind\": \"prefixes\", \"values\" : ['$']} settings.insert_one(json) json = {\"kind\": \"member_role\",", "2: return msg = reaction.message # this handling is not for DMs #", "ctx.send(\"You need to allow DMs!\") error = None @bot.command(name=\"proof\", help = \"Allows to", "proof. Requires the name of the vtuber. Only available in DMs\", brief =", "commands.MissingRequiredArgument): await ctx.send(\"Please include the server name!\") embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed)", "discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be called with just $verify but also with", "m.author == user and m.channel == channel date_msg = await bot.wait_for('message', check=check) await", "@send_dm.error async def dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need to allow", "the DMs\" ) @commands.dm_only() async def verify(ctx, *vtuber): \"\"\" Command in the DMs", "mention_author=False) if automatic_role: await member_handler.del_membership(msg, target_member_id, None, False) await msg.clear_reactions() await msg.add_reaction(emoji='👎') else:", "= MongoClient(db_url.format(db_user, db_pass)) # set up classes member_handler = MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot,", "the bot joins a server. 
\"\"\" print(\"Joined new Guild: \" + str(guild.id)) dbnames", "brief=\" Tries to verify a screenshot for membership in the DMs\" ) @commands.dm_only()", "= db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}}) @bot.event async def on_raw_reaction_add(payload):", "than the bot should be processed if reaction: if reaction.count != 2: return", "# log content to dm log channel for record dm_lg_ch = bot.get_channel(dm_log) await", "elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command should not be used in the DMs\")", "user using the command.\", brief=\"Sends a DM to the user\") async def send_dm(ctx):", "=bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment and message to membership verification channel desc =", "error): if isinstance(error, commands.BadArgument): await ctx.send(\"Please do only send a valid name\") elif", "embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\")", "error @bot.event async def on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id) print('------') @bot.event async", "'✅': if not automatic_role: membership_date = embed.fields[0].value # set membership await member_handler.set_membership(msg, target_member_id,", "to the user using the command.\", brief=\"Sends a DM to the user\") async", "bot = commands.Bot(command_prefix=determine_prefix, description='Bot to verify and manage Memberships.\\nlogChannel, Vtuber name and memberRole", "record dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url) if", "= ctx.author.id embed = discord.Embed(title = title, description = None, colour = embed_color)", "in 
ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber: server = map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message,", "= now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except ConnectionResetError: print(\"Could not update", "@commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name = \"forceCheck\")", "import timezone, timedelta import re #Internal from membership_handling import MembershipHandler from settings import", "= await bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎') except", "date from the screenshot in the format dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False) def", "valid supported VTuber!\", embed = embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx,", "@bot.event async def on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id) print('------') @bot.event async def", "embed=embed) def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch':", "False intents.voice_states = False intents.guild_typing = False async def determine_prefix(bot, message): if isinstance(message.channel,", "{\"kind\": \"log_channel\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"mod_role\", \"value\" : 0}", "of the vtuber. 
Only available in DMs\", brief = \"Send additional proof\") @commands.dm_only()", "ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber: server = map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message, server)", "in serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content = None,", "supported idols so that memberships are not checked. \"\"\" print(\"Left Guild: \" +", "async def on_command_error(ctx, error): if isinstance(error, CommandNotFound): # Ignore this error pass elif", "additional proof was delivered safely!\") @send_proof.error async def proof_error(ctx, error): if isinstance(error, commands.BadArgument):", "print(\"Left Guild: \" + str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols':", "it again.\", reference=msg, mention_author=False) else: m = \"Please write the correct date from", "the format dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False) def check(m): return m.author == user", "{\"kind\": \"inform_duration\", \"value\" : 1} settings.insert_one(json) @bot.event async def on_guild_remove(guild): \"\"\" Removes the", "\"value\" : 1} settings.insert_one(json) @bot.event async def on_guild_remove(guild): \"\"\" Removes the guild from", "return if msg.embeds: embed = msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always only", "for record dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url)", "to dm log channel for record dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment", 
"reaction.count != 2: return msg = reaction.message # this handling is not for", "\"supported_idols\"})['supported_idols'] #create Embed embed = discord.Embed(title = title, description = text, colour =", "# Set variable to true for local testing local = False # Customizable", "async def dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need to allow DMs!\")", "elif hasattr(ctx.command, 'on_error'): #skip already locally handled errors pass else: raise error @bot.event", "discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the first react by somebody else than the bot", "elif reaction.emoji == u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text = \"Is there an issue", "os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) # Intents intents", "message that will be sent to the User.\", reference=msg, mention_author=False) def check(m): return", "asyncio.sleep(60) except ConnectionResetError: print(\"Could not update JST Clock!\") # List Coroutines to be", "guild.id}}}) @bot.event async def on_raw_reaction_add(payload): # get reaction from payload if not payload.guild_id:", "@bot.event async def on_guild_remove(guild): \"\"\" Removes the guild from the supported idols so", "import Utility from ocr import OCR from sending import Sending from pymongo import", "if await Utility.confirm_action(confirm_msg, user): confirm_msg = await channel.send(\"Please write a message that will", "'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}}) @bot.event async def on_raw_reaction_add(payload): # get reaction", "Requires the name of the vtuber. 
Only available in DMs\", brief = \"Send", "desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title = ctx.author.id embed = discord.Embed(title = title,", "import Sending from pymongo import MongoClient import os ### Setup data # Set", "import Membership from utility import Utility from ocr import OCR from sending import", ":no_entry_sign:\" confirm_msg = await channel.send(text, reference=msg, mention_author=False) if await Utility.confirm_action(confirm_msg, user): confirm_msg =", "brief=\"Sends a DM to the user\") async def send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error async", "do only send a valid name\") elif isinstance(error, commands.MissingRequiredArgument): await ctx.send(\"Please include the", "= msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always only the id target_member_id =", "reaction.emoji == u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text = \"Is there an issue with", "isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need to allow DMs!\") error = None @bot.command(name=\"proof\", help", "no date on screenshot) -> :white_check_mark:\\n\" text += \"Or is the date recognized", "verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command only works in DMs!\") @bot.command(hidden", "bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot to verify and manage Memberships.\\nlogChannel, Vtuber name and", "user and m.channel == channel date_msg = await bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id,", "settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}}) @bot.event async def on_raw_reaction_add(payload): # get", "from ocr import OCR from sending import Sending from pymongo import MongoClient import", "True, name = \"forceCheck\") @commands.is_owner() async 
def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden = True,", "discord from discord.ext import commands from discord.ext.commands.errors import CommandNotFound #Python import asyncio from", "a DM to the user\") async def send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error async def", "check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name = \"forceCheck\") @commands.is_owner() async def", "screenshot in the format dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False) def check(m): return m.author", "dbnames = db_cluster.list_database_names() if not str(guild.id) in dbnames: new_guild_db = db_cluster[str(guild.id)] settings =", "= os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) # Intents intents = discord.Intents.default() intents.members = True", "proof!\") return server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment and message", "error): if isinstance(error, CommandNotFound): # Ignore this error pass elif isinstance(error, commands.MissingPermissions): await", "= False intents.voice_states = False intents.guild_typing = False async def determine_prefix(bot, message): if", "async def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name = \"forceCheck\") @commands.is_owner()", "the user using the command.\", brief=\"Sends a DM to the user\") async def", "embed = embed) #send confirmation await ctx.send(\"Your additional proof was delivered safely!\") @send_proof.error", "to be executed coroutines = ( jst_clock(), member_handler.check_membership_routine(), ) # Main Coroutine async", "( jst_clock(), 
member_handler.check_membership_routine(), ) # Main Coroutine async def background_main(): await bot.wait_until_ready() await", "= \"checkIdols\") @commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name", "ctx.send(content =\"Please use a valid supported VTuber!\", embed = embed) else: await member_handler.verify_membership_with_server_detection(ctx.message)", "embed = Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use a valid supported VTuber!\", embed =", "= False intents.typing = False intents.integrations = False intents.webhooks = False intents.voice_states =", "name>\\n\" + \"Both versions require a screenshot sent with it.\", brief=\" Tries to", "\\\"hi\\\" to the user using the command.\", brief=\"Sends a DM to the user\")", "help=\"Can be called with just $verify but also with $verify <VTuber name>\\n\" +", "{\"kind\": \"tolerance_duration\", \"value\" : 1} settings.insert_one(json) json = {\"kind\": \"inform_duration\", \"value\" : 1}", "in DMs!\") @bot.command(hidden = True, name = \"checkIdols\") @commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed()", "False} settings.insert_one(json) json = {\"kind\": \"require_additional_proof\", \"value\" : False} settings.insert_one(json) json = {\"kind\":", "local = False # Customizable Settings # For local testing token = os.<PASSWORD>(\"TOKEN\")", "False intents.integrations = False intents.webhooks = False intents.voice_states = False intents.guild_typing = False", "a valid supported VTuber!\", embed = embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def", "set!', intents=intents, case_insensitive=True, owner_id=owner_id) # database settings db_cluster = MongoClient(db_url.format(db_user, db_pass)) # set", "ctx.send(\"Please include a screenshot of the proof!\") 
return server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\":", "= discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the first react by somebody else than the", "always only the id target_member_id = int(embed.title) if reaction.emoji == '✅': if not", "attachment and message to membership verification channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title", "'supported_idols' in result: return result['supported_idols'][0]['guild_id'] #Time in status async def jst_clock(): while not", "= False intents.integrations = False intents.webhooks = False intents.voice_states = False intents.guild_typing =", "utility import Utility from ocr import OCR from sending import Sending from pymongo", "if not ctx.message.attachments: await ctx.send(\"Please include a screenshot of the proof!\") return server_id", "await channel.send(\"The reaction took too long! Please remove you reaction from this message", "### Setup data # Set variable to true for local testing local =", "isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild = message.guild if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"]", "msg = await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the first react", "await msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be called with", "json = {\"kind\": \"member_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"log_channel\", \"value\"", "Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols'", "!= 2: return msg = 
reaction.message # this handling is not for DMs", "None @bot.command(name=\"proof\", help = \"Allows to send additional proof. Requires the name of", "json = {\"kind\": \"log_channel\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"mod_role\", \"value\"", "= \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title = ctx.author.id embed = discord.Embed(title = title, description", "= embed.fields[0].value # set membership await member_handler.set_membership(msg, target_member_id, membership_date) #always clear await msg.clear_reactions()", "1} settings.insert_one(json) @bot.event async def on_guild_remove(guild): \"\"\" Removes the guild from the supported", "settings_db = db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name' : name}}})", "DMs!\") @bot.command(hidden = True, name = \"checkIdols\") @commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed() await", "await dm_lg_ch.send(attachment.url) if vtuber: server = map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message, server) else:", "await target_member.send(text_msg.content) await channel.send(\"Message was sent to user.\", reference=text_msg, mention_author=False) if automatic_role: await", "await lg_ch.send(content = None, embed = embed) @bot.command(name = \"dmMe\", help=\"Sends a DM", "= False intents.guild_typing = False async def determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel): return", "dm log channel for record dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in", "Ignore this error pass elif isinstance(error, commands.MissingPermissions): await ctx.send(\"You are not allowed to", "9) timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except 
ConnectionResetError: print(\"Could", "bot.is_closed(): try: now = dtime.now(tz = timezone.utc) + timedelta(hours = 9) timestr =", "a screenshot sent with it.\", brief=\" Tries to verify a screenshot for membership", "embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result =", "text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed embed = discord.Embed(title = title, description", "timezone, timedelta import re #Internal from membership_handling import MembershipHandler from settings import Settings", "return prefixes return \"$\" # Set up bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot to", "took too long! Please remove you reaction from this message and add it", "server_db = db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content = None, embed =", "send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error async def dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You", "DM to the user\") async def send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error async def dm_error(ctx,", "date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be called", "in the DMs that tries to verify a screenshot for membership. 
\"\"\" #", "= \"Is there an issue with the proof (Faked or no date on", "\"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json = {\"kind\": \"automatic_role\", \"value\" : False} settings.insert_one(json) json", "= map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message, server) else: embed = Utility.create_supported_vtuber_embed() await ctx.send(content", "= None, embed = embed) @bot.command(name = \"dmMe\", help=\"Sends a DM containg \\\"hi\\\"", "automatic_role: await member_handler.del_membership(msg, target_member_id, None, False) await msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1)", "reference=msg, mention_author=False) else: m = \"Please write the correct date from the screenshot", "membership_date = embed.fields[0].value # set membership await member_handler.set_membership(msg, target_member_id, membership_date) #always clear await", "membership_handling import MembershipHandler from settings import Settings from membership import Membership from utility", "date on screenshot) -> :white_check_mark:\\n\" text += \"Or is the date recognized incorrectly/was", "name of the vtuber. 
Only available in DMs\", brief = \"Send additional proof\")", "def determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild = message.guild if guild:", "just $verify but also with $verify <VTuber name>\\n\" + \"Both versions require a", "\"\"\" Removes the guild from the supported idols so that memberships are not", "ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols' : {", "+= \"Or is the date recognized incorrectly/was not recognized -> :no_entry_sign:\" confirm_msg =", "await member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name = \"broadcast\") @commands.is_owner() async def broadcast(ctx, title,", "def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name'", "*vtuber): \"\"\" Command in the DMs that tries to verify a screenshot for", "await ctx.send(\"You are not allowed to use this command!\") elif isinstance(error, commands.NoPrivateMessage): await", "\"kind\": \"prefixes\", \"values\" : ['$']} settings.insert_one(json) json = {\"kind\": \"member_role\", \"value\" : 0}", "embed_color) #send to every server for server in serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch", "channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title = ctx.author.id embed = discord.Embed(title =", "'$elemMatch': {'name' : name}}}) if 'supported_idols' in result: return result['supported_idols'][0]['guild_id'] #Time in status", "\" + str(guild.id)) dbnames = db_cluster.list_database_names() if not str(guild.id) in dbnames: new_guild_db =", "def on_raw_reaction_add(payload): # get reaction from payload if not payload.guild_id: return channel =", "command.\", brief=\"Sends a DM to the user\") async def send_dm(ctx): await ctx.author.send(\"Hi\") 
@send_dm.error", "name = \"checkIdols\") @commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True,", "confirm_msg = await channel.send(text, reference=msg, mention_author=False) if await Utility.confirm_action(confirm_msg, user): confirm_msg = await", "by the bot if not reaction.me: return if msg.embeds: embed = msg.embeds[0] automatic_role", "\"checkIdols\") @commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name =", "db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async def on_command_error(ctx, error): if isinstance(error, CommandNotFound): # Ignore", "Guild: \" + str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id':", ":white_check_mark:\\n\" text += \"Or is the date recognized incorrectly/was not recognized -> :no_entry_sign:\"", "recognized -> :no_entry_sign:\" confirm_msg = await channel.send(text, reference=msg, mention_author=False) if await Utility.confirm_action(confirm_msg, user):", "so that memberships are not checked. 
\"\"\" print(\"Left Guild: \" + str(guild.id)) settings", "#hololive logo settings.insert_one(json) json = {\"kind\": \"automatic_role\", \"value\" : False} settings.insert_one(json) json =", "\"value\" : 0} settings.insert_one(json) json = {\"kind\": \"picture_link\", \"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo", "only send a valid name\") elif isinstance(error, commands.MissingRequiredArgument): await ctx.send(\"Please include the server", "\"values\" : ['$']} settings.insert_one(json) json = {\"kind\": \"member_role\", \"value\" : 0} settings.insert_one(json) json", "1: await channel.send(\"The reaction took too long! Please remove you reaction from this", "False intents.guild_typing = False async def determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel): return \"$\"", "pass else: raise error @bot.event async def on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id)", "{'supported_idols' : { '$elemMatch': {'name' : name}}}) if 'supported_idols' in result: return result['supported_idols'][0]['guild_id']", "await channel.send(\"Message was sent to user.\", reference=text_msg, mention_author=False) if automatic_role: await member_handler.del_membership(msg, target_member_id,", "{ \"kind\": \"prefixes\", \"values\" : ['$']} settings.insert_one(json) json = {\"kind\": \"member_role\", \"value\" :", "# deny option elif reaction.emoji == u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text = \"Is", "text += \"Or is the date recognized incorrectly/was not recognized -> :no_entry_sign:\" confirm_msg", "settings collection when the bot joins a server. 
\"\"\" print(\"Joined new Guild: \"", "= bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content = None, embed = embed) @bot.command(name = \"dmMe\",", "return result['supported_idols'][0]['guild_id'] #Time in status async def jst_clock(): while not bot.is_closed(): try: now", "to verify and manage Memberships.\\nlogChannel, Vtuber name and memberRole need to be set!',", "description='Bot to verify and manage Memberships.\\nlogChannel, Vtuber name and memberRole need to be", "should be processed if reaction: if reaction.count != 2: return msg = reaction.message", "os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) # Intents intents = discord.Intents.default() intents.members = True intents.invites", "format dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False) def check(m): return m.author == user and", "$verify <VTuber name>\\n\" + \"Both versions require a screenshot sent with it.\", brief=\"", "Command in the DMs that tries to verify a screenshot for membership. 
\"\"\"", "# For local testing token = os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"),", "not allowed to use this command!\") elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command should", "await channel.send(\"Please write a message that will be sent to the User.\", reference=msg,", "dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber: server = map_vtuber_to_server(vtuber[0]) if", "now = dtime.now(tz = timezone.utc) + timedelta(hours = 9) timestr = now.strftime(\"%H:%M JST,", "server: await member_handler.verify_membership(ctx.message, server) else: embed = Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use a", "await bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message was sent to", "await ctx.send(\"This command only works in DMs!\") @bot.command(hidden = True, name = \"checkIdols\")", "if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command only works in DMs!\") @bot.command(hidden = True,", "was delivered safely!\") @send_proof.error async def proof_error(ctx, error): if isinstance(error, commands.BadArgument): await ctx.send(\"Please", "confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count == 1: await", "if prefixes: return prefixes return \"$\" # Set up bot bot = commands.Bot(command_prefix=determine_prefix,", "await ctx.send(\"Please do only send a valid name\") elif isinstance(error, commands.MissingRequiredArgument): await ctx.send(\"Please", "from discord.ext import commands from discord.ext.commands.errors import CommandNotFound #Python import asyncio from datetime", "pass elif isinstance(error, 
commands.MissingPermissions): await ctx.send(\"You are not allowed to use this command!\")", "= dtime.now(tz = timezone.utc) + timedelta(hours = 9) timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\")", "import MongoClient import os ### Setup data # Set variable to true for", "timezone.utc) + timedelta(hours = 9) timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await", "not ctx.message.attachments: await ctx.send(\"Please include a screenshot of the proof!\") return server_id =", "determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild = message.guild if guild: prefixes", "command!\") elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command should not be used in the", "membership import Membership from utility import Utility from ocr import OCR from sending", "dtime.now(tz = timezone.utc) + timedelta(hours = 9) timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\") await", "commands.MissingPermissions): await ctx.send(\"You are not allowed to use this command!\") elif isinstance(error, commands.NoPrivateMessage):", "from discord.ext.commands.errors import CommandNotFound #Python import asyncio from datetime import datetime as dtime", "variable to true for local testing local = False # Customizable Settings #", "intents.guild_typing = False async def determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild", "Please remove you reaction from this message and add it again.\", reference=msg, mention_author=False)", "\"inform_duration\", \"value\" : 1} settings.insert_one(json) @bot.event async def on_guild_remove(guild): \"\"\" Removes the guild", "None, colour = embed_color) embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed =", "Set up bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot to 
verify and manage Memberships.\\nlogChannel, Vtuber", "Only available in DMs\", brief = \"Send additional proof\") @commands.dm_only() async def send_proof(ctx,", "but also with $verify <VTuber name>\\n\" + \"Both versions require a screenshot sent", "if not str(guild.id) in dbnames: new_guild_db = db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] # Create", "in dbnames: new_guild_db = db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] # Create base configuration json", "return \"$\" # Set up bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot to verify and", ": \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json = {\"kind\": \"automatic_role\", \"value\" : False} settings.insert_one(json)", "async def on_raw_reaction_add(payload): # get reaction from payload if not payload.guild_id: return channel", "= await bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message was sent", "commands.BadArgument): await ctx.send(\"Please do only send a valid name\") elif isinstance(error, commands.MissingRequiredArgument): await", "confirm_msg.reactions[1].count == 1: await channel.send(\"The reaction took too long! 
Please remove you reaction", "str(guild.id) in dbnames: new_guild_db = db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] # Create base configuration", "for membership in the DMs\" ) @commands.dm_only() async def verify(ctx, *vtuber): \"\"\" Command", "= \"Please write the correct date from the screenshot in the format dd/mm/yyyy.\"", "reactions that also were also made by the bot if not reaction.me: return", "check(m): return m.author == user and m.channel == channel date_msg = await bot.wait_for('message',", "# set membership await member_handler.set_membership(msg, target_member_id, membership_date) #always clear await msg.clear_reactions() await msg.add_reaction(emoji='👌')", "if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need to allow DMs!\") error = None @bot.command(name=\"proof\",", "commands.Bot(command_prefix=determine_prefix, description='Bot to verify and manage Memberships.\\nlogChannel, Vtuber name and memberRole need to", "this error pass elif isinstance(error, commands.MissingPermissions): await ctx.send(\"You are not allowed to use", "True, name = \"broadcast\") @commands.is_owner() async def broadcast(ctx, title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name':", "= title, description = None, colour = embed_color) embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content", "# List Coroutines to be executed coroutines = ( jst_clock(), member_handler.check_membership_routine(), ) #", "owner_id=owner_id) # database settings db_cluster = MongoClient(db_url.format(db_user, db_pass)) # set up classes member_handler", "if msg.embeds: embed = msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always only the", "the proof (Faked or no date on screenshot) -> :white_check_mark:\\n\" text += \"Or", "command only works in DMs!\") @bot.command(hidden = True, name = \"checkIdols\") 
@commands.is_owner() async", "JST Clock!\") # List Coroutines to be executed coroutines = ( jst_clock(), member_handler.check_membership_routine(),", "screenshot) -> :white_check_mark:\\n\" text += \"Or is the date recognized incorrectly/was not recognized", "embed = embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx, error): if isinstance(error,", "@send_proof.error async def proof_error(ctx, error): if isinstance(error, commands.BadArgument): await ctx.send(\"Please do only send", "settings = new_guild_db[\"settings\"] # Create base configuration json = { \"kind\": \"prefixes\", \"values\"", "that also were also made by the bot if not reaction.me: return if", "discord.Intents.default() intents.members = True intents.invites = False intents.emojis = False intents.typing = False", "case_insensitive=True, owner_id=owner_id) # database settings db_cluster = MongoClient(db_url.format(db_user, db_pass)) # set up classes", "also with $verify <VTuber name>\\n\" + \"Both versions require a screenshot sent with", "in result: return result['supported_idols'][0]['guild_id'] #Time in status async def jst_clock(): while not bot.is_closed():", "in the format dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False) def check(m): return m.author ==", "clear await msg.clear_reactions() await msg.add_reaction(emoji='👌') # deny option elif reaction.emoji == u\"\\U0001F6AB\": user", "as dtime from datetime import timezone, timedelta import re #Internal from membership_handling import", "ctx.author.id embed = discord.Embed(title = title, description = None, colour = embed_color) embed.set_image(url", "commands.PrivateMessageOnly): await ctx.send(\"This command only works in DMs!\") @bot.command(hidden = True, name =", "intents.members = True intents.invites = False intents.emojis = False intents.typing = False intents.integrations", "target_member_id, membership_date) #always clear await 
msg.clear_reactions() await msg.add_reaction(emoji='👌') # deny option elif reaction.emoji", "a valid name\") elif isinstance(error, commands.MissingRequiredArgument): await ctx.send(\"Please include the server name!\") embed", "import MembershipHandler from settings import Settings from membership import Membership from utility import", "option elif reaction.emoji == u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text = \"Is there an", "vtuber. Only available in DMs\", brief = \"Send additional proof\") @commands.dm_only() async def", "server name!\") embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"]", "from the supported idols so that memberships are not checked. \"\"\" print(\"Left Guild:", "{'name' : name}}}) if 'supported_idols' in result: return result['supported_idols'][0]['guild_id'] #Time in status async", "== channel date_msg = await bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions()", "when the bot joins a server. \"\"\" print(\"Joined new Guild: \" + str(guild.id))", ": False} settings.insert_one(json) json = {\"kind\": \"tolerance_duration\", \"value\" : 1} settings.insert_one(json) json =", "a message that will be sent to the User.\", reference=msg, mention_author=False) def check(m):", "check=check) target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message was sent to user.\", reference=text_msg,", "\"Allows to send additional proof. Requires the name of the vtuber. 
Only available", "safely!\") @send_proof.error async def proof_error(ctx, error): if isinstance(error, commands.BadArgument): await ctx.send(\"Please do only", "= True, name = \"checkIdols\") @commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden", "= db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name' : name}}}) if", "text = \"Is there an issue with the proof (Faked or no date", "# only the first react by somebody else than the bot should be", "embed = discord.Embed(title = title, description = text, colour = embed_color) #send to", "user = bot.get_user(payload.user_id) text = \"Is there an issue with the proof (Faked", "somebody else than the bot should be processed if reaction: if reaction.count !=", "guild from the supported idols so that memberships are not checked. \"\"\" print(\"Left", "description = None, colour = embed_color) embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc),", "await ctx.send(\"Please include the server name!\") embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def", "is not for DMs # Only process reactions that also were also made", "discord.channel.DMChannel): return \"$\" guild = message.guild if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if", "isinstance(error, commands.MissingPermissions): await ctx.send(\"You are not allowed to use this command!\") elif isinstance(error,", "@bot.command(name=\"proof\", help = \"Allows to send additional proof. 
Requires the name of the", "cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async def on_command_error(ctx, error): if isinstance(error, CommandNotFound):", "guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return prefixes return \"$\" # Set", "= timezone.utc) + timedelta(hours = 9) timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr))", "= db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content = None, embed = embed)", "= db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] # Create base configuration json = { \"kind\":", "process reactions that also were also made by the bot if not reaction.me:", "be set!', intents=intents, case_insensitive=True, owner_id=owner_id) # database settings db_cluster = MongoClient(db_url.format(db_user, db_pass)) #", "check(m): return m.author == user and m.channel == channel text_msg = await bot.wait_for('message',", "ocr import OCR from sending import Sending from pymongo import MongoClient import os", "def send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error async def dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden): await", "def send_proof(ctx, vtuber: str): if not ctx.message.attachments: await ctx.send(\"Please include a screenshot of", "and message to membership verification channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title =", "$verify but also with $verify <VTuber name>\\n\" + \"Both versions require a screenshot", "bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except ConnectionResetError: print(\"Could not update JST Clock!\") # List Coroutines", "for local testing local = False # Customizable Settings # For local testing", "\"picture_link\", \"value\" : 
\"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json = {\"kind\": \"automatic_role\", \"value\" :", "0} settings.insert_one(json) json = {\"kind\": \"log_channel\", \"value\" : 0} settings.insert_one(json) json = {\"kind\":", "and add it again.\", reference=msg, mention_author=False) else: m = \"Please write the correct", "try: msg = await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the first", "embed = embed) @bot.command(name = \"dmMe\", help=\"Sends a DM containg \\\"hi\\\" to the", "help=\"Sends a DM containg \\\"hi\\\" to the user using the command.\", brief=\"Sends a", "\"\"\" Command in the DMs that tries to verify a screenshot for membership.", "member_handler.del_membership(msg, target_member_id, None, False) await msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg =", "result['supported_idols'][0]['guild_id'] #Time in status async def jst_clock(): while not bot.is_closed(): try: now =", "db_cluster, embed_color) Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster))", "long! 
Please remove you reaction from this message and add it again.\", reference=msg,", "db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name' : name}}}) if 'supported_idols'", "Coroutines to be executed coroutines = ( jst_clock(), member_handler.check_membership_routine(), ) # Main Coroutine", "return m.author == user and m.channel == channel text_msg = await bot.wait_for('message', check=check)", "member_handler.verify_membership(ctx.message, server) else: embed = Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use a valid supported", "await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber: server = map_vtuber_to_server(vtuber[0])", "\"mod_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"picture_link\", \"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive", "dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False) def check(m): return m.author == user and m.channel", "= False intents.webhooks = False intents.voice_states = False intents.guild_typing = False async def", "date recognized incorrectly/was not recognized -> :no_entry_sign:\" confirm_msg = await channel.send(text, reference=msg, mention_author=False)", "== user and m.channel == channel text_msg = await bot.wait_for('message', check=check) target_member =", "was sent to user.\", reference=text_msg, mention_author=False) if automatic_role: await member_handler.del_membership(msg, target_member_id, None, False)", "result: return result['supported_idols'][0]['guild_id'] #Time in status async def jst_clock(): while not bot.is_closed(): try:", "verify and manage Memberships.\\nlogChannel, Vtuber name and memberRole need to be set!', intents=intents,", "automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": 
\"automatic_role\"})[\"value\"] # always only the id target_member_id = int(embed.title) if", "for server in serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content", "= True, name = \"broadcast\") @commands.is_owner() async def broadcast(ctx, title, text): serverlist =", "the proof!\") return server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment and", "import discord from discord.ext import commands from discord.ext.commands.errors import CommandNotFound #Python import asyncio", "jst_clock(): while not bot.is_closed(): try: now = dtime.now(tz = timezone.utc) + timedelta(hours =", "if not payload.guild_id: return channel = bot.get_channel(payload.channel_id) try: msg = await channel.fetch_message(payload.message_id) reaction", "msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always only the id target_member_id = int(embed.title)", "await member_handler.set_membership(msg, target_member_id, membership_date) #always clear await msg.clear_reactions() await msg.add_reaction(emoji='👌') # deny option", "= \"```\\n{}\\n```\".format(desc), embed = embed) #send confirmation await ctx.send(\"Your additional proof was delivered", "discord.Embed(title = title, description = text, colour = embed_color) #send to every server", "= discord.Embed(title = title, description = None, colour = embed_color) embed.set_image(url = ctx.message.attachments[0].url)", "collection when the bot joins a server. 
\"\"\" print(\"Joined new Guild: \" +", "(Faked or no date on screenshot) -> :white_check_mark:\\n\" text += \"Or is the", "channel date_msg = await bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions() await", "called with just $verify but also with $verify <VTuber name>\\n\" + \"Both versions", "m.channel == channel date_msg = await bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content) await", "db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}}) @bot.event async def on_raw_reaction_add(payload): #", "@commands.is_owner() async def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name = \"broadcast\") @commands.is_owner()", "{\"kind\": \"require_additional_proof\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"tolerance_duration\", \"value\" : 1}", "in DMs\", brief = \"Send additional proof\") @commands.dm_only() async def send_proof(ctx, vtuber: str):", "server. 
\"\"\" print(\"Joined new Guild: \" + str(guild.id)) dbnames = db_cluster.list_database_names() if not", "settings.insert_one(json) json = {\"kind\": \"mod_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"picture_link\",", "ctx.author.send(\"Hi\") @send_dm.error async def dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need to", "sending import Sending from pymongo import MongoClient import os ### Setup data #", "message and add it again.\", reference=msg, mention_author=False) else: m = \"Please write the", "= \"Send additional proof\") @commands.dm_only() async def send_proof(ctx, vtuber: str): if not ctx.message.attachments:", "target_member_id = int(embed.title) if reaction.emoji == '✅': if not automatic_role: membership_date = embed.fields[0].value", "write a message that will be sent to the User.\", reference=msg, mention_author=False) def", "msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count ==", "the database and settings collection when the bot joins a server. 
\"\"\" print(\"Joined", "to membership verification channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title = ctx.author.id embed", "it.\", brief=\" Tries to verify a screenshot for membership in the DMs\" )", "def broadcast(ctx, title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed embed = discord.Embed(title", "= discord.Embed(title = title, description = text, colour = embed_color) #send to every", "== u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text = \"Is there an issue with the", "False intents.typing = False intents.integrations = False intents.webhooks = False intents.voice_states = False", "= embed_color) #send to every server for server in serverlist: server_db = db_cluster[str(server['guild_id'])]", "= db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return prefixes return \"$\" # Set up bot", "from utility import Utility from ocr import OCR from sending import Sending from", "set membership await member_handler.set_membership(msg, target_member_id, membership_date) #always clear await msg.clear_reactions() await msg.add_reaction(emoji='👌') #", "import os ### Setup data # Set variable to true for local testing", "member_handler)) @bot.event async def on_command_error(ctx, error): if isinstance(error, CommandNotFound): # Ignore this error", "False} settings.insert_one(json) json = {\"kind\": \"tolerance_duration\", \"value\" : 1} settings.insert_one(json) json = {\"kind\":", "msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be called with just", "async def broadcast(ctx, title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed embed =", "colour = embed_color) #send to every server for server in 
serverlist: server_db =", "additional proof\") @commands.dm_only() async def send_proof(ctx, vtuber: str): if not ctx.message.attachments: await ctx.send(\"Please", "map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name' :", "target_member.send(text_msg.content) await channel.send(\"Message was sent to user.\", reference=text_msg, mention_author=False) if automatic_role: await member_handler.del_membership(msg,", "json = {\"kind\": \"automatic_role\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"require_additional_proof\", \"value\"", "0} settings.insert_one(json) json = {\"kind\": \"mod_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\":", "on_guild_remove(guild): \"\"\" Removes the guild from the supported idols so that memberships are", "16) db_user = os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\"))", "intents = discord.Intents.default() intents.members = True intents.invites = False intents.emojis = False intents.typing", "#add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async def on_command_error(ctx, error): if isinstance(error,", "isinstance(error, CommandNotFound): # Ignore this error pass elif isinstance(error, commands.MissingPermissions): await ctx.send(\"You are", "force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name = \"broadcast\") @commands.is_owner() async def broadcast(ctx,", "name = \"broadcast\") @commands.is_owner() async def broadcast(ctx, title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols']", "member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed = embed) #send confirmation await ctx.send(\"Your additional proof was", "embed = 
discord.Embed(title = title, description = None, colour = embed_color) embed.set_image(url =", "dm_lg_ch.send(attachment.url) if vtuber: server = map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message, server) else: embed", "db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] # Create base configuration json = { \"kind\": \"prefixes\",", "proof\") title = ctx.author.id embed = discord.Embed(title = title, description = None, colour", "on_command_error(ctx, error): if isinstance(error, CommandNotFound): # Ignore this error pass elif isinstance(error, commands.MissingPermissions):", "settings.insert_one(json) json = {\"kind\": \"require_additional_proof\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"tolerance_duration\",", "target_member_id, None, False) await msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages,", "else: raise error @bot.event async def on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id) print('------')", "bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async def on_command_error(ctx, error): if isinstance(error, CommandNotFound): #", "dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber:", "versions require a screenshot sent with it.\", brief=\" Tries to verify a screenshot", "text_msg = await bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message was", "message): if isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild = message.guild if guild: prefixes =", "that tries to verify a screenshot for membership. 
\"\"\" # log content to", "#Time in status async def jst_clock(): while not bot.is_closed(): try: now = dtime.now(tz", "vtuber: str): if not ctx.message.attachments: await ctx.send(\"Please include a screenshot of the proof!\")", "Utility.confirm_action(confirm_msg, user): confirm_msg = await channel.send(\"Please write a message that will be sent", "settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}}) @bot.event async def", "msg.add_reaction(emoji='👌') # deny option elif reaction.emoji == u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text =", "asyncio from datetime import datetime as dtime from datetime import timezone, timedelta import", "= Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use a valid supported VTuber!\", embed = embed)", "@bot.command(hidden = True, name = \"broadcast\") @commands.is_owner() async def broadcast(ctx, title, text): serverlist", "channel = bot.get_channel(payload.channel_id) try: msg = await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) #", "= bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber: server", ") # Main Coroutine async def background_main(): await bot.wait_until_ready() await asyncio.gather(*coroutines) bot.loop.create_task(background_main()) bot.run(token)", "# Set up bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot to verify and manage Memberships.\\nlogChannel,", "dtime from datetime import timezone, timedelta import re #Internal from membership_handling import MembershipHandler", "use a valid supported VTuber!\", embed = embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async", "print(payload.guild_id) 
@bot.command( help=\"Can be called with just $verify but also with $verify <VTuber", "Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use a valid supported VTuber!\", embed = embed) else:", "await asyncio.sleep(60) except ConnectionResetError: print(\"Could not update JST Clock!\") # List Coroutines to", "print('Logged in as') print(bot.user.name) print(bot.user.id) print('------') @bot.event async def on_guild_join(guild): \"\"\" Creates the", "server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment and message to membership", "db_cluster = MongoClient(db_url.format(db_user, db_pass)) # set up classes member_handler = MembershipHandler(bot, db_cluster, embed_color)", "settings.insert_one(json) json = {\"kind\": \"picture_link\", \"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json =", "db_cluster.list_database_names() if not str(guild.id) in dbnames: new_guild_db = db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] #", "json = {\"kind\": \"inform_duration\", \"value\" : 1} settings.insert_one(json) @bot.event async def on_guild_remove(guild): \"\"\"", "member_handler = MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color) #add", "async def on_guild_remove(guild): \"\"\" Removes the guild from the supported idols so that", "User.\", reference=msg, mention_author=False) def check(m): return m.author == user and m.channel == channel", "membership. 
\"\"\" # log content to dm log channel for record dm_lg_ch =", "pymongo import MongoClient import os ### Setup data # Set variable to true", "name and memberRole need to be set!', intents=intents, case_insensitive=True, owner_id=owner_id) # database settings", "True, name = \"checkIdols\") @commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden =", "title = ctx.author.id embed = discord.Embed(title = title, description = None, colour =", "also were also made by the bot if not reaction.me: return if msg.embeds:", "send a valid name\") elif isinstance(error, commands.MissingRequiredArgument): await ctx.send(\"Please include the server name!\")", "= int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log", "\"Please write the correct date from the screenshot in the format dd/mm/yyyy.\" await", "MembershipHandler from settings import Settings from membership import Membership from utility import Utility", "get reaction from payload if not payload.guild_id: return channel = bot.get_channel(payload.channel_id) try: msg", "this handling is not for DMs # Only process reactions that also were", "be called with just $verify but also with $verify <VTuber name>\\n\" + \"Both", "async def verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command only works in", "=\"Please use a valid supported VTuber!\", embed = embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error", "using the command.\", brief=\"Sends a DM to the user\") async def send_dm(ctx): await", "reaction.emoji == '✅': if not automatic_role: membership_date = embed.fields[0].value # set membership await", "import asyncio from datetime import datetime as dtime from datetime import timezone, timedelta", "if 
isinstance(error, commands.BadArgument): await ctx.send(\"Please do only send a valid name\") elif isinstance(error,", "Removes the guild from the supported idols so that memberships are not checked.", "command should not be used in the DMs\") elif hasattr(ctx.command, 'on_error'): #skip already", "= embed) #send confirmation await ctx.send(\"Your additional proof was delivered safely!\") @send_proof.error async", "if not automatic_role: membership_date = embed.fields[0].value # set membership await member_handler.set_membership(msg, target_member_id, membership_date)", "Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name = \"forceCheck\") @commands.is_owner() async def force_member_check(ctx):", "await member_handler.del_membership(msg, target_member_id, None, False) await msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg", "should not be used in the DMs\") elif hasattr(ctx.command, 'on_error'): #skip already locally", "async def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name = \"broadcast\") @commands.is_owner() async", "db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content = None, embed = embed) @bot.command(name", "db_pass)) # set up classes member_handler = MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster, embed_color)", "datetime import datetime as dtime from datetime import timezone, timedelta import re #Internal", "await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed = embed) #send confirmation await ctx.send(\"Your additional proof", "recognized incorrectly/was not recognized -> :no_entry_sign:\" confirm_msg = await channel.send(text, reference=msg, mention_author=False) if", "reference=msg, 
mention_author=False) if await Utility.confirm_action(confirm_msg, user): confirm_msg = await channel.send(\"Please write a message", "if automatic_role: await member_handler.del_membership(msg, target_member_id, None, False) await msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await", "send_proof(ctx, vtuber: str): if not ctx.message.attachments: await ctx.send(\"Please include a screenshot of the", "if not reaction.me: return if msg.embeds: embed = msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"]", "the first react by somebody else than the bot should be processed if", "from this message and add it again.\", reference=msg, mention_author=False) else: m = \"Please", "to verify a screenshot for membership in the DMs\" ) @commands.dm_only() async def", "vtuber: server = map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message, server) else: embed = Utility.create_supported_vtuber_embed()", "List Coroutines to be executed coroutines = ( jst_clock(), member_handler.check_membership_routine(), ) # Main", "this message and add it again.\", reference=msg, mention_author=False) else: m = \"Please write", "\"log_channel\"})[\"value\"]) # Send attachment and message to membership verification channel desc = \"{}\\n{}\".format(str(ctx.author),", "message.guild if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return prefixes return \"$\"", "use this command!\") elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command should not be used", "isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command should not be used in the DMs\") elif", "not be used in the DMs\") elif hasattr(ctx.command, 'on_error'): #skip already locally handled", ": 1} settings.insert_one(json) @bot.event async def on_guild_remove(guild): \"\"\" Removes the guild from 
the", "{ 'supported_idols': {'guild_id': guild.id}}}) @bot.event async def on_raw_reaction_add(payload): # get reaction from payload", "else than the bot should be processed if reaction: if reaction.count != 2:", "json = {\"kind\": \"require_additional_proof\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"tolerance_duration\", \"value\"", "also made by the bot if not reaction.me: return if msg.embeds: embed =", "int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log =", "False intents.webhooks = False intents.voice_states = False intents.guild_typing = False async def determine_prefix(bot,", "\"prefixes\"})[\"values\"] if prefixes: return prefixes return \"$\" # Set up bot bot =", "ctx.send(\"This command should not be used in the DMs\") elif hasattr(ctx.command, 'on_error'): #skip", "{'$pull': { 'supported_idols': {'guild_id': guild.id}}}) @bot.event async def on_raw_reaction_add(payload): # get reaction from", "require a screenshot sent with it.\", brief=\" Tries to verify a screenshot for", "VTuber!\", embed = embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx, error): if", "error): if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command only works in DMs!\") @bot.command(hidden =", "the guild from the supported idols so that memberships are not checked. 
\"\"\"", "bot.get_channel(payload.channel_id) try: msg = await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the", "Tries to verify a screenshot for membership in the DMs\" ) @commands.dm_only() async", "member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name = \"broadcast\") @commands.is_owner() async def broadcast(ctx, title, text):", "False intents.emojis = False intents.typing = False intents.integrations = False intents.webhooks = False", "handling is not for DMs # Only process reactions that also were also", "settings.insert_one(json) json = {\"kind\": \"automatic_role\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"require_additional_proof\",", "target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message was sent to user.\", reference=text_msg, mention_author=False)", "asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count == 1:", "channel.send(\"Message was sent to user.\", reference=text_msg, mention_author=False) if automatic_role: await member_handler.del_membership(msg, target_member_id, None,", "are not allowed to use this command!\") elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command", "= False # Customizable Settings # For local testing token = os.<PASSWORD>(\"TOKEN\") owner_id", "\"Or is the date recognized incorrectly/was not recognized -> :no_entry_sign:\" confirm_msg = await", "to allow DMs!\") error = None @bot.command(name=\"proof\", help = \"Allows to send additional", "@bot.event async def on_raw_reaction_add(payload): # get reaction from payload if not payload.guild_id: return", "Clock!\") # List Coroutines to be executed coroutines = ( jst_clock(), member_handler.check_membership_routine(), )", "prefixes = 
db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return prefixes return \"$\" # Set up", "print(\"Joined new Guild: \" + str(guild.id)) dbnames = db_cluster.list_database_names() if not str(guild.id) in", "await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be called with just $verify", "confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count == 1: await channel.send(\"The reaction took too long!", "async def send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error async def dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden):", "#skip already locally handled errors pass else: raise error @bot.event async def on_ready():", "#send to every server for server in serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch =", "allowed to use this command!\") elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command should not", "'on_error'): #skip already locally handled errors pass else: raise error @bot.event async def", "joins a server. 
\"\"\" print(\"Joined new Guild: \" + str(guild.id)) dbnames = db_cluster.list_database_names()", "id target_member_id = int(embed.title) if reaction.emoji == '✅': if not automatic_role: membership_date =", "the id target_member_id = int(embed.title) if reaction.emoji == '✅': if not automatic_role: membership_date", "incorrectly/was not recognized -> :no_entry_sign:\" confirm_msg = await channel.send(text, reference=msg, mention_author=False) if await", "bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber: server =", "data # Set variable to true for local testing local = False #", "to be set!', intents=intents, case_insensitive=True, owner_id=owner_id) # database settings db_cluster = MongoClient(db_url.format(db_user, db_pass))", "MongoClient(db_url.format(db_user, db_pass)) # set up classes member_handler = MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster,", "= {\"kind\": \"tolerance_duration\", \"value\" : 1} settings.insert_one(json) json = {\"kind\": \"inform_duration\", \"value\" :", "server in serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content =", "# Send attachment and message to membership verification channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional", "await ctx.send(\"Your additional proof was delivered safely!\") @send_proof.error async def proof_error(ctx, error): if", "bot.add_cog(Membership(bot, member_handler)) @bot.event async def on_command_error(ctx, error): if isinstance(error, CommandNotFound): # Ignore this", "mention_author=False) else: m = \"Please write the correct date from the screenshot in", "name}}}) if 'supported_idols' in result: return result['supported_idols'][0]['guild_id'] #Time in status async def jst_clock():", "async def 
jst_clock(): while not bot.is_closed(): try: now = dtime.now(tz = timezone.utc) +", "server = map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message, server) else: embed = Utility.create_supported_vtuber_embed() await", "os ### Setup data # Set variable to true for local testing local", "name = \"forceCheck\") @commands.is_owner() async def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name", "from membership import Membership from utility import Utility from ocr import OCR from", "= text, colour = embed_color) #send to every server for server in serverlist:", "embed) @bot.command(name = \"dmMe\", help=\"Sends a DM containg \\\"hi\\\" to the user using", "= Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({},", "Memberships.\\nlogChannel, Vtuber name and memberRole need to be set!', intents=intents, case_insensitive=True, owner_id=owner_id) #", "to user.\", reference=text_msg, mention_author=False) if automatic_role: await member_handler.del_membership(msg, target_member_id, None, False) await msg.clear_reactions()", "+ str(guild.id)) dbnames = db_cluster.list_database_names() if not str(guild.id) in dbnames: new_guild_db = db_cluster[str(guild.id)]", "token = os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\")", "bot if not reaction.me: return if msg.embeds: embed = msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\":", "embed_color) Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot,", "user\") async def send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error 
async def dm_error(ctx, error): if isinstance(error,", "for membership. \"\"\" # log content to dm log channel for record dm_lg_ch", "locally handled errors pass else: raise error @bot.event async def on_ready(): print('Logged in", "error pass elif isinstance(error, commands.MissingPermissions): await ctx.send(\"You are not allowed to use this", "Utility from ocr import OCR from sending import Sending from pymongo import MongoClient", "import OCR from sending import Sending from pymongo import MongoClient import os ###", "#Python import asyncio from datetime import datetime as dtime from datetime import timezone,", "server) else: embed = Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use a valid supported VTuber!\",", "= int(os.getenv(\"DM_LOG\")) # Intents intents = discord.Intents.default() intents.members = True intents.invites = False", "await member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command(", "import Settings from membership import Membership from utility import Utility from ocr import", "the correct date from the screenshot in the format dd/mm/yyyy.\" await channel.send(m, reference=msg,", "allow DMs!\") error = None @bot.command(name=\"proof\", help = \"Allows to send additional proof.", "@bot.command(hidden = True, name = \"checkIdols\") @commands.is_owner() async def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols'])", "discord.Embed(title = title, description = None, colour = embed_color) embed.set_image(url = ctx.message.attachments[0].url) await", "\"log_channel\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"mod_role\", \"value\" : 0} settings.insert_one(json)", "'supported_idols': {'guild_id': guild.id}}}) @bot.event async def 
on_raw_reaction_add(payload): # get reaction from payload if", "Only process reactions that also were also made by the bot if not", "msg.embeds: embed = msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always only the id", "def check(m): return m.author == user and m.channel == channel text_msg = await", "bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message was sent to user.\",", "timedelta(hours = 9) timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except", "Create base configuration json = { \"kind\": \"prefixes\", \"values\" : ['$']} settings.insert_one(json) json", "ctx.message.attachments: await ctx.send(\"Please include a screenshot of the proof!\") return server_id = map_vtuber_to_server(vtuber)", "ctx.send(\"Your additional proof was delivered safely!\") @send_proof.error async def proof_error(ctx, error): if isinstance(error,", "text, colour = embed_color) #send to every server for server in serverlist: server_db", "\"Both versions require a screenshot sent with it.\", brief=\" Tries to verify a", "from sending import Sending from pymongo import MongoClient import os ### Setup data", "in the DMs\" ) @commands.dm_only() async def verify(ctx, *vtuber): \"\"\" Command in the", "= db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed embed = discord.Embed(title = title, description = text,", "= {\"kind\": \"require_additional_proof\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"tolerance_duration\", \"value\" :", "DMs # Only process reactions that also were also made by the bot", "membership verification channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title = ctx.author.id embed =", "brief = 
\"Send additional proof\") @commands.dm_only() async def send_proof(ctx, vtuber: str): if not", "as') print(bot.user.name) print(bot.user.id) print('------') @bot.event async def on_guild_join(guild): \"\"\" Creates the database and", "json = {\"kind\": \"picture_link\", \"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json = {\"kind\":", "except ConnectionResetError: print(\"Could not update JST Clock!\") # List Coroutines to be executed", "discord.ext.commands.errors import CommandNotFound #Python import asyncio from datetime import datetime as dtime from", "to send additional proof. Requires the name of the vtuber. Only available in", "['$']} settings.insert_one(json) json = {\"kind\": \"member_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\":", "already locally handled errors pass else: raise error @bot.event async def on_ready(): print('Logged", "if reaction: if reaction.count != 2: return msg = reaction.message # this handling", "sent to user.\", reference=text_msg, mention_author=False) if automatic_role: await member_handler.del_membership(msg, target_member_id, None, False) await", "with it.\", brief=\" Tries to verify a screenshot for membership in the DMs\"", "async def verify(ctx, *vtuber): \"\"\" Command in the DMs that tries to verify", "await ctx.author.send(\"Hi\") @send_dm.error async def dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need", "settings import Settings from membership import Membership from utility import Utility from ocr", "isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command only works in DMs!\") @bot.command(hidden = True, name", "= \"broadcast\") @commands.is_owner() async def broadcast(ctx, title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create", "msg.clear_reactions() await 
msg.add_reaction(emoji='👌') # deny option elif reaction.emoji == u\"\\U0001F6AB\": user = bot.get_user(payload.user_id)", "@bot.command(name = \"dmMe\", help=\"Sends a DM containg \\\"hi\\\" to the user using the", ": False} settings.insert_one(json) json = {\"kind\": \"require_additional_proof\", \"value\" : False} settings.insert_one(json) json =", "the bot should be processed if reaction: if reaction.count != 2: return msg", "base configuration json = { \"kind\": \"prefixes\", \"values\" : ['$']} settings.insert_one(json) json =", "on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id) print('------') @bot.event async def on_guild_join(guild): \"\"\" Creates", "from payload if not payload.guild_id: return channel = bot.get_channel(payload.channel_id) try: msg = await", "Embed embed = discord.Embed(title = title, description = text, colour = embed_color) #send", "settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name' : name}}}) if 'supported_idols' in result: return", "DM containg \\\"hi\\\" to the user using the command.\", brief=\"Sends a DM to", "member_handler.check_membership_routine(), ) # Main Coroutine async def background_main(): await bot.wait_until_ready() await asyncio.gather(*coroutines) bot.loop.create_task(background_main())", "user and m.channel == channel text_msg = await bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id)", "reaction: if reaction.count != 2: return msg = reaction.message # this handling is", "db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return prefixes return \"$\" # Set up bot bot", "intents.webhooks = False intents.voice_states = False intents.guild_typing = False async def determine_prefix(bot, message):", "False # Customizable Settings # For local testing token = os.<PASSWORD>(\"TOKEN\") owner_id =", "up classes member_handler = MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster, 
embed_color) OCR.setup(bot, local) Sending.setup(bot,", "{\"kind\": \"automatic_role\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"require_additional_proof\", \"value\" : False}", "settings.insert_one(json) json = {\"kind\": \"inform_duration\", \"value\" : 1} settings.insert_one(json) @bot.event async def on_guild_remove(guild):", "owner_id = int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\")", "int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url =", "reference=msg, mention_author=False) def check(m): return m.author == user and m.channel == channel text_msg", "channel.send(\"The reaction took too long! Please remove you reaction from this message and", "# database settings db_cluster = MongoClient(db_url.format(db_user, db_pass)) # set up classes member_handler =", "to the user\") async def send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error async def dm_error(ctx, error):", "intents.typing = False intents.integrations = False intents.webhooks = False intents.voice_states = False intents.guild_typing", "= True intents.invites = False intents.emojis = False intents.typing = False intents.integrations =", "that will be sent to the User.\", reference=msg, mention_author=False) def check(m): return m.author", "intents.invites = False intents.emojis = False intents.typing = False intents.integrations = False intents.webhooks", "for DMs # Only process reactions that also were also made by the", "None, False) await msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id)", "else: await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count == 1 and 
confirm_msg.reactions[1].count", "bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content = None, embed = embed) @bot.command(name = \"dmMe\", help=\"Sends", "there an issue with the proof (Faked or no date on screenshot) ->", "{ '$elemMatch': {'name' : name}}}) if 'supported_idols' in result: return result['supported_idols'][0]['guild_id'] #Time in", ": 0} settings.insert_one(json) json = {\"kind\": \"picture_link\", \"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json)", "@commands.dm_only() async def verify(ctx, *vtuber): \"\"\" Command in the DMs that tries to", "+ timedelta(hours = 9) timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60)", "return server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment and message to", "prefixes: return prefixes return \"$\" # Set up bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot", "= ( jst_clock(), member_handler.check_membership_routine(), ) # Main Coroutine async def background_main(): await bot.wait_until_ready()", ": name}}}) if 'supported_idols' in result: return result['supported_idols'][0]['guild_id'] #Time in status async def", "the User.\", reference=msg, mention_author=False) def check(m): return m.author == user and m.channel ==", "\"\"\" print(\"Left Guild: \" + str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': {", "not for DMs # Only process reactions that also were also made by", "to use this command!\") elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command should not be", "serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch = 
bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content = None, embed", "with $verify <VTuber name>\\n\" + \"Both versions require a screenshot sent with it.\",", "mention_author=False) def check(m): return m.author == user and m.channel == channel date_msg =", "= \"forceCheck\") @commands.is_owner() async def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name =", "Customizable Settings # For local testing token = os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color", "1 and confirm_msg.reactions[1].count == 1: await channel.send(\"The reaction took too long! Please remove", "channel.send(\"Please write a message that will be sent to the User.\", reference=msg, mention_author=False)", "dbnames: new_guild_db = db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] # Create base configuration json =", "settings db_cluster = MongoClient(db_url.format(db_user, db_pass)) # set up classes member_handler = MembershipHandler(bot, db_cluster,", "to the User.\", reference=msg, mention_author=False) def check(m): return m.author == user and m.channel", "error = None @bot.command(name=\"proof\", help = \"Allows to send additional proof. 
Requires the", "the server name!\") embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db =", "broadcast(ctx, title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed embed = discord.Embed(title =", "raise error @bot.event async def on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id) print('------') @bot.event", "elif isinstance(error, commands.MissingPermissions): await ctx.send(\"You are not allowed to use this command!\") elif", "await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name = \"forceCheck\") @commands.is_owner() async def force_member_check(ctx): await", "await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except ConnectionResetError: print(\"Could not update JST Clock!\") # List", "= title, description = text, colour = embed_color) #send to every server for", "Intents intents = discord.Intents.default() intents.members = True intents.invites = False intents.emojis = False", ": 1} settings.insert_one(json) json = {\"kind\": \"inform_duration\", \"value\" : 1} settings.insert_one(json) @bot.event async", "\" + str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}})", "== 1: await channel.send(\"The reaction took too long! 
Please remove you reaction from", "status async def jst_clock(): while not bot.is_closed(): try: now = dtime.now(tz = timezone.utc)", "@bot.command(hidden = True, name = \"forceCheck\") @commands.is_owner() async def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden", "@bot.event async def on_guild_join(guild): \"\"\" Creates the database and settings collection when the", "user): confirm_msg = await channel.send(\"Please write a message that will be sent to", "import re #Internal from membership_handling import MembershipHandler from settings import Settings from membership", "\"tolerance_duration\", \"value\" : 1} settings.insert_one(json) json = {\"kind\": \"inform_duration\", \"value\" : 1} settings.insert_one(json)", "str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}}) @bot.event async", "errors pass else: raise error @bot.event async def on_ready(): print('Logged in as') print(bot.user.name)", "payload.guild_id: return channel = bot.get_channel(payload.channel_id) try: msg = await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions,", "embed = msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always only the id target_member_id", "jst_clock(), member_handler.check_membership_routine(), ) # Main Coroutine async def background_main(): await bot.wait_until_ready() await asyncio.gather(*coroutines)", "local testing local = False # Customizable Settings # For local testing token", "= int(embed.title) if reaction.emoji == '✅': if not automatic_role: membership_date = embed.fields[0].value #", "intents.emojis = False intents.typing = False intents.integrations = False intents.webhooks = False intents.voice_states", "def proof_error(ctx, error): if isinstance(error, commands.BadArgument): 
await ctx.send(\"Please do only send a valid", "reference=text_msg, mention_author=False) if automatic_role: await member_handler.del_membership(msg, target_member_id, None, False) await msg.clear_reactions() await msg.add_reaction(emoji='👎')", "def on_guild_remove(guild): \"\"\" Removes the guild from the supported idols so that memberships", "\"log_channel\"})['value']) await lg_ch.send(content = None, embed = embed) @bot.command(name = \"dmMe\", help=\"Sends a", "additional proof. Requires the name of the vtuber. Only available in DMs\", brief", "For local testing token = os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16)", "print('------') @bot.event async def on_guild_join(guild): \"\"\" Creates the database and settings collection when", "CommandNotFound): # Ignore this error pass elif isinstance(error, commands.MissingPermissions): await ctx.send(\"You are not", "{\"kind\": \"member_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"log_channel\", \"value\" : 0}", "# Only process reactions that also were also made by the bot if", "not checked. 
\"\"\" print(\"Left Guild: \" + str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'},", "embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly): await", "new_guild_db = db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] # Create base configuration json = {", "channel for record dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments: await", "\"dmMe\", help=\"Sends a DM containg \\\"hi\\\" to the user using the command.\", brief=\"Sends", "the screenshot in the format dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False) def check(m): return", "= bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message was sent to user.\", reference=text_msg, mention_author=False) if", "the bot if not reaction.me: return if msg.embeds: embed = msg.embeds[0] automatic_role =", "membership await member_handler.set_membership(msg, target_member_id, membership_date) #always clear await msg.clear_reactions() await msg.add_reaction(emoji='👌') # deny", "mention_author=False) if await Utility.confirm_action(confirm_msg, user): confirm_msg = await channel.send(\"Please write a message that", "sent to the User.\", reference=msg, mention_author=False) def check(m): return m.author == user and", "if vtuber: server = map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message, server) else: embed =", "== 1 and confirm_msg.reactions[1].count == 1: await channel.send(\"The reaction took too long! 
Please", "screenshot sent with it.\", brief=\" Tries to verify a screenshot for membership in", "a screenshot for membership in the DMs\" ) @commands.dm_only() async def verify(ctx, *vtuber):", "embed_color) embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed = embed) #send confirmation", "available in DMs\", brief = \"Send additional proof\") @commands.dm_only() async def send_proof(ctx, vtuber:", "be sent to the User.\", reference=msg, mention_author=False) def check(m): return m.author == user", "Settings # For local testing token = os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color =", "= await channel.send(\"Please write a message that will be sent to the User.\",", "async def determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild = message.guild if", "handled errors pass else: raise error @bot.event async def on_ready(): print('Logged in as')", "int(embed.title) if reaction.emoji == '✅': if not automatic_role: membership_date = embed.fields[0].value # set", "= { \"kind\": \"prefixes\", \"values\" : ['$']} settings.insert_one(json) json = {\"kind\": \"member_role\", \"value\"", "only the first react by somebody else than the bot should be processed", "await ctx.send(\"You need to allow DMs!\") error = None @bot.command(name=\"proof\", help = \"Allows", "\"value\" : 0} settings.insert_one(json) json = {\"kind\": \"mod_role\", \"value\" : 0} settings.insert_one(json) json", "OCR from sending import Sending from pymongo import MongoClient import os ### Setup", "{\"kind\": \"picture_link\", \"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json = {\"kind\": \"automatic_role\", \"value\"", "DMs\" ) @commands.dm_only() async def verify(ctx, *vtuber): \"\"\" Command in the DMs that", "valid name\") elif isinstance(error, 
commands.MissingRequiredArgument): await ctx.send(\"Please include the server name!\") embed =", "not bot.is_closed(): try: now = dtime.now(tz = timezone.utc) + timedelta(hours = 9) timestr", "+ str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull': { 'supported_idols': {'guild_id': guild.id}}}) @bot.event", "confirm_msg = await channel.send(\"Please write a message that will be sent to the", "not payload.guild_id: return channel = bot.get_channel(payload.channel_id) try: msg = await channel.fetch_message(payload.message_id) reaction =", "# get reaction from payload if not payload.guild_id: return channel = bot.get_channel(payload.channel_id) try:", "add it again.\", reference=msg, mention_author=False) else: m = \"Please write the correct date", "need to be set!', intents=intents, case_insensitive=True, owner_id=owner_id) # database settings db_cluster = MongoClient(db_url.format(db_user,", "for attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber: server = map_vtuber_to_server(vtuber[0]) if server:", "elif isinstance(error, commands.MissingRequiredArgument): await ctx.send(\"Please include the server name!\") embed = Utility.create_supported_vtuber_embed() await", "False) await msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if", "reaction from payload if not payload.guild_id: return channel = bot.get_channel(payload.channel_id) try: msg =", "intents.integrations = False intents.webhooks = False intents.voice_states = False intents.guild_typing = False async", "-> :no_entry_sign:\" confirm_msg = await channel.send(text, reference=msg, mention_author=False) if await Utility.confirm_action(confirm_msg, user): confirm_msg", "Send attachment and message to membership verification channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\")", 
"mention_author=False) def check(m): return m.author == user and m.channel == channel text_msg =", "= ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed = embed) #send confirmation await ctx.send(\"Your", "memberRole need to be set!', intents=intents, case_insensitive=True, owner_id=owner_id) # database settings db_cluster =", "def verify(ctx, *vtuber): \"\"\" Command in the DMs that tries to verify a", "timedelta import re #Internal from membership_handling import MembershipHandler from settings import Settings from", "= {\"kind\": \"inform_duration\", \"value\" : 1} settings.insert_one(json) @bot.event async def on_guild_remove(guild): \"\"\" Removes", "issue with the proof (Faked or no date on screenshot) -> :white_check_mark:\\n\" text", "title, description = None, colour = embed_color) embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content =", "timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except ConnectionResetError: print(\"Could not", "\"require_additional_proof\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"tolerance_duration\", \"value\" : 1} settings.insert_one(json)", "and settings collection when the bot joins a server. \"\"\" print(\"Joined new Guild:", "the vtuber. 
Only available in DMs\", brief = \"Send additional proof\") @commands.dm_only() async", "import CommandNotFound #Python import asyncio from datetime import datetime as dtime from datetime", "db_url = os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) # Intents intents = discord.Intents.default() intents.members =", "discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count == 1: await channel.send(\"The reaction", "executed coroutines = ( jst_clock(), member_handler.check_membership_routine(), ) # Main Coroutine async def background_main():", "= db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always only the id target_member_id = int(embed.title) if reaction.emoji", "%d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except ConnectionResetError: print(\"Could not update JST Clock!\") #", "== channel text_msg = await bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content) await", "used in the DMs\") elif hasattr(ctx.command, 'on_error'): #skip already locally handled errors pass", "# Intents intents = discord.Intents.default() intents.members = True intents.invites = False intents.emojis =", "msg = reaction.message # this handling is not for DMs # Only process", "help = \"Allows to send additional proof. 
Requires the name of the vtuber.", "= embed_color) embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed = embed) #send", "date_msg = await bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎')", "content to dm log channel for record dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for", "= bot.get_user(payload.user_id) text = \"Is there an issue with the proof (Faked or", "include the server name!\") embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db", "ctx.send(\"Please do only send a valid name\") elif isinstance(error, commands.MissingRequiredArgument): await ctx.send(\"Please include", "error): if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need to allow DMs!\") error = None", "\"value\" : False} settings.insert_one(json) json = {\"kind\": \"require_additional_proof\", \"value\" : False} settings.insert_one(json) json", "the date recognized incorrectly/was not recognized -> :no_entry_sign:\" confirm_msg = await channel.send(text, reference=msg,", "made by the bot if not reaction.me: return if msg.embeds: embed = msg.embeds[0]", "result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name' : name}}}) if 'supported_idols' in", "from membership_handling import MembershipHandler from settings import Settings from membership import Membership from", "up bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot to verify and manage Memberships.\\nlogChannel, Vtuber name", "are not checked. 
\"\"\" print(\"Left Guild: \" + str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name':", "\"value\" : 1} settings.insert_one(json) json = {\"kind\": \"inform_duration\", \"value\" : 1} settings.insert_one(json) @bot.event", "= \"Allows to send additional proof. Requires the name of the vtuber. Only", "os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\") db_pass =", ": 0} settings.insert_one(json) json = {\"kind\": \"mod_role\", \"value\" : 0} settings.insert_one(json) json =", "not automatic_role: membership_date = embed.fields[0].value # set membership await member_handler.set_membership(msg, target_member_id, membership_date) #always", "to every server for server in serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind':", "#External import discord from discord.ext import commands from discord.ext.commands.errors import CommandNotFound #Python import", "async def on_guild_join(guild): \"\"\" Creates the database and settings collection when the bot", "\"Is there an issue with the proof (Faked or no date on screenshot)", "<PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) # Intents intents = discord.Intents.default() intents.members", "db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed embed = discord.Embed(title = title, description = text, colour", "channel text_msg = await bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message", "commands from discord.ext.commands.errors import CommandNotFound #Python import asyncio from datetime import datetime as", "map_vtuber_to_server(vtuber[0]) if server: await member_handler.verify_membership(ctx.message, server) else: embed 
= Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please", "if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return prefixes return \"$\" #", "set up classes member_handler = MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot, local)", "and m.channel == channel date_msg = await bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content)", "datetime as dtime from datetime import timezone, timedelta import re #Internal from membership_handling", "local) Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async def on_command_error(ctx,", "await Utility.confirm_action(confirm_msg, user): confirm_msg = await channel.send(\"Please write a message that will be", "supported VTuber!\", embed = embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx, error):", "print(bot.user.name) print(bot.user.id) print('------') @bot.event async def on_guild_join(guild): \"\"\" Creates the database and settings", "= new_guild_db[\"settings\"] # Create base configuration json = { \"kind\": \"prefixes\", \"values\" :", ") @commands.dm_only() async def verify(ctx, *vtuber): \"\"\" Command in the DMs that tries", "works in DMs!\") @bot.command(hidden = True, name = \"checkIdols\") @commands.is_owner() async def check(ctx):", "ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed = embed) #send confirmation await ctx.send(\"Your additional", "of the proof!\") return server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment", "name\") elif isinstance(error, 
commands.MissingRequiredArgument): await ctx.send(\"Please include the server name!\") embed = Utility.create_supported_vtuber_embed()", "id=confirm_msg.id) if confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count == 1: await channel.send(\"The reaction took", "{\"kind\": \"mod_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"picture_link\", \"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"}", "db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always only the id target_member_id = int(embed.title) if reaction.emoji ==", "settings.insert_one(json) json = {\"kind\": \"tolerance_duration\", \"value\" : 1} settings.insert_one(json) json = {\"kind\": \"inform_duration\",", "a screenshot for membership. \"\"\" # log content to dm log channel for", "datetime import timezone, timedelta import re #Internal from membership_handling import MembershipHandler from settings", "= True, name = \"forceCheck\") @commands.is_owner() async def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden =", "DMs!\") error = None @bot.command(name=\"proof\", help = \"Allows to send additional proof. 
Requires", "\"value\" : 0} settings.insert_one(json) json = {\"kind\": \"log_channel\", \"value\" : 0} settings.insert_one(json) json", "title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed embed = discord.Embed(title = title,", "testing token = os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user =", "except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be called with just $verify but also", "by somebody else than the bot should be processed if reaction: if reaction.count", "= False async def determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild =", "\"Additional proof\") title = ctx.author.id embed = discord.Embed(title = title, description = None,", "from settings import Settings from membership import Membership from utility import Utility from", "on screenshot) -> :white_check_mark:\\n\" text += \"Or is the date recognized incorrectly/was not", "if server: await member_handler.verify_membership(ctx.message, server) else: embed = Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use", "DMs\", brief = \"Send additional proof\") @commands.dm_only() async def send_proof(ctx, vtuber: str): if", "MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot,", "CommandNotFound #Python import asyncio from datetime import datetime as dtime from datetime import", "server for server in serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await", "ctx.send(\"You are not allowed to use this command!\") elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This", 
"return m.author == user and m.channel == channel date_msg = await bot.wait_for('message', check=check)", "screenshot of the proof!\") return server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send", "channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the first react by somebody else", "#create Embed embed = discord.Embed(title = title, description = text, colour = embed_color)", "user.\", reference=text_msg, mention_author=False) if automatic_role: await member_handler.del_membership(msg, target_member_id, None, False) await msg.clear_reactions() await", "# this handling is not for DMs # Only process reactions that also", "lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value']) await lg_ch.send(content = None, embed = embed) @bot.command(name =", "= \"dmMe\", help=\"Sends a DM containg \\\"hi\\\" to the user using the command.\",", "DMs that tries to verify a screenshot for membership. \"\"\" # log content", "memberships are not checked. 
\"\"\" print(\"Left Guild: \" + str(guild.id)) settings = db_cluster[\"settings\"][\"general\"]", "every server for server in serverlist: server_db = db_cluster[str(server['guild_id'])] lg_ch = bot.get_channel(server_db['settings'].find_one({'kind': \"log_channel\"})['value'])", "str): if not ctx.message.attachments: await ctx.send(\"Please include a screenshot of the proof!\") return", "if isinstance(error, CommandNotFound): # Ignore this error pass elif isinstance(error, commands.MissingPermissions): await ctx.send(\"You", "bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id)", "database and settings collection when the bot joins a server. \"\"\" print(\"Joined new", "#always clear await msg.clear_reactions() await msg.add_reaction(emoji='👌') # deny option elif reaction.emoji == u\"\\U0001F6AB\":", "= db_cluster.list_database_names() if not str(guild.id) in dbnames: new_guild_db = db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"]", "verification channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title = ctx.author.id embed = discord.Embed(title", "the supported idols so that memberships are not checked. 
\"\"\" print(\"Left Guild: \"", "first react by somebody else than the bot should be processed if reaction:", "\"member_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"log_channel\", \"value\" : 0} settings.insert_one(json)", "= bot.get_channel(payload.channel_id) try: msg = await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only", "@bot.command( help=\"Can be called with just $verify but also with $verify <VTuber name>\\n\"", "reaction.me: return if msg.embeds: embed = msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] # always", "== '✅': if not automatic_role: membership_date = embed.fields[0].value # set membership await member_handler.set_membership(msg,", "ctx.send(\"Please include the server name!\") embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name):", "OCR.setup(bot, local) Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async def", "print(bot.user.id) print('------') @bot.event async def on_guild_join(guild): \"\"\" Creates the database and settings collection", "proof (Faked or no date on screenshot) -> :white_check_mark:\\n\" text += \"Or is", "from the screenshot in the format dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False) def check(m):", "sent with it.\", brief=\" Tries to verify a screenshot for membership in the", "@commands.dm_only() async def send_proof(ctx, vtuber: str): if not ctx.message.attachments: await ctx.send(\"Please include a", "discord.ext import commands from discord.ext.commands.errors import CommandNotFound #Python import asyncio from datetime import", "database settings db_cluster = MongoClient(db_url.format(db_user, db_pass)) # set up classes member_handler = MembershipHandler(bot,", 
"dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need to allow DMs!\") error =", "-> :white_check_mark:\\n\" text += \"Or is the date recognized incorrectly/was not recognized ->", "= {\"kind\": \"member_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"log_channel\", \"value\" :", "log content to dm log channel for record dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content))", "message to membership verification channel desc = \"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title = ctx.author.id", "= commands.Bot(command_prefix=determine_prefix, description='Bot to verify and manage Memberships.\\nlogChannel, Vtuber name and memberRole need", "channel.send(m, reference=msg, mention_author=False) def check(m): return m.author == user and m.channel == channel", "be used in the DMs\") elif hasattr(ctx.command, 'on_error'): #skip already locally handled errors", "that memberships are not checked. \"\"\" print(\"Left Guild: \" + str(guild.id)) settings =", "False async def determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild = message.guild", "remove you reaction from this message and add it again.\", reference=msg, mention_author=False) else:", "bot.get_user(payload.user_id) text = \"Is there an issue with the proof (Faked or no", "membership_date) #always clear await msg.clear_reactions() await msg.add_reaction(emoji='👌') # deny option elif reaction.emoji ==", "hasattr(ctx.command, 'on_error'): #skip already locally handled errors pass else: raise error @bot.event async", "await ctx.send(\"This command should not be used in the DMs\") elif hasattr(ctx.command, 'on_error'):", "bot joins a server. 
\"\"\" print(\"Joined new Guild: \" + str(guild.id)) dbnames =", "verify(ctx, *vtuber): \"\"\" Command in the DMs that tries to verify a screenshot", "= discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count == 1: await channel.send(\"The", ": { '$elemMatch': {'name' : name}}}) if 'supported_idols' in result: return result['supported_idols'][0]['guild_id'] #Time", "in status async def jst_clock(): while not bot.is_closed(): try: now = dtime.now(tz =", "m.channel == channel text_msg = await bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id) await target_member.send(text_msg.content)", "were also made by the bot if not reaction.me: return if msg.embeds: embed", "await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count == 1", "await msg.clear_reactions() await msg.add_reaction(emoji='👌') # deny option elif reaction.emoji == u\"\\U0001F6AB\": user =", "with the proof (Faked or no date on screenshot) -> :white_check_mark:\\n\" text +=", "settings.insert_one(json) json = {\"kind\": \"log_channel\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"mod_role\",", "be executed coroutines = ( jst_clock(), member_handler.check_membership_routine(), ) # Main Coroutine async def", "new Guild: \" + str(guild.id)) dbnames = db_cluster.list_database_names() if not str(guild.id) in dbnames:", "configuration json = { \"kind\": \"prefixes\", \"values\" : ['$']} settings.insert_one(json) json = {\"kind\":", "int(os.getenv(\"DM_LOG\")) # Intents intents = discord.Intents.default() intents.members = True intents.invites = False intents.emojis", "log channel for record dm_lg_ch = bot.get_channel(dm_log) await dm_lg_ch.send(\"{}\\n{}\".format(str(ctx.author),ctx.message.content)) for attachment in ctx.message.attachments:", "await ctx.send(content =\"Please 
use a valid supported VTuber!\", embed = embed) else: await", "= os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\") db_pass", "else: m = \"Please write the correct date from the screenshot in the", "\"forceCheck\") @commands.is_owner() async def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name = \"broadcast\")", "proof_error(ctx, error): if isinstance(error, commands.BadArgument): await ctx.send(\"Please do only send a valid name\")", "dm_log = int(os.getenv(\"DM_LOG\")) # Intents intents = discord.Intents.default() intents.members = True intents.invites =", "tries to verify a screenshot for membership. \"\"\" # log content to dm", "msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count == 1 and", "msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be called with just $verify but", "manage Memberships.\\nlogChannel, Vtuber name and memberRole need to be set!', intents=intents, case_insensitive=True, owner_id=owner_id)", "m = \"Please write the correct date from the screenshot in the format", "send additional proof. Requires the name of the vtuber. 
Only available in DMs\",", "attachment in ctx.message.attachments: await dm_lg_ch.send(attachment.url) if vtuber: server = map_vtuber_to_server(vtuber[0]) if server: await", "await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command", "= map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment and message to membership verification", "return \"$\" guild = message.guild if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes:", "member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment and message to membership verification channel desc", "= settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'name' : name}}}) if 'supported_idols' in result:", "\"$\" # Set up bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot to verify and manage", "react by somebody else than the bot should be processed if reaction: if", "= 9) timestr = now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except ConnectionResetError:", "ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name = \"forceCheck\") @commands.is_owner() async def force_member_check(ctx): await member_handler.delete_expired_memberships(True)", "if reaction.emoji == '✅': if not automatic_role: membership_date = embed.fields[0].value # set membership", "= embed) @bot.command(name = \"dmMe\", help=\"Sends a DM containg \\\"hi\\\" to the user", "1} settings.insert_one(json) json = {\"kind\": \"inform_duration\", \"value\" : 1} settings.insert_one(json) @bot.event async def", 
"# always only the id target_member_id = int(embed.title) if reaction.emoji == '✅': if", "# Ignore this error pass elif isinstance(error, commands.MissingPermissions): await ctx.send(\"You are not allowed", "and manage Memberships.\\nlogChannel, Vtuber name and memberRole need to be set!', intents=intents, case_insensitive=True,", "@verify.error async def verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command only works", "def verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command only works in DMs!\")", "idols so that memberships are not checked. \"\"\" print(\"Left Guild: \" + str(guild.id))", "discord.errors.Forbidden): await ctx.send(\"You need to allow DMs!\") error = None @bot.command(name=\"proof\", help =", "= None @bot.command(name=\"proof\", help = \"Allows to send additional proof. Requires the name", "while not bot.is_closed(): try: now = dtime.now(tz = timezone.utc) + timedelta(hours = 9)", "print(\"Could not update JST Clock!\") # List Coroutines to be executed coroutines =", "reaction from this message and add it again.\", reference=msg, mention_author=False) else: m =", "reference=msg, mention_author=False) def check(m): return m.author == user and m.channel == channel date_msg", "await channel.send(text, reference=msg, mention_author=False) if await Utility.confirm_action(confirm_msg, user): confirm_msg = await channel.send(\"Please write", "\"\"\" print(\"Joined new Guild: \" + str(guild.id)) dbnames = db_cluster.list_database_names() if not str(guild.id)", "and m.channel == channel text_msg = await bot.wait_for('message', check=check) target_member = bot.get_user(target_member_id) await", "not str(guild.id) in dbnames: new_guild_db = db_cluster[str(guild.id)] settings = new_guild_db[\"settings\"] # Create base", "= <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) # Intents intents = 
discord.Intents.default()", "the DMs that tries to verify a screenshot for membership. \"\"\" # log", "= int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user = os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url", "def check(ctx): Utility.create_supported_vtuber_embed() await ctx.send(db_cluster['settings']['general'].find_one()['supported_idols']) @bot.command(hidden = True, name = \"forceCheck\") @commands.is_owner() async", "# set up classes member_handler = MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot,", "embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async def on_command_error(ctx, error): if", "def on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id) print('------') @bot.event async def on_guild_join(guild): \"\"\"", "correct date from the screenshot in the format dd/mm/yyyy.\" await channel.send(m, reference=msg, mention_author=False)", ": ['$']} settings.insert_one(json) json = {\"kind\": \"member_role\", \"value\" : 0} settings.insert_one(json) json =", "commands.NoPrivateMessage): await ctx.send(\"This command should not be used in the DMs\") elif hasattr(ctx.command,", "# Create base configuration json = { \"kind\": \"prefixes\", \"values\" : ['$']} settings.insert_one(json)", "serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed embed = discord.Embed(title = title, description =", "to verify a screenshot for membership. 
\"\"\" # log content to dm log", "Setup data # Set variable to true for local testing local = False", "#Internal from membership_handling import MembershipHandler from settings import Settings from membership import Membership", "in as') print(bot.user.name) print(bot.user.id) print('------') @bot.event async def on_guild_join(guild): \"\"\" Creates the database", "classes member_handler = MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color)", "not reaction.me: return if msg.embeds: embed = msg.embeds[0] automatic_role = db_cluster[str(msg.guild.id)][\"settings\"].find_one({\"kind\": \"automatic_role\"})[\"value\"] #", "import commands from discord.ext.commands.errors import CommandNotFound #Python import asyncio from datetime import datetime", "u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text = \"Is there an issue with the proof", "try: now = dtime.now(tz = timezone.utc) + timedelta(hours = 9) timestr = now.strftime(\"%H:%M", "membership in the DMs\" ) @commands.dm_only() async def verify(ctx, *vtuber): \"\"\" Command in", "reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the first react by somebody else than", "True intents.invites = False intents.emojis = False intents.typing = False intents.integrations = False", "await member_handler.verify_membership(ctx.message, server) else: embed = Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use a valid", "only works in DMs!\") @bot.command(hidden = True, name = \"checkIdols\") @commands.is_owner() async def", "db_pass = <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) # Intents intents =", "need to allow DMs!\") error = None @bot.command(name=\"proof\", help = \"Allows to send", "channel.send(text, reference=msg, mention_author=False) if await Utility.confirm_action(confirm_msg, user): confirm_msg = await 
channel.send(\"Please write a", "+ \"Both versions require a screenshot sent with it.\", brief=\" Tries to verify", "settings.insert_one(json) @bot.event async def on_guild_remove(guild): \"\"\" Removes the guild from the supported idols", "or no date on screenshot) -> :white_check_mark:\\n\" text += \"Or is the date", "too long! Please remove you reaction from this message and add it again.\",", "await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result = settings_db.find_one({}, {'supported_idols' :", "await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the first react by somebody", "is the date recognized incorrectly/was not recognized -> :no_entry_sign:\" confirm_msg = await channel.send(text,", "= {\"kind\": \"mod_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"picture_link\", \"value\" :", "new_guild_db[\"settings\"] # Create base configuration json = { \"kind\": \"prefixes\", \"values\" : ['$']}", "= await channel.send(text, reference=msg, mention_author=False) if await Utility.confirm_action(confirm_msg, user): confirm_msg = await channel.send(\"Please", "#send confirmation await ctx.send(\"Your additional proof was delivered safely!\") @send_proof.error async def proof_error(ctx,", "print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be called with just $verify but also with $verify", "json = {\"kind\": \"mod_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"picture_link\", \"value\"", "confirmation await ctx.send(\"Your additional proof was delivered safely!\") @send_proof.error async def proof_error(ctx, error):", "\"\"\" Creates the database and settings collection when the bot joins a server.", "bot should be processed if reaction: if reaction.count != 2: return msg =", "= {\"kind\": \"picture_link\", \"value\" : 
\"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json = {\"kind\": \"automatic_role\",", "0} settings.insert_one(json) json = {\"kind\": \"picture_link\", \"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json", "member_handler.set_membership(msg, target_member_id, membership_date) #always clear await msg.clear_reactions() await msg.add_reaction(emoji='👌') # deny option elif", "screenshot for membership in the DMs\" ) @commands.dm_only() async def verify(ctx, *vtuber): \"\"\"", "a DM containg \\\"hi\\\" to the user using the command.\", brief=\"Sends a DM", "target_member_id, date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can be", "intents.voice_states = False intents.guild_typing = False async def determine_prefix(bot, message): if isinstance(message.channel, discord.channel.DMChannel):", "str(guild.id)) dbnames = db_cluster.list_database_names() if not str(guild.id) in dbnames: new_guild_db = db_cluster[str(guild.id)] settings", "from datetime import datetime as dtime from datetime import timezone, timedelta import re", "payload if not payload.guild_id: return channel = bot.get_channel(payload.channel_id) try: msg = await channel.fetch_message(payload.message_id)", "return channel = bot.get_channel(payload.channel_id) try: msg = await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name)", "JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except ConnectionResetError: print(\"Could not update JST Clock!\")", "@commands.is_owner() async def broadcast(ctx, title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] 
#create Embed embed", "an issue with the proof (Faked or no date on screenshot) -> :white_check_mark:\\n\"", "proof was delivered safely!\") @send_proof.error async def proof_error(ctx, error): if isinstance(error, commands.BadArgument): await", ": 0} settings.insert_one(json) json = {\"kind\": \"log_channel\", \"value\" : 0} settings.insert_one(json) json =", "await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count ==", "if isinstance(message.channel, discord.channel.DMChannel): return \"$\" guild = message.guild if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\":", "else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This", "title, description = text, colour = embed_color) #send to every server for server", "import datetime as dtime from datetime import timezone, timedelta import re #Internal from", "# Customizable Settings # For local testing token = os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\"))", "settings.insert_one(json) json = {\"kind\": \"member_role\", \"value\" : 0} settings.insert_one(json) json = {\"kind\": \"log_channel\",", "== user and m.channel == channel date_msg = await bot.wait_for('message', check=check) await member_handler.set_membership(msg,", "= embed) else: await member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly):", "on_raw_reaction_add(payload): # get reaction from payload if not payload.guild_id: return channel = bot.get_channel(payload.channel_id)", "def force_member_check(ctx): await member_handler.delete_expired_memberships(True) @bot.command(hidden = True, name = \"broadcast\") @commands.is_owner() async def", 
"\"automatic_role\"})[\"value\"] # always only the id target_member_id = int(embed.title) if reaction.emoji == '✅':", "async def proof_error(ctx, error): if isinstance(error, commands.BadArgument): await ctx.send(\"Please do only send a", "Guild: \" + str(guild.id)) dbnames = db_cluster.list_database_names() if not str(guild.id) in dbnames: new_guild_db", "true for local testing local = False # Customizable Settings # For local", "colour = embed_color) embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed = embed)", "ConnectionResetError: print(\"Could not update JST Clock!\") # List Coroutines to be executed coroutines", "the DMs\") elif hasattr(ctx.command, 'on_error'): #skip already locally handled errors pass else: raise", "from datetime import timezone, timedelta import re #Internal from membership_handling import MembershipHandler from", "def on_command_error(ctx, error): if isinstance(error, CommandNotFound): # Ignore this error pass elif isinstance(error,", "= {\"kind\": \"automatic_role\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"require_additional_proof\", \"value\" :", "guild = message.guild if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return prefixes", "Settings from membership import Membership from utility import Utility from ocr import OCR", "@bot.event async def on_command_error(ctx, error): if isinstance(error, CommandNotFound): # Ignore this error pass", "verify a screenshot for membership in the DMs\" ) @commands.dm_only() async def verify(ctx,", "ctx.send(\"This command only works in DMs!\") @bot.command(hidden = True, name = \"checkIdols\") @commands.is_owner()", "will be sent to the User.\", reference=msg, mention_author=False) def check(m): return m.author ==", "the user\") async def send_dm(ctx): await ctx.author.send(\"Hi\") @send_dm.error async def dm_error(ctx, 
error): if", "json = {\"kind\": \"tolerance_duration\", \"value\" : 1} settings.insert_one(json) json = {\"kind\": \"inform_duration\", \"value\"", "now.strftime(\"%H:%M JST, %d/%m/%Y\") await bot.change_presence(activity=discord.Game(name=timestr)) await asyncio.sleep(60) except ConnectionResetError: print(\"Could not update JST", "\"```\\n{}\\n```\".format(desc), embed = embed) #send confirmation await ctx.send(\"Your additional proof was delivered safely!\")", "delivered safely!\") @send_proof.error async def proof_error(ctx, error): if isinstance(error, commands.BadArgument): await ctx.send(\"Please do", "description = text, colour = embed_color) #send to every server for server in", "\"automatic_role\", \"value\" : False} settings.insert_one(json) json = {\"kind\": \"require_additional_proof\", \"value\" : False} settings.insert_one(json)", "return msg = reaction.message # this handling is not for DMs # Only", "this command!\") elif isinstance(error, commands.NoPrivateMessage): await ctx.send(\"This command should not be used in", "verify a screenshot for membership. \"\"\" # log content to dm log channel", "with just $verify but also with $verify <VTuber name>\\n\" + \"Both versions require", "deny option elif reaction.emoji == u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text = \"Is there", "not recognized -> :no_entry_sign:\" confirm_msg = await channel.send(text, reference=msg, mention_author=False) if await Utility.confirm_action(confirm_msg,", "async def send_proof(ctx, vtuber: str): if not ctx.message.attachments: await ctx.send(\"Please include a screenshot", "Creates the database and settings collection when the bot joins a server. 
\"\"\"", "from pymongo import MongoClient import os ### Setup data # Set variable to", "embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed = embed) #send confirmation await", "be processed if reaction: if reaction.count != 2: return msg = reaction.message #", "proof\") @commands.dm_only() async def send_proof(ctx, vtuber: str): if not ctx.message.attachments: await ctx.send(\"Please include", "update JST Clock!\") # List Coroutines to be executed coroutines = ( jst_clock(),", "if 'supported_idols' in result: return result['supported_idols'][0]['guild_id'] #Time in status async def jst_clock(): while", "containg \\\"hi\\\" to the user using the command.\", brief=\"Sends a DM to the", "write the correct date from the screenshot in the format dd/mm/yyyy.\" await channel.send(m,", "= message.guild if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return prefixes return", "async def on_ready(): print('Logged in as') print(bot.user.name) print(bot.user.id) print('------') @bot.event async def on_guild_join(guild):", "logo settings.insert_one(json) json = {\"kind\": \"automatic_role\", \"value\" : False} settings.insert_one(json) json = {\"kind\":", "again.\", reference=msg, mention_author=False) else: m = \"Please write the correct date from the", "Vtuber name and memberRole need to be set!', intents=intents, case_insensitive=True, owner_id=owner_id) # database", "\"broadcast\") @commands.is_owner() async def broadcast(ctx, title, text): serverlist = db_cluster[\"settings\"]['general'].find_one({'name': \"supported_idols\"})['supported_idols'] #create Embed", "prefixes return \"$\" # Set up bot bot = commands.Bot(command_prefix=determine_prefix, description='Bot to verify", "in the DMs\") elif hasattr(ctx.command, 'on_error'): #skip already locally handled errors pass else:", "lg_ch.send(content = None, embed = embed) 
@bot.command(name = \"dmMe\", help=\"Sends a DM containg", "the name of the vtuber. Only available in DMs\", brief = \"Send additional", "DMs\") elif hasattr(ctx.command, 'on_error'): #skip already locally handled errors pass else: raise error", "await ctx.send(\"Please include a screenshot of the proof!\") return server_id = map_vtuber_to_server(vtuber) member_veri_ch", "\"{}\\n{}\".format(str(ctx.author), \"Additional proof\") title = ctx.author.id embed = discord.Embed(title = title, description =", "await msg.add_reaction(emoji='👌') # deny option elif reaction.emoji == u\"\\U0001F6AB\": user = bot.get_user(payload.user_id) text", "reaction took too long! Please remove you reaction from this message and add", "else: embed = Utility.create_supported_vtuber_embed() await ctx.send(content =\"Please use a valid supported VTuber!\", embed", "isinstance(error, commands.MissingRequiredArgument): await ctx.send(\"Please include the server name!\") embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None,", "not update JST Clock!\") # List Coroutines to be executed coroutines = (", "if confirm_msg.reactions[0].count == 1 and confirm_msg.reactions[1].count == 1: await channel.send(\"The reaction took too", "db_user = os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) #", "only the id target_member_id = int(embed.title) if reaction.emoji == '✅': if not automatic_role:", "name!\") embed = Utility.create_supported_vtuber_embed() await ctx.send(content=None, embed=embed) def map_vtuber_to_server(name): settings_db = db_cluster[\"settings\"][\"general\"] result", "embed.fields[0].value # set membership await member_handler.set_membership(msg, target_member_id, membership_date) #always clear await msg.clear_reactions() await", "coroutines = ( jst_clock(), member_handler.check_membership_routine(), ) # Main Coroutine async def background_main(): await", "= {\"kind\": \"log_channel\", 
\"value\" : 0} settings.insert_one(json) json = {\"kind\": \"mod_role\", \"value\" :", "automatic_role: membership_date = embed.fields[0].value # set membership await member_handler.set_membership(msg, target_member_id, membership_date) #always clear", "await bot.wait_for('message', check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden:", "db_cluster, embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event", "and memberRole need to be set!', intents=intents, case_insensitive=True, owner_id=owner_id) # database settings db_cluster", "Membership from utility import Utility from ocr import OCR from sending import Sending", "member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id) @bot.command( help=\"Can", "{'guild_id': guild.id}}}) @bot.event async def on_raw_reaction_add(payload): # get reaction from payload if not", "if reaction.count != 2: return msg = reaction.message # this handling is not", "= discord.Intents.default() intents.members = True intents.invites = False intents.emojis = False intents.typing =", "\"value\" : \"https://pbs.twimg.com/profile_images/1198438854841094144/y35Fe_Jj.jpg\"} #hololive logo settings.insert_one(json) json = {\"kind\": \"automatic_role\", \"value\" : False}", "def on_guild_join(guild): \"\"\" Creates the database and settings collection when the bot joins", "\"value\" : False} settings.insert_one(json) json = {\"kind\": \"tolerance_duration\", \"value\" : 1} settings.insert_one(json) json", "checked. 
\"\"\" print(\"Left Guild: \" + str(guild.id)) settings = db_cluster[\"settings\"][\"general\"] settings.update_one({'name': 'supported_idols'}, {'$pull':", "processed if reaction: if reaction.count != 2: return msg = reaction.message # this", "bot.get_user(target_member_id) await target_member.send(text_msg.content) await channel.send(\"Message was sent to user.\", reference=text_msg, mention_author=False) if automatic_role:", "await channel.send(m, reference=msg, mention_author=False) def check(m): return m.author == user and m.channel ==", "the command.\", brief=\"Sends a DM to the user\") async def send_dm(ctx): await ctx.author.send(\"Hi\")", "<VTuber name>\\n\" + \"Both versions require a screenshot sent with it.\", brief=\" Tries", "= None, colour = embed_color) embed.set_image(url = ctx.message.attachments[0].url) await member_veri_ch.send(content = \"```\\n{}\\n```\".format(desc), embed", "isinstance(error, commands.BadArgument): await ctx.send(\"Please do only send a valid name\") elif isinstance(error, commands.MissingRequiredArgument):", "to true for local testing local = False # Customizable Settings # For", "await msg.clear_reactions() await msg.add_reaction(emoji='👎') else: await asyncio.sleep(1) confirm_msg = discord.utils.get(bot.cached_messages, id=confirm_msg.id) if confirm_msg.reactions[0].count", "embed) #send confirmation await ctx.send(\"Your additional proof was delivered safely!\") @send_proof.error async def", "\"Send additional proof\") @commands.dm_only() async def send_proof(ctx, vtuber: str): if not ctx.message.attachments: await", "screenshot for membership. 
\"\"\" # log content to dm log channel for record", "intents=intents, case_insensitive=True, owner_id=owner_id) # database settings db_cluster = MongoClient(db_url.format(db_user, db_pass)) # set up", "= reaction.message # this handling is not for DMs # Only process reactions", "local testing token = os.<PASSWORD>(\"TOKEN\") owner_id = int(os.getenv(\"OWNER_ID\")) embed_color = int(os.getenv(\"EMBED_COLOR\"), 16) db_user", "\"\"\" # log content to dm log channel for record dm_lg_ch = bot.get_channel(dm_log)", "= await channel.fetch_message(payload.message_id) reaction = discord.utils.get(msg.reactions, emoji=payload.emoji.name) # only the first react by", "member_handler.verify_membership_with_server_detection(ctx.message) @verify.error async def verify_error(ctx, error): if isinstance(error, commands.PrivateMessageOnly): await ctx.send(\"This command only", "Set variable to true for local testing local = False # Customizable Settings", "you reaction from this message and add it again.\", reference=msg, mention_author=False) else: m", "None, embed = embed) @bot.command(name = \"dmMe\", help=\"Sends a DM containg \\\"hi\\\" to", "Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async def on_command_error(ctx, error):", "testing local = False # Customizable Settings # For local testing token =", "= os.getenv(\"DB_USER\") db_pass = <PASSWORD>(\"<PASSWORD>\") db_url = os.getenv(\"DB_LINK\") dm_log = int(os.getenv(\"DM_LOG\")) # Intents", "\"prefixes\", \"values\" : ['$']} settings.insert_one(json) json = {\"kind\": \"member_role\", \"value\" : 0} settings.insert_one(json)", "map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) # Send attachment and message to membership verification channel", "m.author == user and m.channel == channel text_msg = await bot.wait_for('message', 
check=check) target_member", "check=check) await member_handler.set_membership(msg, target_member_id, date_msg.content) await msg.clear_reactions() await msg.add_reaction(emoji='👎') except discord.errors.Forbidden: print(payload.channel_id) print(payload.guild_id)", "include a screenshot of the proof!\") return server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"])", "MongoClient import os ### Setup data # Set variable to true for local", "embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler)) @bot.event async", "a screenshot of the proof!\") return server_id = map_vtuber_to_server(vtuber) member_veri_ch =bot.get_channel(db_cluster[str(server_id)][\"settings\"].find_one({\"kind\": \"log_channel\"})[\"value\"]) #", "a server. \"\"\" print(\"Joined new Guild: \" + str(guild.id)) dbnames = db_cluster.list_database_names() if", "reaction.message # this handling is not for DMs # Only process reactions that", "and confirm_msg.reactions[1].count == 1: await channel.send(\"The reaction took too long! 
Please remove you", "def dm_error(ctx, error): if isinstance(error, discord.errors.Forbidden): await ctx.send(\"You need to allow DMs!\") error", "Sending from pymongo import MongoClient import os ### Setup data # Set variable", "emoji=payload.emoji.name) # only the first react by somebody else than the bot should", "\"$\" guild = message.guild if guild: prefixes = db_cluster[str(guild.id)][\"settings\"].find_one({\"kind\": \"prefixes\"})[\"values\"] if prefixes: return", "Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color) #add cogs bot.add_cog(Settings(bot, db_cluster)) bot.add_cog(Membership(bot, member_handler))", "def check(m): return m.author == user and m.channel == channel date_msg = await", "def jst_clock(): while not bot.is_closed(): try: now = dtime.now(tz = timezone.utc) + timedelta(hours", "= MembershipHandler(bot, db_cluster, embed_color) Utility.setup(bot, db_cluster, embed_color) OCR.setup(bot, local) Sending.setup(bot, embed_color) #add cogs", "on_guild_join(guild): \"\"\" Creates the database and settings collection when the bot joins a", "re #Internal from membership_handling import MembershipHandler from settings import Settings from membership import" ]
[ "self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation", "Taxation Trends' ] } res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'],", "(Annelida) from Lizard Island (Great Barrier Reef, Australia)'], 'full_title': 'New data on the", "'2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'],", "original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner", "utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article',", "['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97,", "'created': {'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp':", "Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2)", "data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)', 'authors_list':", 
"self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original", "4, 23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts':", "3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\":", "[[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all': 1, 'score': 95.2,", "'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count': 3, 'rank':", "self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes", "self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\",", "original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>,", "from Lizard Island (Great Barrier Reef, Australia)'], 'full_title': 'New data on the Opheliidae", "Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2018, 9, 1]], 
'timestamp':", "} res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI'])", "self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'],", "'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp':", "] } res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'],", "import pandas as pd from ..src.Result import Result class TestResult(unittest.TestCase): def test__to_dict(self): original", "self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'],", "Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2018, 9, 1]],", "import Result class TestResult(unittest.TestCase): def test__to_dict(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\",", "winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) 
self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title'])", "'issued': {'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000}, 'created': {'date-parts': [[2018, 4, 23]], 'timestamp':", "= { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New data on the Opheliidae (Annelida)", "= Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type'])", "1585730172000}, 'created': {'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018, 9, 1]],", "[[2018, 4, 23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'deposited':", "as pd from ..src.Result import Result class TestResult(unittest.TestCase): def test__to_dict(self): original = {", "test__missing_values(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), }", "\"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title':", "<NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'],", "Island (Great Barrier Reef, Australia)'], 'full_title': 'New data on the Opheliidae (Annelida) from", "Australia)'], 'full_title': 'New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier", "{ \"manuscript_id\": 'TVA-18-057', \"decision_date\": 
pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner", "Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000},", "3, 'rank': 1, 'container-title': [ 'Taxes and Taxation Trends' ] } res =", "\"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article',", "{'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000}, 'created': {'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000},", "from ..src.Result import Result class TestResult(unittest.TestCase): def test__to_dict(self): original = { \"manuscript_id\": 'TVA-18-057',", "'publisher': 'SAGE', 'issued': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1,", "Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'],", "(Annelida) from Lizard Island (Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE',", "1, 'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count': 3, 'rank': 1, 'container-title': [ 'Taxes and", "(Great Barrier Reef, Australia)'], 'full_title': 'New data on the Opheliidae (Annelida) from Lizard", "['New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)'],", "{'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000},", "pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 
'DOI': '10.1016/j.jnt.2017.08.038',", "pd from ..src.Result import Result class TestResult(unittest.TestCase): def test__to_dict(self): original = { \"manuscript_id\":", "on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)'], 'full_title': 'New", "winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and", "} winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New data on the", "{'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000},", "Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2020, 4, 1]], 'timestamp':", "'New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)',", "'SAGE', 'issued': {'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000}, 'created': {'date-parts': [[2018, 4, 23]],", "True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original =", "(Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2018, 9,", "self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'],", "'type': 'journal-article', 'title': ['New data on the Opheliidae (Annelida) from Lizard Island (Great", 
"self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original = {", "'timestamp': 1585730172000}, 'created': {'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018, 9,", "self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\",", "test__to_dict(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce',", "'title': ['New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef,", "res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'],", "and Taxation Trends' ] } res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01')", "95.2, 'is-referenced-by-count': 3, 'rank': 1, 'container-title': [ 'Taxes and Taxation Trends' ] }", "unittest import pandas as pd from ..src.Result import Result class TestResult(unittest.TestCase): def test__to_dict(self):", "on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)', 'authors_list': ['<NAME>',", "\"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI':", "self.assertEqual(res['submission_date'], 
'2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE')", "97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862)", "[[2020, 4, 1]], 'timestamp': 1585730172000}, 'created': {'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000}, 'indexed':", "\"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI':", "'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'],", "Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'],", "'issued': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all': 1,", "the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)'], 'full_title': 'New data", "1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one':", "'is-referenced-by-count': 3, 'rank': 1, 'container-title': [ 'Taxes and Taxation 
Trends' ] } res", "23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018,", "[ 'Taxes and Taxation Trends' ] } res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['decision_date'], '')", "class TestResult(unittest.TestCase): def test__to_dict(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True),", "Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)'], 'full_title': 'New data on", "'container-title': [ 'Taxes and Taxation Trends' ] } res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'],", "winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends')", "'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity':", "self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>')", "Reef, Australia)'], 'full_title': 'New data on the Opheliidae (Annelida) from Lizard Island (Great", "1, 'score': 95.2, 'is-referenced-by-count': 3, 'rank': 1, 'container-title': [ 'Taxes and Taxation Trends'", "'score': 95.2, 'is-referenced-by-count': 3, 'rank': 1, 'container-title': [ 'Taxes and Taxation Trends' ]", "'author_match_one': 1, 'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count': 3, 'rank': 1, 'container-title': [ 'Taxes", "Lizard Island (Great Barrier Reef, Australia)'], 'full_title': 'New 
data on the Opheliidae (Annelida)", "1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\":", "Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000},", "the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'],", "4, 1]], 'timestamp': 1585730172000}, 'created': {'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts':", "True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self):", "self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original = { \"manuscript_id\": 'TVA-18-057',", "'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all': 1,", "TestResult(unittest.TestCase): def test__to_dict(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\":", "'SAGE', 'issued': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all':", "self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def", "and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') 
self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True)", "'2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1)", "1]], 'timestamp': 1585730172000}, 'created': {'date-parts': [[2018, 4, 23]], 'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018,", "\"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner =", "winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1')", "'10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New data on the Opheliidae (Annelida) from Lizard Island", "'similarity': 97, 'author_match_one': 1, 'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count': 3, 'rank': 1, 'container-title':", "97, 'author_match_one': 1, 'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count': 3, 'rank': 1, 'container-title': [", "= { \"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), }", "self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97)", "'TVA-18-057', \"decision_date\": \"2020-09-01\", 
\"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038',", "Barrier Reef, Australia)'], 'full_title': 'New data on the Opheliidae (Annelida) from Lizard Island", "9, 1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97,", "self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'],", "'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = {", "self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True)", "95.2) self.assertEqual(res['match_crossref_cites'], 3) self.assertEqual(res['match_rank'], 1) self.assertEqual(res['match_total_decision_days'], -862) def test__missing_values(self): original = { \"manuscript_id\":", "\"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type':", "{'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all': 1, 'score':", "Island (Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2020,", "'journal-article', 'title': ['New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier", "Trends' ] } res = Result(original=original, winner=winner).to_dict() 
self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01')", "'<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000}, 'created': {'date-parts': [[2018,", "{ \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = {", "pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New", "'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'],", "'publisher': 'SAGE', 'issued': {'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000}, 'created': {'date-parts': [[2018, 4,", "'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New data on the Opheliidae (Annelida) from Lizard", "self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'], '2020-09-01') self.assertEqual(res['submission_date'], '2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'],", "'container-title': [ 'Taxes and Taxation Trends' ] } res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['decision_date'],", "'timestamp': 1524472572000}, 'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018, 9,", "errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type':", 
"pandas as pd from ..src.Result import Result class TestResult(unittest.TestCase): def test__to_dict(self): original =", "['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000}, 'created': {'date-parts':", "original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True),", "Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2020, 4, 1]],", "'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count': 3, 'rank': 1, 'container-title': [ 'Taxes and Taxation", "'2020-08-01') self.assertEqual(res['match_doi'], winner['DOI']) self.assertEqual(res['match_type'], winner['type']) self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'],", "1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count': 3,", "[ 'Taxes and Taxation Trends' ] } res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id'])", "1524472572000}, 'indexed': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018, 9, 1]],", "self.assertEqual(res['match_title'], winner['full_title']) self.assertEqual(res['match_authors'], '<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'],", "'2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23') self.assertEqual(res['match_similarity'], 97) self.assertEqual(res['match_one'], True) self.assertEqual(res['match_all'], True) 
self.assertEqual(res['match_crossref_score'], 95.2) self.assertEqual(res['match_crossref_cites'], 3)", "[[2018, 9, 1]], 'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity':", "'rank': 1, 'container-title': [ 'Taxes and Taxation Trends' ] } res = Result(original=original,", "'full_title': 'New data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef,", "def test__missing_values(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True),", "Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher':", "Lizard Island (Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts':", "..src.Result import Result class TestResult(unittest.TestCase): def test__to_dict(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\":", "{ 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New data on the Opheliidae (Annelida) from", "1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count': 3, 'rank': 1,", "'<NAME>, <NAME>') self.assertEqual(res['match_publisher'], 'SAGE') self.assertEqual(res['match_journal'], 'Taxes and Taxation Trends') self.assertEqual(res['match_pub_date'], '2020-4-1') self.assertEqual(res['match_earliest_date'], '2018-04-23')", "def test__to_dict(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce', utc=True), \"submission_date\": pd.to_datetime(\"2020-08-01\",", "'<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one':", "import unittest import pandas as pd from ..src.Result import Result class 
TestResult(unittest.TestCase): def", "winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New data on the Opheliidae", "'timestamp': 1535790972000}, 'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1,", "from Lizard Island (Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued':", "utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New data on", "1535790972000}, 'deposited': {'date-parts': [[2018, 9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all':", "9, 1]], 'timestamp': 1535790972000}, 'similarity': 97, 'author_match_one': 1, 'author_match_all': 1, 'score': 95.2, 'is-referenced-by-count':", "= { \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce', utc=True), } winner =", "'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2020, 4, 1]], 'timestamp': 1585730172000}, 'created':", "errors='coerce', utc=True), } winner = { 'DOI': '10.1016/j.jnt.2017.08.038', 'type': 'journal-article', 'title': ['New data", "-862) def test__missing_values(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": \"2020-09-01\", \"submission_date\": pd.to_datetime(\"2020-08-01\", errors='coerce',", "Island (Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2018,", "Result class TestResult(unittest.TestCase): def test__to_dict(self): original = { \"manuscript_id\": 'TVA-18-057', \"decision_date\": pd.to_datetime(\"2020-09-01\", errors='coerce',", "<reponame>sagepublishing/rejected_article_tracker_pkg<gh_stars>1-10 import unittest import pandas as pd from ..src.Result import Result class TestResult(unittest.TestCase):", "1, 'container-title': [ 'Taxes and 
Taxation Trends' ] } res = Result(original=original, winner=winner).to_dict()", "'Taxes and Taxation Trends' ] } res = Result(original=original, winner=winner).to_dict() self.assertEqual(res['manuscript_id'], original['manuscript_id']) self.assertEqual(res['decision_date'],", "data on the Opheliidae (Annelida) from Lizard Island (Great Barrier Reef, Australia)'], 'full_title':", "(Great Barrier Reef, Australia)', 'authors_list': ['<NAME>', '<NAME>'], 'publisher': 'SAGE', 'issued': {'date-parts': [[2020, 4," ]
[ "JSONField from django.utils.translation import ugettext_lazy as _ from saywiti.common.models import TimeStampedModel class Level(TimeStampedModel):", "Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) level = models.ForeignKey('Level', related_name='regions') name =", "an OSM relation?'), default=False) osm_tags = JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM", "def __str__(self): return self.name class Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) level", "# -*- coding: utf-8 -*- from django.contrib.gis.db import models from django.contrib.postgres.fields.jsonb import JSONField", "relation?'), default=False) osm_tags = JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM Relation ID'),", "null=True, blank=True) def __str__(self): return self.name class Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True,", "models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'), max_length=255, null=True, blank=True) def __str__(self): return self.name class", "blank=True) def __str__(self): return self.name class Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True)", "import ugettext_lazy as _ from saywiti.common.models import TimeStampedModel class Level(TimeStampedModel): parent = models.ForeignKey('self',", "class Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) level = models.ForeignKey('Level', related_name='regions') name", "from django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation import ugettext_lazy as _ from saywiti.common.models import", "blank=True) level = models.ForeignKey('Level', related_name='regions') name = 
models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is an", "= models.ForeignKey('Level', related_name='regions') name = models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is an OSM relation?'),", "TimeStampedModel class Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) name = models.CharField(_('Name'), max_length=100)", "models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False) osm_tags = JSONField(_('OSM Tags'),", "= models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'), max_length=255, null=True, blank=True) def __str__(self): return self.name", "osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True) polygon = models.PolygonField() def __str__(self): return", "coding: utf-8 -*- from django.contrib.gis.db import models from django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation", "name = models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'), max_length=255, null=True, blank=True) def __str__(self): return", "= JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True) polygon", "__str__(self): return self.name class Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) level =", "parent = models.ForeignKey('self', related_name='children', null=True, blank=True) level = models.ForeignKey('Level', related_name='regions') name = models.CharField(_('Name'),", "JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True) polygon =", "default=False) osm_tags = JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM 
Relation ID'), null=True,", "django.contrib.gis.db import models from django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation import ugettext_lazy as _", "related_name='children', null=True, blank=True) name = models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'), max_length=255, null=True, blank=True)", "is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False) osm_tags = JSONField(_('OSM Tags'), null=True, blank=True)", "return self.name class Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) level = models.ForeignKey('Level',", "parent = models.ForeignKey('self', related_name='children', null=True, blank=True) name = models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'),", "from django.contrib.gis.db import models from django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation import ugettext_lazy as", "osm_tags = JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True)", "from django.utils.translation import ugettext_lazy as _ from saywiti.common.models import TimeStampedModel class Level(TimeStampedModel): parent", "-*- from django.contrib.gis.db import models from django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation import ugettext_lazy", "description = models.CharField(_('Description'), max_length=255, null=True, blank=True) def __str__(self): return self.name class Region(TimeStampedModel): parent", "django.utils.translation import ugettext_lazy as _ from saywiti.common.models import TimeStampedModel class Level(TimeStampedModel): parent =", "max_length=100) description = models.CharField(_('Description'), max_length=255, null=True, blank=True) def __str__(self): return self.name class Region(TimeStampedModel):", "= 
models.CharField(_('Description'), max_length=255, null=True, blank=True) def __str__(self): return self.name class Region(TimeStampedModel): parent =", "max_length=255, null=True, blank=True) def __str__(self): return self.name class Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children',", "models.BooleanField(_('Is an OSM relation?'), default=False) osm_tags = JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id =", "OSM relation?'), default=False) osm_tags = JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM Relation", "level = models.ForeignKey('Level', related_name='regions') name = models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is an OSM", "null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True) polygon = models.PolygonField() def", "import JSONField from django.utils.translation import ugettext_lazy as _ from saywiti.common.models import TimeStampedModel class", "ugettext_lazy as _ from saywiti.common.models import TimeStampedModel class Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children',", "saywiti.common.models import TimeStampedModel class Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) name =", "models.CharField(_('Description'), max_length=255, null=True, blank=True) def __str__(self): return self.name class Region(TimeStampedModel): parent = models.ForeignKey('self',", "_ from saywiti.common.models import TimeStampedModel class Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True)", "models.ForeignKey('self', related_name='children', null=True, blank=True) level = models.ForeignKey('Level', related_name='regions') name = models.CharField(_('Name'), max_length=100) is_osm_relation", "null=True, blank=True) level = models.ForeignKey('Level', 
related_name='regions') name = models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is", "class Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) name = models.CharField(_('Name'), max_length=100) description", "models.ForeignKey('Level', related_name='regions') name = models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False)", "from saywiti.common.models import TimeStampedModel class Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) name", "name = models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False) osm_tags =", "= models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False) osm_tags = JSONField(_('OSM", "= models.ForeignKey('self', related_name='children', null=True, blank=True) name = models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'), max_length=255,", "-*- coding: utf-8 -*- from django.contrib.gis.db import models from django.contrib.postgres.fields.jsonb import JSONField from", "= models.ForeignKey('self', related_name='children', null=True, blank=True) level = models.ForeignKey('Level', related_name='regions') name = models.CharField(_('Name'), max_length=100)", "max_length=100) is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False) osm_tags = JSONField(_('OSM Tags'), null=True,", "models from django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation import ugettext_lazy as _ from saywiti.common.models", "related_name='regions') name = models.CharField(_('Name'), max_length=100) is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False) osm_tags", "= models.BooleanField(_('Is an OSM relation?'), default=False) 
osm_tags = JSONField(_('OSM Tags'), null=True, blank=True) osm_relation_id", "blank=True) name = models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'), max_length=255, null=True, blank=True) def __str__(self):", "self.name class Region(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) level = models.ForeignKey('Level', related_name='regions')", "related_name='children', null=True, blank=True) level = models.ForeignKey('Level', related_name='regions') name = models.CharField(_('Name'), max_length=100) is_osm_relation =", "Tags'), null=True, blank=True) osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True) polygon = models.PolygonField()", "blank=True) osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True) polygon = models.PolygonField() def __str__(self):", "import models from django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation import ugettext_lazy as _ from", "utf-8 -*- from django.contrib.gis.db import models from django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation import", "null=True, blank=True) name = models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'), max_length=255, null=True, blank=True) def", "Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) name = models.CharField(_('Name'), max_length=100) description =", "= models.IntegerField(_('OSM Relation ID'), null=True, blank=True) polygon = models.PolygonField() def __str__(self): return self.name", "django.contrib.postgres.fields.jsonb import JSONField from django.utils.translation import ugettext_lazy as _ from saywiti.common.models import TimeStampedModel", "as _ from saywiti.common.models import TimeStampedModel class Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', 
null=True,", "models.ForeignKey('self', related_name='children', null=True, blank=True) name = models.CharField(_('Name'), max_length=100) description = models.CharField(_('Description'), max_length=255, null=True,", "import TimeStampedModel class Level(TimeStampedModel): parent = models.ForeignKey('self', related_name='children', null=True, blank=True) name = models.CharField(_('Name')," ]
[ "enabled=False), Toggle(name=\"db-and-code\", enabled=False), ] def test_merge_toggles(): merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle(", "test_merge_toggles(): merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle( name=\"db-and-code\", description=\"a toggle thats defined", "Toggle( name=\"db-and-code\", description=\"a toggle thats defined in code and database\", enabled=True, ) code_only", "toggles_from_code = [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False), ] def test_merge_toggles(): merged = Toggle.merge(toggles_from_db,", "Toggle( name=\"code-only\", description=\"a toggle thats only defined in code\", enabled=False, ) assert code_and_db", "Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle( name=\"db-and-code\", description=\"a toggle thats defined in code and", "toggle = Toggle(id=1, name=\"some\", enabled=True, description=\"some description\") expected_serialization = {\"id\": 1, \"name\": \"some\",", "assert code_only in merged assert len(merged) == 2 def test_serialize(): toggle = Toggle(id=1,", "assert len(merged) == 2 def test_serialize(): toggle = Toggle(id=1, name=\"some\", enabled=True, description=\"some description\")", "toggle thats only defined in code\", enabled=False, ) assert code_and_db in merged assert", "import Toggle from datetime import datetime, timedelta toggles_from_db = [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\",", "merged assert len(merged) == 2 def test_serialize(): toggle = Toggle(id=1, name=\"some\", enabled=True, description=\"some", "datetime.utcnow() - timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled is True", "enabled=False), Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code = [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False), ] def", 
"doppelkopf.toggles import Toggle from datetime import datetime, timedelta toggles_from_db = [ Toggle(name=\"db-only\", enabled=False),", "only defined in code\", enabled=False, ) assert code_and_db in merged assert code_only in", "in merged assert code_only in merged assert len(merged) == 2 def test_serialize(): toggle", "test_update_toggle_state(): last_changed = datetime.utcnow() - timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert", "code_only in merged assert len(merged) == 2 def test_serialize(): toggle = Toggle(id=1, name=\"some\",", "enabled=False, ) assert code_and_db in merged assert code_only in merged assert len(merged) ==", "from datetime import datetime, timedelta toggles_from_db = [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True), ]", "expected_serialization = {\"id\": 1, \"name\": \"some\", \"enabled\": True} assert toggle.serialize() == expected_serialization def", "code_and_db = Toggle( name=\"db-and-code\", description=\"a toggle thats defined in code and database\", enabled=True,", "thats defined in code and database\", enabled=True, ) code_only = Toggle( name=\"code-only\", description=\"a", "= Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled is True assert t.last_changed_at > datetime.utcnow()", "Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code = [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False), ] def test_merge_toggles():", "toggle thats defined in code and database\", enabled=True, ) code_only = Toggle( name=\"code-only\",", "[ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code = [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False),", "enabled=True), ] toggles_from_code = [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", 
enabled=False), ] def test_merge_toggles(): merged", "enabled=True, ) code_only = Toggle( name=\"code-only\", description=\"a toggle thats only defined in code\",", "in code\", enabled=False, ) assert code_and_db in merged assert code_only in merged assert", "True} assert toggle.serialize() == expected_serialization def test_update_toggle_state(): last_changed = datetime.utcnow() - timedelta(days=2) t", "in code and database\", enabled=True, ) code_only = Toggle( name=\"code-only\", description=\"a toggle thats", "= [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code = [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\",", "expected_serialization def test_update_toggle_state(): last_changed = datetime.utcnow() - timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed)", "= datetime.utcnow() - timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled is", "merged assert code_only in merged assert len(merged) == 2 def test_serialize(): toggle =", "name=\"db-and-code\", description=\"a toggle thats defined in code and database\", enabled=True, ) code_only =", "toggles_from_db = [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code = [ Toggle(name=\"code-only\", enabled=False),", "def test_serialize(): toggle = Toggle(id=1, name=\"some\", enabled=True, description=\"some description\") expected_serialization = {\"id\": 1,", "Toggle(id=1, name=\"some\", enabled=True, description=\"some description\") expected_serialization = {\"id\": 1, \"name\": \"some\", \"enabled\": True}", "2 def test_serialize(): toggle = Toggle(id=1, name=\"some\", enabled=True, description=\"some description\") expected_serialization = {\"id\":", "toggle.serialize() == expected_serialization def test_update_toggle_state(): last_changed = datetime.utcnow() - 
timedelta(days=2) t = Toggle(name=\"some-toggle\",", "Toggle(name=\"db-and-code\", enabled=False), ] def test_merge_toggles(): merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle( name=\"db-and-code\",", "in merged assert len(merged) == 2 def test_serialize(): toggle = Toggle(id=1, name=\"some\", enabled=True,", "\"name\": \"some\", \"enabled\": True} assert toggle.serialize() == expected_serialization def test_update_toggle_state(): last_changed = datetime.utcnow()", "last_changed = datetime.utcnow() - timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled", "database\", enabled=True, ) code_only = Toggle( name=\"code-only\", description=\"a toggle thats only defined in", "= Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle( name=\"db-and-code\", description=\"a toggle thats defined in code", ") assert code_and_db in merged assert code_only in merged assert len(merged) == 2", "test_serialize(): toggle = Toggle(id=1, name=\"some\", enabled=True, description=\"some description\") expected_serialization = {\"id\": 1, \"name\":", ") code_only = Toggle( name=\"code-only\", description=\"a toggle thats only defined in code\", enabled=False,", "t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled is True assert t.last_changed_at >", "description=\"some description\") expected_serialization = {\"id\": 1, \"name\": \"some\", \"enabled\": True} assert toggle.serialize() ==", "= {\"id\": 1, \"name\": \"some\", \"enabled\": True} assert toggle.serialize() == expected_serialization def test_update_toggle_state():", "= Toggle(id=1, name=\"some\", enabled=True, description=\"some description\") expected_serialization = {\"id\": 1, \"name\": \"some\", \"enabled\":", "= [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False), ] def test_merge_toggles(): merged = 
Toggle.merge(toggles_from_db, toggles_from_code)", "= Toggle( name=\"db-and-code\", description=\"a toggle thats defined in code and database\", enabled=True, )", "enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled is True assert t.last_changed_at > datetime.utcnow() - timedelta(seconds=2)", "Toggle from datetime import datetime, timedelta toggles_from_db = [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True),", "merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle( name=\"db-and-code\", description=\"a toggle thats defined in", "] toggles_from_code = [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False), ] def test_merge_toggles(): merged =", "timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled is True assert t.last_changed_at", "Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code = [ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False), ]", "defined in code and database\", enabled=True, ) code_only = Toggle( name=\"code-only\", description=\"a toggle", "description=\"a toggle thats defined in code and database\", enabled=True, ) code_only = Toggle(", "\"some\", \"enabled\": True} assert toggle.serialize() == expected_serialization def test_update_toggle_state(): last_changed = datetime.utcnow() -", "name=\"code-only\", description=\"a toggle thats only defined in code\", enabled=False, ) assert code_and_db in", "= Toggle( name=\"code-only\", description=\"a toggle thats only defined in code\", enabled=False, ) assert", "enabled=True, description=\"some description\") expected_serialization = {\"id\": 1, \"name\": \"some\", \"enabled\": True} assert toggle.serialize()", "import datetime, timedelta toggles_from_db = [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", 
enabled=True), ] toggles_from_code =", "1, \"name\": \"some\", \"enabled\": True} assert toggle.serialize() == expected_serialization def test_update_toggle_state(): last_changed =", "toggles_from_code) code_and_db = Toggle( name=\"db-and-code\", description=\"a toggle thats defined in code and database\",", "defined in code\", enabled=False, ) assert code_and_db in merged assert code_only in merged", "] def test_merge_toggles(): merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle( name=\"db-and-code\", description=\"a toggle", "code\", enabled=False, ) assert code_and_db in merged assert code_only in merged assert len(merged)", "description=\"a toggle thats only defined in code\", enabled=False, ) assert code_and_db in merged", "description\") expected_serialization = {\"id\": 1, \"name\": \"some\", \"enabled\": True} assert toggle.serialize() == expected_serialization", "and database\", enabled=True, ) code_only = Toggle( name=\"code-only\", description=\"a toggle thats only defined", "== expected_serialization def test_update_toggle_state(): last_changed = datetime.utcnow() - timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False,", "timedelta toggles_from_db = [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code = [ Toggle(name=\"code-only\",", "{\"id\": 1, \"name\": \"some\", \"enabled\": True} assert toggle.serialize() == expected_serialization def test_update_toggle_state(): last_changed", "== 2 def test_serialize(): toggle = Toggle(id=1, name=\"some\", enabled=True, description=\"some description\") expected_serialization =", "def test_update_toggle_state(): last_changed = datetime.utcnow() - timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle()", "def test_merge_toggles(): merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle( name=\"db-and-code\", description=\"a toggle thats", 
"len(merged) == 2 def test_serialize(): toggle = Toggle(id=1, name=\"some\", enabled=True, description=\"some description\") expected_serialization", "- timedelta(days=2) t = Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled is True assert", "[ Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False), ] def test_merge_toggles(): merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db", "datetime, timedelta toggles_from_db = [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code = [", "\"enabled\": True} assert toggle.serialize() == expected_serialization def test_update_toggle_state(): last_changed = datetime.utcnow() - timedelta(days=2)", "code and database\", enabled=True, ) code_only = Toggle( name=\"code-only\", description=\"a toggle thats only", "from doppelkopf.toggles import Toggle from datetime import datetime, timedelta toggles_from_db = [ Toggle(name=\"db-only\",", "Toggle(name=\"code-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=False), ] def test_merge_toggles(): merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db =", "enabled=False), ] def test_merge_toggles(): merged = Toggle.merge(toggles_from_db, toggles_from_code) code_and_db = Toggle( name=\"db-and-code\", description=\"a", "datetime import datetime, timedelta toggles_from_db = [ Toggle(name=\"db-only\", enabled=False), Toggle(name=\"db-and-code\", enabled=True), ] toggles_from_code", "code_only = Toggle( name=\"code-only\", description=\"a toggle thats only defined in code\", enabled=False, )", "code_and_db in merged assert code_only in merged assert len(merged) == 2 def test_serialize():", "assert code_and_db in merged assert code_only in merged assert len(merged) == 2 def", "Toggle(name=\"some-toggle\", enabled=False, last_changed_at=last_changed) t.toggle() assert t.enabled is True assert t.last_changed_at > 
datetime.utcnow() -", "assert toggle.serialize() == expected_serialization def test_update_toggle_state(): last_changed = datetime.utcnow() - timedelta(days=2) t =", "thats only defined in code\", enabled=False, ) assert code_and_db in merged assert code_only", "name=\"some\", enabled=True, description=\"some description\") expected_serialization = {\"id\": 1, \"name\": \"some\", \"enabled\": True} assert" ]
[ "an ansible deploy script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} =", "= {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError)", "as error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def", "baker.make( {{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now(), done=True) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert", "{{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError) as", "deploy script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}}", "def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', 
due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated =", "description='Create an ansible deploy script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'done'", "== 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now())", "an ansible deploy script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'done' def", "due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0", "my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now(), done=True) {{cookiecutter.main_model|lower}}_updated =", "{{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible", "{{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script',", 
"script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an", "def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now(), done=True)", "invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) ==", "= 0 with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}}", "ansible deploy script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db):", "def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status", "from datetime import datetime import pytest from model_bakery import baker from {{cookiecutter.main_app}}.models import", "'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated", "description='Create an 
ansible deploy script', due_to=datetime.now(), done=True) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status ==", "error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db):", "an ansible deploy script', due_to=datetime.now(), done=True) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'pending'", "ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create an ansible deploy", "ansible deploy script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}},", "model_bakery import baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db):", "{{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}}", "with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} = 
{{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}}", "assert {{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError) as error:", "{{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}}", "pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\"", "f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create an ansible", "from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy", "baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} =", "my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', 
due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert", "import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now())", "= {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} =", "= baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def", "import pytest from model_bakery import baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import", "{{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make(", "= baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status", "{{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = 
baker.make({{cookiecutter.main_model}}, description='Create", "from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}},", "import datetime import pytest from model_bakery import baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from", "{{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now(), done=True) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status", "test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now(), done=True) {{cookiecutter.main_model|lower}}_updated", "== f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create an", "assert str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}},", "str(error.value) == f\"{{cookiecutter.main_model}} ID: {invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create", "script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert 
{{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} =", "0 with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value) == f\"{{cookiecutter.main_model}} ID:", "import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an", "test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id)", "'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}})", "my_{{cookiecutter.main_model|lower}}.status == 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script',", "= baker.make( {{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now(), done=True) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id)", "description='Create an ansible deploy script', due_to=datetime.now()) 
assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}}", "baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) {{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status ==", "invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now(),", "baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db):", "from model_bakery import baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def", "def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert", "{{cookiecutter.main_model|lower}}_updated = {{cookiecutter.main_model|lower}}_service.mark_as_done(my_{{cookiecutter.main_model|lower}}.id) assert {{cookiecutter.main_model|lower}}_updated.status == 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with", "{invalid_{{cookiecutter.main_model|lower}}} invalida\" def test_should_mark_as_undone(db): my_{{cookiecutter.main_model|lower}} = baker.make( {{cookiecutter.main_model}}, description='Create an ansible deploy 
script',", "datetime import datetime import pytest from model_bakery import baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}}", "my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status == 'pending'", "test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} = {{cookiecutter.main_model|lower}}_service.mark_as_done(invalid_{{cookiecutter.main_model|lower}}) assert str(error.value)", "import baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}}", "datetime import pytest from model_bakery import baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services", "assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy", "pytest from model_bakery import baker from {{cookiecutter.main_app}}.models import {{cookiecutter.main_model}} from {{cookiecutter.main_app}}.services import {{cookiecutter.main_model|lower}}_service", "== 'done' def test_should_raise_an_erro_for_invalid_{{cookiecutter.main_model|lower}}_id(db): invalid_{{cookiecutter.main_model|lower}} = 0 with pytest.raises(RuntimeError) as error: {{cookiecutter.main_model|lower}} =", "deploy script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def 
test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create", "due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status == 'pending' def test_should_get_{{cookiecutter.main_model|lower}}_as_done(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible", "test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) assert my_{{cookiecutter.main_model|lower}}.status ==", "{{cookiecutter.main_model|lower}}_service def test_should_get_{{cookiecutter.main_model|lower}}_as_pending(db): my_{{cookiecutter.main_model|lower}} = baker.make({{cookiecutter.main_model}}, description='Create an ansible deploy script', due_to=datetime.now()) assert" ]
[ "True def speak(self): super(Dog, self).speak() return self.name + ' says \"Woof!\"' class Cat(Pet):", "return self.name + \" can't speak.\" class Plant(Pet): def can_swim(self): super(Plant, self).can_swim() return", "self.name + ' says \"Meow!\"' class Fish(Pet): def can_swim(self): super(Fish, self).can_swim() return True", "Mon Aug 15 18:15:28 2016 @author: Ken \"\"\" from abc import ABCMeta, abstractmethod", "coding: utf-8 -*- \"\"\" Created on Mon Aug 15 18:15:28 2016 @author: Ken", "def can_swim(self): super(Dog, self).can_swim() return True def speak(self): super(Dog, self).speak() return self.name +", "speak(self): super(Dog, self).speak() return self.name + ' says \"Woof!\"' class Cat(Pet): def can_swim(self):", "says \"Woof!\"' class Cat(Pet): def can_swim(self): super(Cat, self).can_swim() return False def speak(self): super(Cat,", "can_swim(self): super(Cat, self).can_swim() return False def speak(self): super(Cat, self).speak() return self.name + '", "on Mon Aug 15 18:15:28 2016 @author: Ken \"\"\" from abc import ABCMeta,", "@abstractmethod def speak(self): pass class Dog(Pet): def can_swim(self): super(Dog, self).can_swim() return True def", "Plant(Pet): def can_swim(self): super(Plant, self).can_swim() return False def speak(self): super(Plant, self).speak() return self.name", "super(Cat, self).speak() return self.name + ' says \"Meow!\"' class Fish(Pet): def can_swim(self): super(Fish,", "Aug 15 18:15:28 2016 @author: Ken \"\"\" from abc import ABCMeta, abstractmethod class", "<filename>abcmetaclasses.py # -*- coding: utf-8 -*- \"\"\" Created on Mon Aug 15 18:15:28", "super(Plant, self).can_swim() return False def speak(self): super(Plant, self).speak() return self.name + \" can't", "def speak(self): super(Fish, self).speak() return self.name + \" can't speak.\" class Plant(Pet): def", "Pet(object): __metaclass__ = ABCMeta def __init__(self,name): self.name = name @abstractmethod def can_swim(self): pass", "from abc import ABCMeta, 
abstractmethod class Pet(object): __metaclass__ = ABCMeta def __init__(self,name): self.name", "can_swim(self): super(Dog, self).can_swim() return True def speak(self): super(Dog, self).speak() return self.name + '", "self).speak() return self.name + ' says \"Meow!\"' class Fish(Pet): def can_swim(self): super(Fish, self).can_swim()", "def speak(self): super(Cat, self).speak() return self.name + ' says \"Meow!\"' class Fish(Pet): def", "def speak(self): super(Dog, self).speak() return self.name + ' says \"Woof!\"' class Cat(Pet): def", "super(Fish, self).can_swim() return True def speak(self): super(Fish, self).speak() return self.name + \" can't", "can_swim(self): pass @abstractmethod def speak(self): pass class Dog(Pet): def can_swim(self): super(Dog, self).can_swim() return", "Ken \"\"\" from abc import ABCMeta, abstractmethod class Pet(object): __metaclass__ = ABCMeta def", "' says \"Meow!\"' class Fish(Pet): def can_swim(self): super(Fish, self).can_swim() return True def speak(self):", "ABCMeta def __init__(self,name): self.name = name @abstractmethod def can_swim(self): pass @abstractmethod def speak(self):", "speak(self): super(Fish, self).speak() return self.name + \" can't speak.\" class Plant(Pet): def can_swim(self):", "self.name + \" can't speak.\" class Plant(Pet): def can_swim(self): super(Plant, self).can_swim() return False", "+ ' says \"Woof!\"' class Cat(Pet): def can_swim(self): super(Cat, self).can_swim() return False def", "Dog(Pet): def can_swim(self): super(Dog, self).can_swim() return True def speak(self): super(Dog, self).speak() return self.name", "def can_swim(self): super(Plant, self).can_swim() return False def speak(self): super(Plant, self).speak() return self.name +", "self).can_swim() return True def speak(self): super(Dog, self).speak() return self.name + ' says \"Woof!\"'", "__metaclass__ = ABCMeta def __init__(self,name): self.name = name @abstractmethod def can_swim(self): pass @abstractmethod", "super(Fish, self).speak() 
return self.name + \" can't speak.\" class Plant(Pet): def can_swim(self): super(Plant,", "self).speak() return self.name + \" can't speak.\" class Plant(Pet): def can_swim(self): super(Plant, self).can_swim()", "return True def speak(self): super(Fish, self).speak() return self.name + \" can't speak.\" class", "self).can_swim() return True def speak(self): super(Fish, self).speak() return self.name + \" can't speak.\"", "class Cat(Pet): def can_swim(self): super(Cat, self).can_swim() return False def speak(self): super(Cat, self).speak() return", "18:15:28 2016 @author: Ken \"\"\" from abc import ABCMeta, abstractmethod class Pet(object): __metaclass__", "self).speak() return self.name + ' says \"Woof!\"' class Cat(Pet): def can_swim(self): super(Cat, self).can_swim()", "pass class Dog(Pet): def can_swim(self): super(Dog, self).can_swim() return True def speak(self): super(Dog, self).speak()", "-*- coding: utf-8 -*- \"\"\" Created on Mon Aug 15 18:15:28 2016 @author:", "speak.\" class Plant(Pet): def can_swim(self): super(Plant, self).can_swim() return False def speak(self): super(Plant, self).speak()", "super(Cat, self).can_swim() return False def speak(self): super(Cat, self).speak() return self.name + ' says", "@author: Ken \"\"\" from abc import ABCMeta, abstractmethod class Pet(object): __metaclass__ = ABCMeta", "import ABCMeta, abstractmethod class Pet(object): __metaclass__ = ABCMeta def __init__(self,name): self.name = name", "says \"Meow!\"' class Fish(Pet): def can_swim(self): super(Fish, self).can_swim() return True def speak(self): super(Fish,", "def can_swim(self): super(Fish, self).can_swim() return True def speak(self): super(Fish, self).speak() return self.name +", "class Fish(Pet): def can_swim(self): super(Fish, self).can_swim() return True def speak(self): super(Fish, self).speak() return", "self).can_swim() return False def speak(self): super(Cat, self).speak() return self.name + ' says \"Meow!\"'", "Created on Mon Aug 15 18:15:28 2016 @author: 
Ken \"\"\" from abc import", "def speak(self): pass class Dog(Pet): def can_swim(self): super(Dog, self).can_swim() return True def speak(self):", "super(Dog, self).can_swim() return True def speak(self): super(Dog, self).speak() return self.name + ' says", "self).can_swim() return False def speak(self): super(Plant, self).speak() return self.name + \" can't speak.\"", "True def speak(self): super(Fish, self).speak() return self.name + \" can't speak.\" class Plant(Pet):", "can't speak.\" class Plant(Pet): def can_swim(self): super(Plant, self).can_swim() return False def speak(self): super(Plant,", "speak(self): pass class Dog(Pet): def can_swim(self): super(Dog, self).can_swim() return True def speak(self): super(Dog,", "class Dog(Pet): def can_swim(self): super(Dog, self).can_swim() return True def speak(self): super(Dog, self).speak() return", "class Pet(object): __metaclass__ = ABCMeta def __init__(self,name): self.name = name @abstractmethod def can_swim(self):", "@abstractmethod def can_swim(self): pass @abstractmethod def speak(self): pass class Dog(Pet): def can_swim(self): super(Dog,", "can_swim(self): super(Fish, self).can_swim() return True def speak(self): super(Fish, self).speak() return self.name + \"", "ABCMeta, abstractmethod class Pet(object): __metaclass__ = ABCMeta def __init__(self,name): self.name = name @abstractmethod", "pass @abstractmethod def speak(self): pass class Dog(Pet): def can_swim(self): super(Dog, self).can_swim() return True", "-*- \"\"\" Created on Mon Aug 15 18:15:28 2016 @author: Ken \"\"\" from", "self.name + ' says \"Woof!\"' class Cat(Pet): def can_swim(self): super(Cat, self).can_swim() return False", "+ \" can't speak.\" class Plant(Pet): def can_swim(self): super(Plant, self).can_swim() return False def", "False def speak(self): super(Cat, self).speak() return self.name + ' says \"Meow!\"' class Fish(Pet):", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Aug 15 18:15:28 2016", "return True def speak(self): super(Dog, 
self).speak() return self.name + ' says \"Woof!\"' class", "= ABCMeta def __init__(self,name): self.name = name @abstractmethod def can_swim(self): pass @abstractmethod def", "return False def speak(self): super(Cat, self).speak() return self.name + ' says \"Meow!\"' class", "\"Meow!\"' class Fish(Pet): def can_swim(self): super(Fish, self).can_swim() return True def speak(self): super(Fish, self).speak()", "\" can't speak.\" class Plant(Pet): def can_swim(self): super(Plant, self).can_swim() return False def speak(self):", "name @abstractmethod def can_swim(self): pass @abstractmethod def speak(self): pass class Dog(Pet): def can_swim(self):", "speak(self): super(Cat, self).speak() return self.name + ' says \"Meow!\"' class Fish(Pet): def can_swim(self):", "= name @abstractmethod def can_swim(self): pass @abstractmethod def speak(self): pass class Dog(Pet): def", "super(Dog, self).speak() return self.name + ' says \"Woof!\"' class Cat(Pet): def can_swim(self): super(Cat,", "' says \"Woof!\"' class Cat(Pet): def can_swim(self): super(Cat, self).can_swim() return False def speak(self):", "Cat(Pet): def can_swim(self): super(Cat, self).can_swim() return False def speak(self): super(Cat, self).speak() return self.name", "self.name = name @abstractmethod def can_swim(self): pass @abstractmethod def speak(self): pass class Dog(Pet):", "__init__(self,name): self.name = name @abstractmethod def can_swim(self): pass @abstractmethod def speak(self): pass class", "utf-8 -*- \"\"\" Created on Mon Aug 15 18:15:28 2016 @author: Ken \"\"\"", "abc import ABCMeta, abstractmethod class Pet(object): __metaclass__ = ABCMeta def __init__(self,name): self.name =", "2016 @author: Ken \"\"\" from abc import ABCMeta, abstractmethod class Pet(object): __metaclass__ =", "def can_swim(self): super(Cat, self).can_swim() return False def speak(self): super(Cat, self).speak() return self.name +", "Fish(Pet): def can_swim(self): super(Fish, self).can_swim() return True def speak(self): 
super(Fish, self).speak() return self.name", "\"\"\" from abc import ABCMeta, abstractmethod class Pet(object): __metaclass__ = ABCMeta def __init__(self,name):", "return self.name + ' says \"Woof!\"' class Cat(Pet): def can_swim(self): super(Cat, self).can_swim() return", "+ ' says \"Meow!\"' class Fish(Pet): def can_swim(self): super(Fish, self).can_swim() return True def", "def can_swim(self): pass @abstractmethod def speak(self): pass class Dog(Pet): def can_swim(self): super(Dog, self).can_swim()", "abstractmethod class Pet(object): __metaclass__ = ABCMeta def __init__(self,name): self.name = name @abstractmethod def", "\"\"\" Created on Mon Aug 15 18:15:28 2016 @author: Ken \"\"\" from abc", "class Plant(Pet): def can_swim(self): super(Plant, self).can_swim() return False def speak(self): super(Plant, self).speak() return", "def __init__(self,name): self.name = name @abstractmethod def can_swim(self): pass @abstractmethod def speak(self): pass", "\"Woof!\"' class Cat(Pet): def can_swim(self): super(Cat, self).can_swim() return False def speak(self): super(Cat, self).speak()", "return self.name + ' says \"Meow!\"' class Fish(Pet): def can_swim(self): super(Fish, self).can_swim() return", "can_swim(self): super(Plant, self).can_swim() return False def speak(self): super(Plant, self).speak() return self.name + \"", "15 18:15:28 2016 @author: Ken \"\"\" from abc import ABCMeta, abstractmethod class Pet(object):" ]
[ "class TestMain(unittest.TestCase): def setUp(self) -> None: pass def tearDown(self) -> None: pass def", "TestMain(unittest.TestCase): def setUp(self) -> None: pass def tearDown(self) -> None: pass def test_do_something_unreliable(self):", "None: pass def tearDown(self) -> None: pass def test_do_something_unreliable(self): got = do_something_unreliable() print(got)", "test_tenacity.main import do_something_unreliable class TestMain(unittest.TestCase): def setUp(self) -> None: pass def tearDown(self) ->", "setUp(self) -> None: pass def tearDown(self) -> None: pass def test_do_something_unreliable(self): got =", "from test_tenacity.main import do_something_unreliable class TestMain(unittest.TestCase): def setUp(self) -> None: pass def tearDown(self)", "def setUp(self) -> None: pass def tearDown(self) -> None: pass def test_do_something_unreliable(self): got", "unittest from test_tenacity.main import do_something_unreliable class TestMain(unittest.TestCase): def setUp(self) -> None: pass def", "import unittest from test_tenacity.main import do_something_unreliable class TestMain(unittest.TestCase): def setUp(self) -> None: pass", "do_something_unreliable class TestMain(unittest.TestCase): def setUp(self) -> None: pass def tearDown(self) -> None: pass", "import do_something_unreliable class TestMain(unittest.TestCase): def setUp(self) -> None: pass def tearDown(self) -> None:", "-> None: pass def tearDown(self) -> None: pass def test_do_something_unreliable(self): got = do_something_unreliable()", "<gh_stars>0 import unittest from test_tenacity.main import do_something_unreliable class TestMain(unittest.TestCase): def setUp(self) -> None:" ]
[ "Executor: def __init__(self): pass def runTask(self, repoCloneUrl, targetBranch, imageTag): start = time.perf_counter() buildProcess", "f'Task: Build image failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}')", "'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}') # delete image", "subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}') # delete", "capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}') # delete image for next run deleteTask", "= subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode != 0: errMessage", "imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode != 0: errMessage = f'Task: Build image", "subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end", "encoding='utf-8') log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}') # delete image for next run deleteTask =", "Build image completed. StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8')", "= f'Task: Build image failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task: Build image completed.", "Build image failed. 
ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}') taskProcess", "buildProcess log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag],", "capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end = time.perf_counter() log.info(f'Task", "'--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}') # delete image for", "failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker',", "loggerWrapper import LoggerWrapper import subprocess import time log = LoggerWrapper(__name__, index.PATH).logger class Executor:", "taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}')", "<reponame>ktrany/pbft-poc #! /usr/bin/env python3 import index from loggerWrapper import LoggerWrapper import subprocess import", "0: errMessage = f'Task: Build image failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task: Build", "capture_output=True, encoding='utf-8') if buildProcess.returncode != 0: errMessage = f'Task: Build image failed. ErrCode={buildProcess.returncode}'", "log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}') # delete image for next run deleteTask = subprocess.run(['docker',", "completed. 
StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution", "= LoggerWrapper(__name__, index.PATH).logger class Executor: def __init__(self): pass def runTask(self, repoCloneUrl, targetBranch, imageTag):", "class Executor: def __init__(self): pass def runTask(self, repoCloneUrl, targetBranch, imageTag): start = time.perf_counter()", "delete image for next run deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task:", "= time.perf_counter() buildProcess = subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode", "# delete image for next run deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8')", "import subprocess import time log = LoggerWrapper(__name__, index.PATH).logger class Executor: def __init__(self): pass", "Delete image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end = time.perf_counter() log.info(f'Task execution time: {end", "return buildProcess log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm',", "imageTag): start = time.perf_counter() buildProcess = subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8')", "import LoggerWrapper import subprocess import time log = LoggerWrapper(__name__, index.PATH).logger class Executor: def", "log.debug(errMessage) return buildProcess log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN',", "completed. 
StatCode={taskProcess.returncode}') # delete image for next run deleteTask = subprocess.run(['docker', 'rmi', imageTag],", "'--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed. StatCode={taskProcess.returncode}') # delete image for next", "subprocess import time log = LoggerWrapper(__name__, index.PATH).logger class Executor: def __init__(self): pass def", "log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True,", "imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end = time.perf_counter()", "run deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}')", "encoding='utf-8') log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end = time.perf_counter() log.info(f'Task execution", "import time log = LoggerWrapper(__name__, index.PATH).logger class Executor: def __init__(self): pass def runTask(self,", "targetBranch, imageTag): start = time.perf_counter() buildProcess = subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True,", "image completed. StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task:", "= subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed. 
StatCode={taskProcess.returncode}') #", "def runTask(self, repoCloneUrl, targetBranch, imageTag): start = time.perf_counter() buildProcess = subprocess.run(['docker', 'build', '-t',", "time.perf_counter() buildProcess = subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode !=", "image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end = time.perf_counter() log.info(f'Task execution time: {end -", "__init__(self): pass def runTask(self, repoCloneUrl, targetBranch, imageTag): start = time.perf_counter() buildProcess = subprocess.run(['docker',", "pass def runTask(self, repoCloneUrl, targetBranch, imageTag): start = time.perf_counter() buildProcess = subprocess.run(['docker', 'build',", "Execution completed. StatCode={taskProcess.returncode}') # delete image for next run deleteTask = subprocess.run(['docker', 'rmi',", "'-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode != 0: errMessage = f'Task: Build", "buildProcess.returncode != 0: errMessage = f'Task: Build image failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess", "encoding='utf-8') if buildProcess.returncode != 0: errMessage = f'Task: Build image failed. 
ErrCode={buildProcess.returncode}' log.debug(errMessage)", "repoCloneUrl, targetBranch, imageTag): start = time.perf_counter() buildProcess = subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'],", "'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode != 0: errMessage = f'Task:", "import index from loggerWrapper import LoggerWrapper import subprocess import time log = LoggerWrapper(__name__,", "for next run deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image", "log = LoggerWrapper(__name__, index.PATH).logger class Executor: def __init__(self): pass def runTask(self, repoCloneUrl, targetBranch,", "LoggerWrapper(__name__, index.PATH).logger class Executor: def __init__(self): pass def runTask(self, repoCloneUrl, targetBranch, imageTag): start", "log.info(f'result: {taskProcess}') end = time.perf_counter() log.info(f'Task execution time: {end - start}s') return taskProcess", "def __init__(self): pass def runTask(self, repoCloneUrl, targetBranch, imageTag): start = time.perf_counter() buildProcess =", "from loggerWrapper import LoggerWrapper import subprocess import time log = LoggerWrapper(__name__, index.PATH).logger class", "imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed. 
StatCode={taskProcess.returncode}') # delete image for next run", "next run deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image completed.", "start = time.perf_counter() buildProcess = subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if", "python3 import index from loggerWrapper import LoggerWrapper import subprocess import time log =", "StatCode={taskProcess.returncode}') # delete image for next run deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True,", "/usr/bin/env python3 import index from loggerWrapper import LoggerWrapper import subprocess import time log", "log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end = time.perf_counter() log.info(f'Task execution time:", "subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode != 0: errMessage =", "index from loggerWrapper import LoggerWrapper import subprocess import time log = LoggerWrapper(__name__, index.PATH).logger", "StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run', '--cap-add=SYS_ADMIN', '--rm', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Execution completed.", "completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end = time.perf_counter() log.info(f'Task execution time: {end - start}s')", "LoggerWrapper import subprocess import time log = LoggerWrapper(__name__, index.PATH).logger class Executor: def __init__(self):", "if buildProcess.returncode != 0: errMessage = f'Task: Build image failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return", "image failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task: Build image completed. 
StatCode={buildProcess.returncode}') taskProcess =", "!= 0: errMessage = f'Task: Build image failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task:", "errMessage = f'Task: Build image failed. ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task: Build image", "ErrCode={buildProcess.returncode}' log.debug(errMessage) return buildProcess log.debug(f'Task: Build image completed. StatCode={buildProcess.returncode}') taskProcess = subprocess.run(['docker', 'run',", "'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end =", "buildProcess = subprocess.run(['docker', 'build', '-t', imageTag, f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode != 0:", "index.PATH).logger class Executor: def __init__(self): pass def runTask(self, repoCloneUrl, targetBranch, imageTag): start =", "time log = LoggerWrapper(__name__, index.PATH).logger class Executor: def __init__(self): pass def runTask(self, repoCloneUrl,", "= subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image completed. StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}')", "#! /usr/bin/env python3 import index from loggerWrapper import LoggerWrapper import subprocess import time", "f'{repoCloneUrl}#{targetBranch}'], capture_output=True, encoding='utf-8') if buildProcess.returncode != 0: errMessage = f'Task: Build image failed.", "runTask(self, repoCloneUrl, targetBranch, imageTag): start = time.perf_counter() buildProcess = subprocess.run(['docker', 'build', '-t', imageTag,", "deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete image completed. 
StatCode={deleteTask.returncode}') log.info(f'result:", "image for next run deleteTask = subprocess.run(['docker', 'rmi', imageTag], capture_output=True, encoding='utf-8') log.debug(f'Task: Delete", "StatCode={deleteTask.returncode}') log.info(f'result: {taskProcess}') end = time.perf_counter() log.info(f'Task execution time: {end - start}s') return" ]
[ "= self.valW * valDiff total = float(hue + sat + val + size)", "rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y +", "None: return self.outMsg = LabeledObjects() msgTime = rospy.Time.now() head = Header() head.stamp =", "= newAssn minError = e #print \"best: \" + str(minMatch) + \" with", "self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters,", "\" + str(minError) ## Convert the best assignment back to a list of", "LabeledObjects from std_msgs.msg import String pf = None display = None initX =", "hsv2[0][0][0] h2 = float(hsv1[0]) huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return", "features from \" + self.filename self.loadObjects() self.initialized = True #Read cluster message clusterArr", "self.orderPub.publish(self.outMsg) #Publish transforms idx = 0 for l in self.ids: t = self.transforms[idx]", "topicref is not None: self.rostopic = os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size", "= String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return match, ordered, minError class ui: def", "= 5 self.waitCount = 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def drawClusters(self,clusters,ids):", "def pubMessages(self): if self.outMsg is None or self.transforms is None or self.ids is", "is 0 or self.tracked is None: return self.outMsg = LabeledObjects() msgTime = rospy.Time.now()", "ros_data.data == \"play\": self.run = True def cbClusters(self, 
ros_data): #Wait for filename to", "idx < len(expected): e += errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i += 1 if", "rospy import pdb import tf import itertools from Tkinter import * from hlpr_feature_extraction.msg", "False: print \"Reading object features from \" + self.filename self.loadObjects() self.initialized = True", "self.labels = [] objFile = open(self.filename, 'r') for line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0])", "+ ' objects loaded' def hsvDiff(self, c1,c2): hsv1 = c1[1:4] r2 = c2.rgba_color.r", "\"~%s\" % name if rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name) else: return", "ids is None: time.sleep(1.0) return self.canvas.delete(\"all\") for idx in range(0,len(clusters)): c = clusters[idx]", "is None: time.sleep(1.0) return self.canvas.delete(\"all\") for idx in range(0,len(clusters)): c = clusters[idx] if", "ordered = [] for i in range(len(minMatch)): #if minMatch[i] is -1: # match.append(None)", "c = clusters[idx] if c is None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset =", "cangle * (complex(x,y)-offset) + offset rot.append((-r.real + 0.5) * 500) rot.append((-r.imag + 0.5)", "-1 objList = range(max(len(expected), len(clusters))) assn = itertools.permutations(objList, len(clusters)) for a in assn:", "self.filename self.loadObjects() self.initialized = True #Read cluster message clusterArr = ros_data clusters =", "idx = 0 for l in self.ids: t = self.transforms[idx] tl = (t.translation.x,", "#Publish transforms idx = 0 for l in self.ids: t = self.transforms[idx] tl", "self.ids = None self.labels = None self.initialized = False self.run = True self.br", "self.filename is not ros_data.data: self.filename = ros_data.data self.loadObjects() self.initialized = True print \"Reading", "import numpy 
as np import cv2 import roslib import rospy import pdb import", "rospy.get_param(name) else: return value class filter: def __init__(self): self.labeled = None self.tracked =", "transforms idx = 0 for l in self.ids: t = self.transforms[idx] tl =", "import PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects from std_msgs.msg import String pf = None", "import pdb import tf import itertools from Tkinter import * from hlpr_feature_extraction.msg import", "for l in self.ids: t = self.transforms[idx] tl = (t.translation.x, t.translation.y, t.translation.z) r", "abs(size - float(c1[4])) def calculateError(self, init, cluster): size = self.sizeW * self.sizeDiff(init,cluster) hueDiff,", "= True #Read cluster message clusterArr = ros_data clusters = ros_data.objects self.transforms =", "import String, Header import numpy as np import cv2 import roslib import rospy", "if fileref is not None: self.filename = os.path.expanduser(fileref) else: self.filename = None topicref", "self.filename = None topicref = get_param(\"feature_file_rostopic\") if topicref is not None: self.rostopic =", "if topicref is not None: self.rostopic = os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile,", "+ size) return total def getMatchingLabels(self, expected, labels, clusters): ## Evaluate all possible", "head = Header() head.stamp = msgTime self.outMsg.header = head self.outMsg.objects = self.tracked self.outMsg.labels", "self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish transforms idx = 0 for l in self.ids:", "0 newAssn = [] for idx in a: if idx < len(expected): e", "self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx += 1 def loadObjects(self): self.initX = [] self.labels =", "pf, display pf = filter() display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ ==", "cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2 = float(hsv1[0]) huediff = 
math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])),", "* hueDiff sat = self.satW * satDiff val = self.valW * valDiff total", "500) rot.append((-r.imag + 0.5) * 500) rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly =", "% (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\")", "c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the label assignment that minimizes total error minMatch", "= cangle * (complex(x,y)-offset) + offset rot.append((-r.real + 0.5) * 500) rot.append((-r.imag +", "for i in range(len(minMatch)): #if minMatch[i] is -1: # match.append(None) # ordered.append(None) #else:", "elif rospy.has_param(name): return rospy.get_param(name) else: return value class filter: def __init__(self): self.labeled =", "display.master.mainloop() if __name__ == '__main__': rospy.init_node(\"object_labeling\", anonymous=False) rospy.loginfo(\"Initializing the object labeling node\") main(sys.argv)", "a: if idx < len(expected): e += errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i +=", "= None display = None initX = None def get_param(name, value=None): private =", "features from \" + self.filename def cbPause(self, ros_data): if ros_data.data == \"pause\": self.run", "return if self.run is False: self.pubMessages() return #Initialize object feature values if self.initialized", "* 500) rot.append((-r.imag + 0.5) * 500) rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly", "is not None: self.filename = os.path.expanduser(fileref) else: self.filename = None topicref = get_param(\"feature_file_rostopic\")", "len(clusters))) assn = itertools.permutations(objList, len(clusters)) for a in assn: e = 0 i", 
"get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print fileref if fileref is not None: self.filename =", "+ 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def main(args): global pf, display pf = filter()", "assignment back to a list of labels match = [] ordered = []", "PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects from std_msgs.msg import String pf = None display", "self.errors = None self.ids = None self.labels = None self.initialized = False self.run", "+ \" + \" match.append(clusters[i]) sMsg = String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return", "is None: return self.outMsg = LabeledObjects() msgTime = rospy.Time.now() head = Header() head.stamp", "self.ids is None: return #Publish labels msgTime = rospy.Time.now() head = Header() head.stamp", "= c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2 = float(hsv1[0]) huediff", "= rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish", "(if receiving from rostopic) if self.filename is None: return if self.run is False:", "\" match.append(clusters[i]) sMsg = String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return match, ordered, minError", "str(minMatch) + \" with error \" + str(minError) ## Convert the best assignment", "b2 = c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2 = float(hsv1[0])", "= False self.run = True self.br = tf.TransformBroadcaster() self.hueW = get_param(\"hsv_hue_weight\", 2) self.satW", "PcFeatureArray, self.cbClusters, queue_size = 1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size = 1)", "for idx in range(0,len(clusters)): c = clusters[idx] if c is None: continue pts", "= tf.TransformBroadcaster() self.hueW = 
get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW", "self.canvas.delete(\"all\") for idx in range(0,len(clusters)): c = clusters[idx] if c is None: continue", "from rostopic) if self.filename is None: return if self.run is False: self.pubMessages() return", "Evaluate all possible cluster-label pairs errorMatrix = [] for l in expected: labelErrors", "pairs errorMatrix = [] for l in expected: labelErrors = [] for c", "if idx < len(expected): e += errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i += 1", "if ros_data.data == \"pause\": self.run = False if ros_data.data == \"play\": self.run =", "#print \"best: \" + str(minMatch) + \" with error \" + str(minError) ##", "= get_param(\"feature_file_location\") print fileref if fileref is not None: self.filename = os.path.expanduser(fileref) else:", "+ val + size) return total def getMatchingLabels(self, expected, labels, clusters): ## Evaluate", "* (complex(x,y)-offset) + offset rot.append((-r.real + 0.5) * 500) rot.append((-r.imag + 0.5) *", "if minMatch[i] is not -1: #print str(errorMatrix[minMatch[i]][i]) + \" + \" match.append(clusters[i]) sMsg", "self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels if len(clusters) is 0", "#if minMatch[i] is -1: # match.append(None) # ordered.append(None) #else: if minMatch[i] is not", "\" + self.filename def cbPause(self, ros_data): if ros_data.data == \"pause\": self.run = False", "self.transforms = ros_data.transforms #Classify clusters #self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters)", "if __name__ == '__main__': rospy.init_node(\"object_labeling\", anonymous=False) rospy.loginfo(\"Initializing the object labeling node\") main(sys.argv) rospy.spin()", "values if self.initialized is False: print \"Reading object features from \" + self.filename", "ros_data.data: 
self.filename = ros_data.data self.loadObjects() self.initialized = True print \"Reading object features from", "ros_data): #Wait for filename to be received (if receiving from rostopic) if self.filename", "self.canvas.pack() def main(args): global pf, display pf = filter() display = ui() display.master.after(10,display.startDrawing,pf)", "Tkinter import * from hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects from std_msgs.msg", "h1 = hsv2[0][0][0] h2 = float(hsv1[0]) huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])),", "c in clusters: e = self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the label", "is None or self.ids is None: return #Publish labels msgTime = rospy.Time.now() head", "def __init__(self): self.master = Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5 self.waitCount", "float(hue + sat + val + size) return total def getMatchingLabels(self, expected, labels,", "True self.br = tf.TransformBroadcaster() self.hueW = get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW =", "newAssn = [] for idx in a: if idx < len(expected): e +=", "self.filename is None: return if self.run is False: self.pubMessages() return #Initialize object feature", "from std_msgs.msg import String pf = None display = None initX = None", "if self.outMsg is None or self.transforms is None or self.ids is None: return", "open(self.filename, 'r') for line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) + ' objects", "self.initialized = False self.run = True self.br = tf.TransformBroadcaster() self.hueW = get_param(\"hsv_hue_weight\", 2)", "self.satW * satDiff val = self.valW * valDiff total = float(hue + sat", "== 
\"play\": self.run = True def cbClusters(self, ros_data): #Wait for filename to be", "* self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster) hue = self.hueW * hueDiff sat", "roslib import rospy import pdb import tf import itertools from Tkinter import *", "i = 0 newAssn = [] for idx in a: if idx <", "= (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx += 1", "in self.ids: t = self.transforms[idx] tl = (t.translation.x, t.translation.y, t.translation.z) r = (t.rotation.x,", "[(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 # cmath.exp(c.angle*1j) rot = [] for", "= None initX = None def get_param(name, value=None): private = \"~%s\" % name", "c2.bb_dims.y return abs(size - float(c1[4])) def calculateError(self, init, cluster): size = self.sizeW *", "-1: #print str(errorMatrix[minMatch[i]][i]) + \" + \" match.append(clusters[i]) sMsg = String() sMsg.data =", "hlpr_object_labeling.msg import LabeledObjects from std_msgs.msg import String pf = None display = None", "sat + val + size) return total def getMatchingLabels(self, expected, labels, clusters): ##", "= None self.ids = None self.labels = None self.initialized = False self.run =", "newAssn.append(-1) i += 1 if minMatch is None or e < minError: minMatch", "os import sys, time, math, cmath from std_msgs.msg import String, Header import numpy", "= c2.rgba_color.g b2 = c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2", "or self.transforms is None or self.ids is None: return #Publish labels msgTime =", "= True self.br = tf.TransformBroadcaster() self.hueW = get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW", "+ 0.5) * 500) rgb = 
'#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label", "ros_data.data == \"pause\": self.run = False if ros_data.data == \"play\": self.run = True", "self.filename def cbPause(self, ros_data): if ros_data.data == \"pause\": self.run = False if ros_data.data", "filter() display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ == '__main__': rospy.init_node(\"object_labeling\", anonymous=False) rospy.loginfo(\"Initializing", "for line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) + ' objects loaded' def", "* valDiff total = float(hue + sat + val + size) return total", "= Header() head.stamp = msgTime self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish transforms idx =", "val = self.valW * valDiff total = float(hue + sat + val +", "for x,y in pts: r = cangle * (complex(x,y)-offset) + offset rot.append((-r.real +", "None or self.ids is None: return #Publish labels msgTime = rospy.Time.now() head =", "not None: self.filename = os.path.expanduser(fileref) else: self.filename = None topicref = get_param(\"feature_file_rostopic\") if", "minError: minMatch = newAssn minError = e #print \"best: \" + str(minMatch) +", "offset = complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 # cmath.exp(c.angle*1j) rot = [] for x,y", "= self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the label assignment that minimizes total", "class filter: def __init__(self): self.labeled = None self.tracked = None self.errors = None", "= rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1) def cbFile(self, ros_data): if self.filename is not", "+ 0.5) * 500) rot.append((-r.imag + 0.5) * 500) rgb = '#%02x%02x%02x' %", "import cv2 import roslib import rospy import pdb import tf import itertools from", 
"self.tracked is None: return self.outMsg = LabeledObjects() msgTime = rospy.Time.now() head = Header()", "0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def drawClusters(self,clusters,ids): if clusters is None", "self.run = True def cbClusters(self, ros_data): #Wait for filename to be received (if", "= get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000) fileref", "#Wait for filename to be received (if receiving from rostopic) if self.filename is", "cluster-label pairs errorMatrix = [] for l in expected: labelErrors = [] for", "t.translation.z) r = (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx", "self.pubMessages() return #Initialize object feature values if self.initialized is False: print \"Reading object", "String, Header import numpy as np import cv2 import roslib import rospy import", "hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster) hue = self.hueW * hueDiff sat = self.satW", "complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 # cmath.exp(c.angle*1j) rot = [] for x,y in pts:", "else: self.filename = None topicref = get_param(\"feature_file_rostopic\") if topicref is not None: self.rostopic", "object feature values if self.initialized is False: print \"Reading object features from \"", "#print str(errorMatrix[minMatch[i]][i]) + \" + \" match.append(clusters[i]) sMsg = String() sMsg.data = labels[minMatch[i]]", "self.labels, clusters) #Publish labels if len(clusters) is 0 or self.tracked is None: return", "filename to be received (if receiving from rostopic) if self.filename is None: return", "= c2.bb_dims.x * c2.bb_dims.y return abs(size - float(c1[4])) def calculateError(self, init, cluster): size", "cluster message clusterArr = ros_data 
clusters = ros_data.objects self.transforms = ros_data.transforms #Classify clusters", "self.filename = ros_data.data self.loadObjects() self.initialized = True print \"Reading object features from \"", "head self.outMsg.objects = self.tracked self.outMsg.labels = self.ids self.pubMessages() def pubMessages(self): if self.outMsg is", "self.master.after(10, self.startDrawing, labeling) def drawClusters(self,clusters,ids): if clusters is None or ids is None:", "## Find the label assignment that minimizes total error minMatch = None minError", "Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5 self.waitCount = 0 def startDrawing(self,labeling):", "None def get_param(name, value=None): private = \"~%s\" % name if rospy.has_param(private): return rospy.get_param(private)", "# cmath.exp(c.angle*1j) rot = [] for x,y in pts: r = cangle *", "i += 1 if minMatch is None or e < minError: minMatch =", "= head self.orderPub.publish(self.outMsg) #Publish transforms idx = 0 for l in self.ids: t", "self.outMsg.labels = self.ids self.pubMessages() def pubMessages(self): if self.outMsg is None or self.transforms is", "= [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 # cmath.exp(c.angle*1j) rot = []", "self.timeout = 5 self.waitCount = 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def", "hsvDiff(self, c1,c2): hsv1 = c1[1:4] r2 = c2.rgba_color.r g2 = c2.rgba_color.g b2 =", "self.labels = None self.initialized = False self.run = True self.br = tf.TransformBroadcaster() self.hueW", "sat = self.satW * satDiff val = self.valW * valDiff total = float(hue", "= '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = 
self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana", "g2 = c2.rgba_color.g b2 = c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0]", "is not -1: #print str(errorMatrix[minMatch[i]][i]) + \" + \" match.append(clusters[i]) sMsg = String()", "match.append(clusters[i]) sMsg = String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return match, ordered, minError class", "self.outMsg = LabeledObjects() msgTime = rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header", "= head self.outMsg.objects = self.tracked self.outMsg.labels = self.ids self.pubMessages() def pubMessages(self): if self.outMsg", "l in self.ids: t = self.transforms[idx] tl = (t.translation.x, t.translation.y, t.translation.z) r =", "objList = range(max(len(expected), len(clusters))) assn = itertools.permutations(objList, len(clusters)) for a in assn: e", "errorMatrix.append(labelErrors) ## Find the label assignment that minimizes total error minMatch = None", "head.stamp = msgTime self.outMsg.header = head self.outMsg.objects = self.tracked self.outMsg.labels = self.ids self.pubMessages()", "if self.filename is not ros_data.data: self.filename = ros_data.data self.loadObjects() self.initialized = True print", "self.hsvDiff(init,cluster) hue = self.hueW * hueDiff sat = self.satW * satDiff val =", "None or ids is None: time.sleep(1.0) return self.canvas.delete(\"all\") for idx in range(0,len(clusters)): c", "import rospy import pdb import tf import itertools from Tkinter import * from", "cmath from std_msgs.msg import String, Header import numpy as np import cv2 import", "self.tracked self.outMsg.labels = self.ids self.pubMessages() def pubMessages(self): if self.outMsg is None or self.transforms", "is None or ids is None: time.sleep(1.0) return self.canvas.delete(\"all\") for idx in range(0,len(clusters)):", "total def 
getMatchingLabels(self, expected, labels, clusters): ## Evaluate all possible cluster-label pairs errorMatrix", "c2.rgba_color.g b2 = c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2 =", "self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5 self.waitCount = 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids)", "expected: labelErrors = [] for c in clusters: e = self.calculateError(l, c) labelErrors.append(e)", "ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ == '__main__': rospy.init_node(\"object_labeling\", anonymous=False) rospy.loginfo(\"Initializing the object labeling", "value class filter: def __init__(self): self.labeled = None self.tracked = None self.errors =", "rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size = 1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size =", "= self.tracked self.outMsg.labels = self.ids self.pubMessages() def pubMessages(self): if self.outMsg is None or", "r2 = c2.rgba_color.r g2 = c2.rgba_color.g b2 = c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV)", "= None minError = -1 objList = range(max(len(expected), len(clusters))) assn = itertools.permutations(objList, len(clusters))", "get_param(name, value=None): private = \"~%s\" % name if rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name):", "self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the label assignment that minimizes total error", "hueDiff sat = self.satW * satDiff val = self.valW * valDiff total =", "rot = [] for x,y in pts: r = cangle * (complex(x,y)-offset) +", "abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size = c2.bb_dims.x * c2.bb_dims.y return abs(size - float(c1[4]))", "size = 
c2.bb_dims.x * c2.bb_dims.y return abs(size - float(c1[4])) def calculateError(self, init, cluster):", "msgTime self.outMsg.header = head self.outMsg.objects = self.tracked self.outMsg.labels = self.ids self.pubMessages() def pubMessages(self):", "return self.canvas.delete(\"all\") for idx in range(0,len(clusters)): c = clusters[idx] if c is None:", "# ordered.append(None) #else: if minMatch[i] is not -1: #print str(errorMatrix[minMatch[i]][i]) + \" +", "= ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ == '__main__': rospy.init_node(\"object_labeling\", anonymous=False) rospy.loginfo(\"Initializing the object", "r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx += 1 def loadObjects(self): self.initX = [] self.labels", "= ros_data.transforms #Classify clusters #self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters) self.tracked,", "if rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name) else: return value class filter:", "False self.run = True self.br = tf.TransformBroadcaster() self.hueW = get_param(\"hsv_hue_weight\", 2) self.satW =", "= itertools.permutations(objList, len(clusters)) for a in assn: e = 0 i = 0", "self.br = tf.TransformBroadcaster() self.hueW = get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1)", "object features from \" + self.filename def cbPause(self, ros_data): if ros_data.data == \"pause\":", "if self.run is False: self.pubMessages() return #Initialize object feature values if self.initialized is", "minMatch = newAssn minError = e #print \"best: \" + str(minMatch) + \"", "LabeledObjects() msgTime = rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header = head", "None self.errors = None self.ids = None self.labels = None self.initialized = False", "= rospy.Time.now() head = Header() 
head.stamp = msgTime self.outMsg.header = head self.outMsg.objects =", "self.startDrawing, labeling) def drawClusters(self,clusters,ids): if clusters is None or ids is None: time.sleep(1.0)", "rostopic) if self.filename is None: return if self.run is False: self.pubMessages() return #Initialize", "queue_size = 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1) def cbFile(self, ros_data):", "% name if rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name) else: return value", "1 if minMatch is None or e < minError: minMatch = newAssn minError", "self.cbClusters, queue_size = 1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size = 1) self.orderPub", "None: return if self.run is False: self.pubMessages() return #Initialize object feature values if", "error \" + str(minError) ## Convert the best assignment back to a list", "[] self.labels = [] objFile = open(self.filename, 'r') for line in objFile.readlines(): self.initX.append(line[:-1].split(','))", "return #Initialize object feature values if self.initialized is False: print \"Reading object features", "in clusters: e = self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the label assignment", "is -1: # match.append(None) # ordered.append(None) #else: if minMatch[i] is not -1: #print", "= [] for l in expected: labelErrors = [] for c in clusters:", "possible cluster-label pairs errorMatrix = [] for l in expected: labelErrors = []", "= rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size = 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size =", "or ids is None: time.sleep(1.0) return self.canvas.delete(\"all\") for idx in range(0,len(clusters)): c =", "is None: return if self.run is False: self.pubMessages() return #Initialize object feature values", "\"pause\": self.run = False if ros_data.data == 
\"play\": self.run = True def cbClusters(self,", "sizeDiff(self, c1,c2): size = c2.bb_dims.x * c2.bb_dims.y return abs(size - float(c1[4])) def calculateError(self,", "self.initialized = True print \"Reading object features from \" + self.filename def cbPause(self,", "c1,c2): size = c2.bb_dims.x * c2.bb_dims.y return abs(size - float(c1[4])) def calculateError(self, init,", "+ offset rot.append((-r.real + 0.5) * 500) rot.append((-r.imag + 0.5) * 500) rgb", "self.ids: t = self.transforms[idx] tl = (t.translation.x, t.translation.y, t.translation.z) r = (t.rotation.x, t.rotation.y,", "in range(0,len(clusters)): c = clusters[idx] if c is None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)]", "for idx in a: if idx < len(expected): e += errorMatrix[idx][i] newAssn.append(idx) else:", "in pts: r = cangle * (complex(x,y)-offset) + offset rot.append((-r.real + 0.5) *", "String pf = None display = None initX = None def get_param(name, value=None):", "return rospy.get_param(name) else: return value class filter: def __init__(self): self.labeled = None self.tracked", "idx in a: if idx < len(expected): e += errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1)", "abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size = c2.bb_dims.x * c2.bb_dims.y return abs(size -", "match, ordered, minError class ui: def __init__(self): self.master = Tk() self.canvas = Canvas(self.master,width=800,height=500)", "self.ids = self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX, self.labels, clusters) #Publish", "objFile = open(self.filename, 'r') for line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) +", "x,y in pts: r = cangle * (complex(x,y)-offset) + offset rot.append((-r.real + 0.5)", 
"self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) + ' objects loaded' def hsvDiff(self, c1,c2): hsv1 =", "h2 = float(hsv1[0]) huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff),", "str(errorMatrix[minMatch[i]][i]) + \" + \" match.append(clusters[i]) sMsg = String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg)", "= Header() head.stamp = msgTime self.outMsg.header = head self.outMsg.objects = self.tracked self.outMsg.labels =", "= self.transforms[idx] tl = (t.translation.x, t.translation.y, t.translation.z) r = (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w)", "back to a list of labels match = [] ordered = [] for", "pf = filter() display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ == '__main__': rospy.init_node(\"object_labeling\",", "= complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 # cmath.exp(c.angle*1j) rot = [] for x,y in", "message clusterArr = ros_data clusters = ros_data.objects self.transforms = ros_data.transforms #Classify clusters #self.labeled,", "0 # cmath.exp(c.angle*1j) rot = [] for x,y in pts: r = cangle", "size = self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster) hue = self.hueW", "\" + str(minMatch) + \" with error \" + str(minError) ## Convert the", "= self.satW * satDiff val = self.valW * valDiff total = float(hue +", "if self.initialized is False: print \"Reading object features from \" + self.filename self.loadObjects()", "global pf, display pf = filter() display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__", "#Publish labels if len(clusters) is 0 or self.tracked is None: return self.outMsg =", "tf.TransformBroadcaster() self.hueW = 
get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW =", "range(0,len(clusters)): c = clusters[idx] if c is None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset", "= None topicref = get_param(\"feature_file_rostopic\") if topicref is not None: self.rostopic = os.path.expanduser(topicref)", "startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def drawClusters(self,clusters,ids): if clusters is None or ids", "#Read cluster message clusterArr = ros_data clusters = ros_data.objects self.transforms = ros_data.transforms #Classify", "val + size) return total def getMatchingLabels(self, expected, labels, clusters): ## Evaluate all", "self.outMsg is None or self.transforms is None or self.ids is None: return #Publish", "c2.rgba_color.r g2 = c2.rgba_color.g b2 = c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 =", "(t.translation.x, t.translation.y, t.translation.z) r = (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data,", "Convert the best assignment back to a list of labels match = []", "#return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size =", "str(minError) ## Convert the best assignment back to a list of labels match", "e = 0 i = 0 newAssn = [] for idx in a:", "cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2 = float(hsv1[0]) huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return", "#!/usr/bin/env python import os import 
sys, time, math, cmath from std_msgs.msg import String,", "get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print fileref if fileref is not", "= get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print fileref if fileref is", "self.pubMessages() def pubMessages(self): if self.outMsg is None or self.transforms is None or self.ids", "newAssn minError = e #print \"best: \" + str(minMatch) + \" with error", "i in range(len(minMatch)): #if minMatch[i] is -1: # match.append(None) # ordered.append(None) #else: if", "= Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5 self.waitCount = 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10,", "\" + \" match.append(clusters[i]) sMsg = String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return match,", "String, self.cbFile, queue_size = 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size = 1)", "def get_param(name, value=None): private = \"~%s\" % name if rospy.has_param(private): return rospy.get_param(private) elif", "= open(self.filename, 'r') for line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) + '", "if len(clusters) is 0 or self.tracked is None: return self.outMsg = LabeledObjects() msgTime", "str(len(self.initX)) + ' objects loaded' def hsvDiff(self, c1,c2): hsv1 = c1[1:4] r2 =", "clusters is None or ids is None: time.sleep(1.0) return self.canvas.delete(\"all\") for idx in", "import LabeledObjects from std_msgs.msg import String pf = None display = None initX", "calculateError(self, init, cluster): size = self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster)", "= msgTime self.outMsg.header = head 
self.outMsg.objects = self.tracked self.outMsg.labels = self.ids self.pubMessages() def", "in expected: labelErrors = [] for c in clusters: e = self.calculateError(l, c)", "self.hueW = get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000)", "from hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects from std_msgs.msg import String pf", "= 1) def cbFile(self, ros_data): if self.filename is not ros_data.data: self.filename = ros_data.data", "from \" + self.filename def cbPause(self, ros_data): if ros_data.data == \"pause\": self.run =", "c2.bb_dims.x * c2.bb_dims.y return abs(size - float(c1[4])) def calculateError(self, init, cluster): size =", "10 bold\") self.canvas.pack() def main(args): global pf, display pf = filter() display =", "idx in range(0,len(clusters)): c = clusters[idx] if c is None: continue pts =", "os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray,", "return value class filter: def __init__(self): self.labeled = None self.tracked = None self.errors", "name if rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name) else: return value class", "display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ == '__main__': rospy.init_node(\"object_labeling\", anonymous=False) rospy.loginfo(\"Initializing the", "or self.ids is None: return #Publish labels msgTime = rospy.Time.now() head = Header()", "= None def get_param(name, value=None): private = \"~%s\" % name if rospy.has_param(private): return", "clusters #self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids, self.error =", "msgTime 
self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish transforms idx = 0 for l in", "labels, clusters): ## Evaluate all possible cluster-label pairs errorMatrix = [] for l", "500) rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y", "Find the label assignment that minimizes total error minMatch = None minError =", "assn = itertools.permutations(objList, len(clusters)) for a in assn: e = 0 i =", "display pf = filter() display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ == '__main__':", "from \" + self.filename self.loadObjects() self.initialized = True #Read cluster message clusterArr =", "= 0 newAssn = [] for idx in a: if idx < len(expected):", "clusters = ros_data.objects self.transforms = ros_data.transforms #Classify clusters #self.labeled, self.tracked, self.errors, self.ids =", "self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def main(args): global pf, display pf", "= LabeledObjects() msgTime = rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header =", "abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size = c2.bb_dims.x", "self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster) hue = self.hueW * hueDiff", "ui: def __init__(self): self.master = Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5", "self.loadObjects() self.initialized = True #Read cluster message clusterArr = ros_data clusters = ros_data.objects", "* satDiff val = self.valW * 
valDiff total = float(hue + sat +", "+= errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i += 1 if minMatch is None or", "topicref = get_param(\"feature_file_rostopic\") if topicref is not None: self.rostopic = os.path.expanduser(topicref) self.fileSub =", "= True def cbClusters(self, ros_data): #Wait for filename to be received (if receiving", "satDiff, valDiff = self.hsvDiff(init,cluster) hue = self.hueW * hueDiff sat = self.satW *", "is None or e < minError: minMatch = newAssn minError = e #print", "0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def main(args): global pf, display pf = filter() display", "for l in expected: labelErrors = [] for c in clusters: e =", "\"best: \" + str(minMatch) + \" with error \" + str(minError) ## Convert", "pdb import tf import itertools from Tkinter import * from hlpr_feature_extraction.msg import PcFeatureArray", "+ \" match.append(clusters[i]) sMsg = String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return match, ordered,", "String, self.cbPause, queue_size = 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1) def", "errorMatrix = [] for l in expected: labelErrors = [] for c in", "feature values if self.initialized is False: print \"Reading object features from \" +", "total = float(hue + sat + val + size) return total def getMatchingLabels(self,", "in assn: e = 0 i = 0 newAssn = [] for idx", "valDiff total = float(hue + sat + val + size) return total def", "list of labels match = [] ordered = [] for i in range(len(minMatch)):", "= 0 # cmath.exp(c.angle*1j) rot = [] for x,y in pts: r =", "rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name) else: return value class filter: def __init__(self): self.labeled", "= 0 i = 0 newAssn = [] for idx in a: if", "= cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2 = float(hsv1[0]) huediff = 
math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2))))", "e += errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i += 1 if minMatch is None", "labels[minMatch[i]] ordered.append(sMsg) return match, ordered, minError class ui: def __init__(self): self.master = Tk()", "pubMessages(self): if self.outMsg is None or self.transforms is None or self.ids is None:", "getMatchingLabels(self, expected, labels, clusters): ## Evaluate all possible cluster-label pairs errorMatrix = []", "label assignment that minimizes total error minMatch = None minError = -1 objList", "itertools from Tkinter import * from hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects", "assn: e = 0 i = 0 newAssn = [] for idx in", "return abs(size - float(c1[4])) def calculateError(self, init, cluster): size = self.sizeW * self.sizeDiff(init,cluster)", "\" with error \" + str(minError) ## Convert the best assignment back to", "def drawClusters(self,clusters,ids): if clusters is None or ids is None: time.sleep(1.0) return self.canvas.delete(\"all\")", "t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx += 1 def loadObjects(self): self.initX", "'#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10", "float(c1[4])) def calculateError(self, init, cluster): size = self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff", "def calculateError(self, init, cluster): size = self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff =", "self.waitCount = 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def 
drawClusters(self,clusters,ids): if clusters", "be received (if receiving from rostopic) if self.filename is None: return if self.run", "numpy as np import cv2 import roslib import rospy import pdb import tf", "None: self.rostopic = os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1) self.subscriber", "return total def getMatchingLabels(self, expected, labels, clusters): ## Evaluate all possible cluster-label pairs", "if clusters is None or ids is None: time.sleep(1.0) return self.canvas.delete(\"all\") for idx", "= math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self,", "c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2 = float(hsv1[0]) huediff =", "r = (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx +=", "self.valW * valDiff total = float(hue + sat + val + size) return", "label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def main(args): global pf,", "def loadObjects(self): self.initX = [] self.labels = [] objFile = open(self.filename, 'r') for", "< minError: minMatch = newAssn minError = e #print \"best: \" + str(minMatch)", "assignment that minimizes total error minMatch = None minError = -1 objList =", "import String pf = None display = None initX = None def get_param(name,", "= 0 for l in self.ids: t = self.transforms[idx] tl = (t.translation.x, t.translation.y,", "l in expected: labelErrors = [] for c in clusters: e = self.calculateError(l,", "class ui: def 
__init__(self): self.master = Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout =", "else: return value class filter: def __init__(self): self.labeled = None self.tracked = None", "def cbClusters(self, ros_data): #Wait for filename to be received (if receiving from rostopic)", "False: self.pubMessages() return #Initialize object feature values if self.initialized is False: print \"Reading", "= self.ids self.pubMessages() def pubMessages(self): if self.outMsg is None or self.transforms is None", "#else: if minMatch[i] is not -1: #print str(errorMatrix[minMatch[i]][i]) + \" + \" match.append(clusters[i])", "= msgTime self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish transforms idx = 0 for l", "match.append(None) # ordered.append(None) #else: if minMatch[i] is not -1: #print str(errorMatrix[minMatch[i]][i]) + \"", "'r') for line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) + ' objects loaded'", "return match, ordered, minError class ui: def __init__(self): self.master = Tk() self.canvas =", "fileref = get_param(\"feature_file_location\") print fileref if fileref is not None: self.filename = os.path.expanduser(fileref)", "Header() head.stamp = msgTime self.outMsg.header = head self.outMsg.objects = self.tracked self.outMsg.labels = self.ids", "+= 1 def loadObjects(self): self.initX = [] self.labels = [] objFile = open(self.filename,", "idx += 1 def loadObjects(self): self.initX = [] self.labels = [] objFile =", "the best assignment back to a list of labels match = [] ordered", "= e #print \"best: \" + str(minMatch) + \" with error \" +", "= ros_data.objects self.transforms = ros_data.transforms #Classify clusters #self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX,", "import * from hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects from 
std_msgs.msg import", "None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 # cmath.exp(c.angle*1j)", "errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i += 1 if minMatch is None or e", "is not None: self.rostopic = os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size =", "= self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels", "= rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size = 1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size", "True #Read cluster message clusterArr = ros_data clusters = ros_data.objects self.transforms = ros_data.transforms", "+ str(minError) ## Convert the best assignment back to a list of labels", "return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size = c2.bb_dims.x * c2.bb_dims.y return", "import tf import itertools from Tkinter import * from hlpr_feature_extraction.msg import PcFeatureArray from", "objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) + ' objects loaded' def hsvDiff(self, c1,c2): hsv1", "minError class ui: def __init__(self): self.master = Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout", "self.canvas.pack() self.timeout = 5 self.waitCount = 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling)", "= 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size = 1) self.pauseSub = 
rospy.Subscriber(\"/pause_labeling\",", "#Classify clusters #self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids, self.error", "* 500) rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500,", "= [] ordered = [] for i in range(len(minMatch)): #if minMatch[i] is -1:", "# match.append(None) # ordered.append(None) #else: if minMatch[i] is not -1: #print str(errorMatrix[minMatch[i]][i]) +", "#Initialize object feature values if self.initialized is False: print \"Reading object features from", "= os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\",", "minMatch[i] is not -1: #print str(errorMatrix[minMatch[i]][i]) + \" + \" match.append(clusters[i]) sMsg =", "math, cmath from std_msgs.msg import String, Header import numpy as np import cv2", "+ self.filename def cbPause(self, ros_data): if ros_data.data == \"pause\": self.run = False if", "= None self.initialized = False self.run = True self.br = tf.TransformBroadcaster() self.hueW =", "self.hueW * hueDiff sat = self.satW * satDiff val = self.valW * valDiff", "ros_data clusters = ros_data.objects self.transforms = ros_data.transforms #Classify clusters #self.labeled, self.tracked, self.errors, self.ids", "[] for idx in a: if idx < len(expected): e += errorMatrix[idx][i] newAssn.append(idx)", "+= 1 if minMatch is None or e < minError: minMatch = newAssn", "* c2.bb_dims.y return abs(size - float(c1[4])) def calculateError(self, init, cluster): size = self.sizeW", "self.cbFile, queue_size = 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size = 1) self.pauseSub", "for filename to be received (if receiving from rostopic) if self.filename 
is None:", "e < minError: minMatch = newAssn minError = e #print \"best: \" +", "pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 # cmath.exp(c.angle*1j) rot =", "msgTime = rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header = head self.outMsg.objects", "self.outMsg.objects = self.tracked self.outMsg.labels = self.ids self.pubMessages() def pubMessages(self): if self.outMsg is None", "self.ids self.pubMessages() def pubMessages(self): if self.outMsg is None or self.transforms is None or", "1 def loadObjects(self): self.initX = [] self.labels = [] objFile = open(self.filename, 'r')", "= [] self.labels = [] objFile = open(self.filename, 'r') for line in objFile.readlines():", "def sizeDiff(self, c1,c2): size = c2.bb_dims.x * c2.bb_dims.y return abs(size - float(c1[4])) def", "#Publish labels msgTime = rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header =", "self.satW = get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print", "return rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name) else: return value class filter: def __init__(self):", "hue = self.hueW * hueDiff sat = self.satW * satDiff val = self.valW", "itertools.permutations(objList, len(clusters)) for a in assn: e = 0 i = 0 newAssn", "None or self.transforms is None or self.ids is None: return #Publish labels msgTime", "None self.tracked = None self.errors = None self.ids = None self.labels = None", "huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), 
abs(hsv2[0][0][2]-float(hsv1[2])) def", "r = cangle * (complex(x,y)-offset) + offset rot.append((-r.real + 0.5) * 500) rot.append((-r.imag", "- float(c1[4])) def calculateError(self, init, cluster): size = self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff,", "expected, labels, clusters): ## Evaluate all possible cluster-label pairs errorMatrix = [] for", "rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1) def cbFile(self, ros_data): if self.filename is not ros_data.data:", "' objects loaded' def hsvDiff(self, c1,c2): hsv1 = c1[1:4] r2 = c2.rgba_color.r g2", "time.sleep(1.0) return self.canvas.delete(\"all\") for idx in range(0,len(clusters)): c = clusters[idx] if c is", "self.sizeW = get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print fileref if fileref is not None:", "rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size =", "math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size", "minError = -1 objList = range(max(len(expected), len(clusters))) assn = itertools.permutations(objList, len(clusters)) for a", "minError = e #print \"best: \" + str(minMatch) + \" with error \"", "not ros_data.data: self.filename = ros_data.data self.loadObjects() self.initialized = True print \"Reading object features", "in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) + ' objects loaded' def hsvDiff(self, c1,c2):", "t.translation.y, t.translation.z) r = (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame')", "filter: def 
__init__(self): self.labeled = None self.tracked = None self.errors = None self.ids", "self.error = self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels if len(clusters) is 0 or self.tracked", "main(args): global pf, display pf = filter() display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if", "with error \" + str(minError) ## Convert the best assignment back to a", "clusters) #Publish labels if len(clusters) is 0 or self.tracked is None: return self.outMsg", "minMatch is None or e < minError: minMatch = newAssn minError = e", "return #Publish labels msgTime = rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header", "(c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack()", "queue_size = 1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size = 1) self.orderPub =", "= self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def main(args):", "self.run = True self.br = tf.TransformBroadcaster() self.hueW = get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1)", "the label assignment that minimizes total error minMatch = None minError = -1", "sys, time, math, cmath from std_msgs.msg import String, Header import numpy as np", "__init__(self): self.labeled = None self.tracked = None self.errors = None self.ids = None", "init, cluster): size = self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster) hue", "hsv1 = c1[1:4] r2 = c2.rgba_color.r g2 = c2.rgba_color.g b2 = c2.rgba_color.b hsv2", 
"rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header = head self.outMsg.objects = self.tracked", "error minMatch = None minError = -1 objList = range(max(len(expected), len(clusters))) assn =", "pf = None display = None initX = None def get_param(name, value=None): private", "if ros_data.data == \"play\": self.run = True def cbClusters(self, ros_data): #Wait for filename", "e #print \"best: \" + str(minMatch) + \" with error \" + str(minError)", "msgTime = rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header = head self.orderPub.publish(self.outMsg)", "self.rostopic = os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1) self.subscriber =", "= [] objFile = open(self.filename, 'r') for line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print", "self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels if len(clusters) is 0 or self.tracked is None:", "import sys, time, math, cmath from std_msgs.msg import String, Header import numpy as", "head = Header() head.stamp = msgTime self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish transforms idx", "math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2):", "= c2.rgba_color.r g2 = c2.rgba_color.g b2 = c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1", "abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size = c2.bb_dims.x * c2.bb_dims.y return abs(size", "self.transforms[idx] tl = (t.translation.x, t.translation.y, t.translation.z) r = (t.rotation.x, t.rotation.y, 
t.rotation.z, t.rotation.w) self.br.sendTransform(tl,", "def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def drawClusters(self,clusters,ids): if clusters is None or", "minMatch[i] is -1: # match.append(None) # ordered.append(None) #else: if minMatch[i] is not -1:", "poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def", "self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label = self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def main(args): global", "self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX, self.labels,", "self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster) hue = self.hueW * hueDiff sat =", "self.initialized = True #Read cluster message clusterArr = ros_data clusters = ros_data.objects self.transforms", "None or e < minError: minMatch = newAssn minError = e #print \"best:", "cbPause(self, ros_data): if ros_data.data == \"pause\": self.run = False if ros_data.data == \"play\":", "not -1: #print str(errorMatrix[minMatch[i]][i]) + \" + \" match.append(clusters[i]) sMsg = String() sMsg.data", "rot.append((-r.real + 0.5) * 500) rot.append((-r.imag + 0.5) * 500) rgb = '#%02x%02x%02x'", "= filter() display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ == '__main__': rospy.init_node(\"object_labeling\", anonymous=False)", "= rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, 
queue_size", "fileref if fileref is not None: self.filename = os.path.expanduser(fileref) else: self.filename = None", "clusters[idx] if c is None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle", "print str(len(self.initX)) + ' objects loaded' def hsvDiff(self, c1,c2): hsv1 = c1[1:4] r2", "## Evaluate all possible cluster-label pairs errorMatrix = [] for l in expected:", "= Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5 self.waitCount = 0 def", "= ros_data.data self.loadObjects() self.initialized = True print \"Reading object features from \" +", "= 1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size = 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\",", "print \"Reading object features from \" + self.filename def cbPause(self, ros_data): if ros_data.data", "self.master = Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5 self.waitCount = 0", "tf import itertools from Tkinter import * from hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg", "valDiff = self.hsvDiff(init,cluster) hue = self.hueW * hueDiff sat = self.satW * satDiff", "head self.orderPub.publish(self.outMsg) #Publish transforms idx = 0 for l in self.ids: t =", "else: newAssn.append(-1) i += 1 if minMatch is None or e < minError:", "fileref is not None: self.filename = os.path.expanduser(fileref) else: self.filename = None topicref =", "'kinect_ir_optical_frame') idx += 1 def loadObjects(self): self.initX = [] self.labels = [] objFile", "None: self.filename = os.path.expanduser(fileref) else: self.filename = None topicref = get_param(\"feature_file_rostopic\") if topicref", "== \"pause\": self.run = False if ros_data.data == \"play\": self.run = True def", "from 
hlpr_object_labeling.msg import LabeledObjects from std_msgs.msg import String pf = None display =", "#self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX,", "labels if len(clusters) is 0 or self.tracked is None: return self.outMsg = LabeledObjects()", "None initX = None def get_param(name, value=None): private = \"~%s\" % name if", "= get_param(\"feature_file_rostopic\") if topicref is not None: self.rostopic = os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic,", "satDiff val = self.valW * valDiff total = float(hue + sat + val", "newAssn.append(idx) else: newAssn.append(-1) i += 1 if minMatch is None or e <", "self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size = 1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause,", "def getMatchingLabels(self, expected, labels, clusters): ## Evaluate all possible cluster-label pairs errorMatrix =", "1) def cbFile(self, ros_data): if self.filename is not ros_data.data: self.filename = ros_data.data self.loadObjects()", "Header() head.stamp = msgTime self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish transforms idx = 0", "is None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 #", "[] for c in clusters: e = self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find", "+ self.filename self.loadObjects() self.initialized = True #Read cluster message clusterArr = ros_data clusters", "= get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print fileref", "(complex(x,y)-offset) + offset 
rot.append((-r.real + 0.5) * 500) rot.append((-r.imag + 0.5) * 500)", "for c in clusters: e = self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the", "float(hsv1[0]) huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2]))", "\" + self.filename self.loadObjects() self.initialized = True #Read cluster message clusterArr = ros_data", "continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle = 0 # cmath.exp(c.angle*1j) rot", "= False if ros_data.data == \"play\": self.run = True def cbClusters(self, ros_data): #Wait", "to a list of labels match = [] ordered = [] for i", "minimizes total error minMatch = None minError = -1 objList = range(max(len(expected), len(clusters)))", "cbFile(self, ros_data): if self.filename is not ros_data.data: self.filename = ros_data.data self.loadObjects() self.initialized =", "import roslib import rospy import pdb import tf import itertools from Tkinter import", "get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print fileref if", "rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name) else: return value class filter: def", "drawClusters(self,clusters,ids): if clusters is None or ids is None: time.sleep(1.0) return self.canvas.delete(\"all\") for", "offset rot.append((-r.real + 0.5) * 500) rot.append((-r.imag + 0.5) * 500) rgb =", "= True print \"Reading object features from \" + self.filename def cbPause(self, ros_data):", "print fileref 
if fileref is not None: self.filename = os.path.expanduser(fileref) else: self.filename =", "of labels match = [] ordered = [] for i in range(len(minMatch)): #if", "= -1 objList = range(max(len(expected), len(clusters))) assn = itertools.permutations(objList, len(clusters)) for a in", "\"Reading object features from \" + self.filename self.loadObjects() self.initialized = True #Read cluster", "self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX, self.labels, clusters)", "= None self.labels = None self.initialized = False self.run = True self.br =", "if c is None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle =", "def cbFile(self, ros_data): if self.filename is not ros_data.data: self.filename = ros_data.data self.loadObjects() self.initialized", "(-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def main(args): global pf, display pf =", "for a in assn: e = 0 i = 0 newAssn = []", "not None: self.rostopic = os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1)", "queue_size = 1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size = 1) self.pauseSub =", "self.cbPause, queue_size = 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1) def cbFile(self,", "received (if receiving from rostopic) if self.filename is None: return if self.run is", "value=None): private = \"~%s\" % name if rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name): return", "if self.filename is None: return if self.run is False: self.pubMessages() return #Initialize object", "None self.initialized = False self.run = True 
self.br = tf.TransformBroadcaster() self.hueW = get_param(\"hsv_hue_weight\",", "self.run = False if ros_data.data == \"play\": self.run = True def cbClusters(self, ros_data):", "l.data, 'kinect_ir_optical_frame') idx += 1 def loadObjects(self): self.initX = [] self.labels = []", "or e < minError: minMatch = newAssn minError = e #print \"best: \"", "= self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff, valDiff = self.hsvDiff(init,cluster) hue = self.hueW *", "display = None initX = None def get_param(name, value=None): private = \"~%s\" %", "best assignment back to a list of labels match = [] ordered =", "= \"~%s\" % name if rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name) else:", "private = \"~%s\" % name if rospy.has_param(private): return rospy.get_param(private) elif rospy.has_param(name): return rospy.get_param(name)", "get_param(\"feature_file_rostopic\") if topicref is not None: self.rostopic = os.path.expanduser(topicref) self.fileSub = rospy.Subscriber(self.rostopic, String,", "in a: if idx < len(expected): e += errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i", "self.initX = [] self.labels = [] objFile = open(self.filename, 'r') for line in", "= (t.translation.x, t.translation.y, t.translation.z) r = (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp,", "self.labels.append(line.split(',')[0]) print str(len(self.initX)) + ' objects loaded' def hsvDiff(self, c1,c2): hsv1 = c1[1:4]", "from Tkinter import * from hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects from", "clusters: e = self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the label assignment that", "= os.path.expanduser(fileref) else: self.filename = None topicref = get_param(\"feature_file_rostopic\") if topicref is not", "1) self.orderPub = 
rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1) def cbFile(self, ros_data): if self.filename", "range(max(len(expected), len(clusters))) assn = itertools.permutations(objList, len(clusters)) for a in assn: e = 0", "= None self.tracked = None self.errors = None self.ids = None self.labels =", "= 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def drawClusters(self,clusters,ids): if clusters is", "__init__(self): self.master = Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5 self.waitCount =", "True print \"Reading object features from \" + self.filename def cbPause(self, ros_data): if", "c1,c2): hsv1 = c1[1:4] r2 = c2.rgba_color.r g2 = c2.rgba_color.g b2 = c2.rgba_color.b", "[] objFile = open(self.filename, 'r') for line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX))", "= self.hueW * hueDiff sat = self.satW * satDiff val = self.valW *", "\"Reading object features from \" + self.filename def cbPause(self, ros_data): if ros_data.data ==", "def main(args): global pf, display pf = filter() display = ui() display.master.after(10,display.startDrawing,pf) display.master.mainloop()", "is None: return #Publish labels msgTime = rospy.Time.now() head = Header() head.stamp =", "True def cbClusters(self, ros_data): #Wait for filename to be received (if receiving from", "self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx += 1 def loadObjects(self): self.initX = []", "loaded' def hsvDiff(self, c1,c2): hsv1 = c1[1:4] r2 = c2.rgba_color.r g2 = c2.rgba_color.g", "= float(hsv1[0]) huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), 
abs(hsv2[0][0][1]-float(hsv1[1])),", "1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size = 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects,", "if minMatch is None or e < minError: minMatch = newAssn minError =", "def cbPause(self, ros_data): if ros_data.data == \"pause\": self.run = False if ros_data.data ==", "rospy.has_param(name): return rospy.get_param(name) else: return value class filter: def __init__(self): self.labeled = None", "clusters): ## Evaluate all possible cluster-label pairs errorMatrix = [] for l in", "<reponame>kirmani/hlpr_cadence #!/usr/bin/env python import os import sys, time, math, cmath from std_msgs.msg import", "ros_data.data self.loadObjects() self.initialized = True print \"Reading object features from \" + self.filename", "= self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels if len(clusters) is 0 or self.tracked is", "0 for l in self.ids: t = self.transforms[idx] tl = (t.translation.x, t.translation.y, t.translation.z)", "abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size = c2.bb_dims.x *", "c is None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y) cangle = 0", "ros_data): if ros_data.data == \"pause\": self.run = False if ros_data.data == \"play\": self.run", "t = self.transforms[idx] tl = (t.translation.x, t.translation.y, t.translation.z) r = (t.rotation.x, t.rotation.y, t.rotation.z,", "+ sat + val + size) return total def getMatchingLabels(self, expected, labels, clusters):", "self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size = 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size", "= None 
self.errors = None self.ids = None self.labels = None self.initialized =", "labeling) def drawClusters(self,clusters,ids): if clusters is None or ids is None: time.sleep(1.0) return", "object features from \" + self.filename self.loadObjects() self.initialized = True #Read cluster message", "len(clusters)) for a in assn: e = 0 i = 0 newAssn =", "None: time.sleep(1.0) return self.canvas.delete(\"all\") for idx in range(0,len(clusters)): c = clusters[idx] if c", "ros_data.transforms #Classify clusters #self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids,", "= self.canvas.create_text((-c.points_centroid.x+0.5)*500, (-c.points_centroid.y + 0.5)*500,text=str(ids[idx].data),font=\"Verdana 10 bold\") self.canvas.pack() def main(args): global pf, display", "+ \" with error \" + str(minError) ## Convert the best assignment back", "ordered, minError class ui: def __init__(self): self.master = Tk() self.canvas = Canvas(self.master,width=800,height=500) self.canvas.pack()", "std_msgs.msg import String, Header import numpy as np import cv2 import roslib import", "receiving from rostopic) if self.filename is None: return if self.run is False: self.pubMessages()", "< len(expected): e += errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i += 1 if minMatch", "= 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1) def cbFile(self, ros_data): if", "len(expected): e += errorMatrix[idx][i] newAssn.append(idx) else: newAssn.append(-1) i += 1 if minMatch is", "0 i = 0 newAssn = [] for idx in a: if idx", "## Convert the best assignment back to a list of labels match =", "is None or self.transforms is None or self.ids is None: return #Publish labels", "print \"Reading object features from \" + self.filename self.loadObjects() self.initialized = True #Read", "2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW = 
get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\")", "* from hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects from std_msgs.msg import String", "import itertools from Tkinter import * from hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg import", "sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return match, ordered, minError class ui: def __init__(self): self.master", "self.ids, self.error = self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels if len(clusters) is 0 or", "a list of labels match = [] ordered = [] for i in", "import os import sys, time, math, cmath from std_msgs.msg import String, Header import", "loadObjects(self): self.initX = [] self.labels = [] objFile = open(self.filename, 'r') for line", "get_param(\"hsv_hue_weight\", 2) self.satW = get_param(\"hsv_sat_weight\",1) self.valW = get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000) fileref =", "cmath.exp(c.angle*1j) rot = [] for x,y in pts: r = cangle * (complex(x,y)-offset)", "is False: print \"Reading object features from \" + self.filename self.loadObjects() self.initialized =", "0.5) * 500) rot.append((-r.imag + 0.5) * 500) rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b)", "1) self.subscriber = rospy.Subscriber(\"/beliefs/features\", PcFeatureArray, self.cbClusters, queue_size = 1) self.pauseSub = rospy.Subscriber(\"/pause_labeling\", String,", "None: return #Publish labels msgTime = rospy.Time.now() head = Header() head.stamp = msgTime", "def __init__(self): self.labeled = None self.tracked = None self.errors = None self.ids =", "t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx += 1 def loadObjects(self):", "self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def 
drawClusters(self,clusters,ids): if clusters is None or ids is", "pts: r = cangle * (complex(x,y)-offset) + offset rot.append((-r.real + 0.5) * 500)", "is not ros_data.data: self.filename = ros_data.data self.loadObjects() self.initialized = True print \"Reading object", "Canvas(self.master,width=800,height=500) self.canvas.pack() self.timeout = 5 self.waitCount = 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing,", "self.transforms is None or self.ids is None: return #Publish labels msgTime = rospy.Time.now()", "all possible cluster-label pairs errorMatrix = [] for l in expected: labelErrors =", "= hsv2[0][0][0] h2 = float(hsv1[0]) huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)), math.cos(math.radians(h1-h2)))) #return abs(hsv2[0][0][0]-float(hsv1[0])), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2]))", "self.initialized is False: print \"Reading object features from \" + self.filename self.loadObjects() self.initialized", "= [] for idx in a: if idx < len(expected): e += errorMatrix[idx][i]", "= [] for i in range(len(minMatch)): #if minMatch[i] is -1: # match.append(None) #", "line in objFile.readlines(): self.initX.append(line[:-1].split(',')) self.labels.append(line.split(',')[0]) print str(len(self.initX)) + ' objects loaded' def hsvDiff(self,", "self.run_filter(self.initX, self.labels, clusters) self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels if", "None display = None initX = None def get_param(name, value=None): private = \"~%s\"", "= clusters[idx] if c is None: continue pts = [(c.points_min.x,c.points_min.y),(c.points_min.x,c.points_max.y),(c.points_max.x,c.points_max.y),(c.points_max.x,c.points_min.y)] offset = complex(c.points_centroid.x,c.points_centroid.y)", "String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return match, ordered, minError class ui: def __init__(self):", 
"sMsg = String() sMsg.data = labels[minMatch[i]] ordered.append(sMsg) return match, ordered, minError class ui:", "def hsvDiff(self, c1,c2): hsv1 = c1[1:4] r2 = c2.rgba_color.r g2 = c2.rgba_color.g b2", "os.path.expanduser(fileref) else: self.filename = None topicref = get_param(\"feature_file_rostopic\") if topicref is not None:", "-1: # match.append(None) # ordered.append(None) #else: if minMatch[i] is not -1: #print str(errorMatrix[minMatch[i]][i])", "None topicref = get_param(\"feature_file_rostopic\") if topicref is not None: self.rostopic = os.path.expanduser(topicref) self.fileSub", "= labels[minMatch[i]] ordered.append(sMsg) return match, ordered, minError class ui: def __init__(self): self.master =", "python import os import sys, time, math, cmath from std_msgs.msg import String, Header", "rospy.Subscriber(\"/pause_labeling\", String, self.cbPause, queue_size = 1) self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1)", "self.orderPub = rospy.Publisher(\"/beliefs/labels\", LabeledObjects, queue_size = 1) def cbFile(self, ros_data): if self.filename is", "rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish transforms", "as np import cv2 import roslib import rospy import pdb import tf import", "cv2 import roslib import rospy import pdb import tf import itertools from Tkinter", "(t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx += 1 def", "minMatch = None minError = -1 objList = range(max(len(expected), len(clusters))) assn = itertools.permutations(objList,", "self.run is False: self.pubMessages() return #Initialize object feature values if self.initialized is False:", "LabeledObjects, queue_size = 1) def cbFile(self, ros_data): if self.filename is not ros_data.data: self.filename", "= [] for c in clusters: e = self.calculateError(l, c) labelErrors.append(e) 
errorMatrix.append(labelErrors) ##", "to be received (if receiving from rostopic) if self.filename is None: return if", "head.stamp = msgTime self.outMsg.header = head self.orderPub.publish(self.outMsg) #Publish transforms idx = 0 for", "self.outMsg.header = head self.outMsg.objects = self.tracked self.outMsg.labels = self.ids self.pubMessages() def pubMessages(self): if", "\"play\": self.run = True def cbClusters(self, ros_data): #Wait for filename to be received", "self.valW = get_param(\"hsv_val_weight\",1) self.sizeW = get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print fileref if fileref", "rot.append((-r.imag + 0.5) * 500) rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5)", "= get_param(\"size_weight\",50000) fileref = get_param(\"feature_file_location\") print fileref if fileref is not None: self.filename", "[] ordered = [] for i in range(len(minMatch)): #if minMatch[i] is -1: #", "total error minMatch = None minError = -1 objList = range(max(len(expected), len(clusters))) assn", "labels msgTime = rospy.Time.now() head = Header() head.stamp = msgTime self.outMsg.header = head", "or self.tracked is None: return self.outMsg = LabeledObjects() msgTime = rospy.Time.now() head =", "+ str(minMatch) + \" with error \" + str(minError) ## Convert the best", "= ros_data clusters = ros_data.objects self.transforms = ros_data.transforms #Classify clusters #self.labeled, self.tracked, self.errors,", "bold\") self.canvas.pack() def main(args): global pf, display pf = filter() display = ui()", "time, math, cmath from std_msgs.msg import String, Header import numpy as np import", "None minError = -1 objList = range(max(len(expected), len(clusters))) assn = itertools.permutations(objList, len(clusters)) for", "= c1[1:4] r2 = c2.rgba_color.r g2 = c2.rgba_color.g b2 = c2.rgba_color.b hsv2 =", "get_param(\"feature_file_location\") print fileref if 
fileref is not None: self.filename = os.path.expanduser(fileref) else: self.filename", "self.filename = os.path.expanduser(fileref) else: self.filename = None topicref = get_param(\"feature_file_rostopic\") if topicref is", "labels match = [] ordered = [] for i in range(len(minMatch)): #if minMatch[i]", "np import cv2 import roslib import rospy import pdb import tf import itertools", "display.master.after(10,display.startDrawing,pf) display.master.mainloop() if __name__ == '__main__': rospy.init_node(\"object_labeling\", anonymous=False) rospy.loginfo(\"Initializing the object labeling node\")", "ordered.append(sMsg) return match, ordered, minError class ui: def __init__(self): self.master = Tk() self.canvas", "[] for l in expected: labelErrors = [] for c in clusters: e", "ordered.append(None) #else: if minMatch[i] is not -1: #print str(errorMatrix[minMatch[i]][i]) + \" + \"", "None self.ids = None self.labels = None self.initialized = False self.run = True", "False if ros_data.data == \"play\": self.run = True def cbClusters(self, ros_data): #Wait for", "labelErrors = [] for c in clusters: e = self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors)", "match = [] ordered = [] for i in range(len(minMatch)): #if minMatch[i] is", "0.5) * 500) rgb = '#%02x%02x%02x' % (c.rgba_color.r,c.rgba_color.g,c.rgba_color.b) poly = self.canvas.create_polygon(rot,outline=rgb,fill='white',width=5) label =", "self.tracked = None self.errors = None self.ids = None self.labels = None self.initialized", "c1[1:4] r2 = c2.rgba_color.r g2 = c2.rgba_color.g b2 = c2.rgba_color.b hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'),", "hsv2 = cv2.cvtColor(np.array([[(r2,g2,b2)]],dtype='float32'), cv2.COLOR_RGB2HSV) h1 = hsv2[0][0][0] h2 = float(hsv1[0]) huediff = math.degrees(math.atan2(math.sin(math.radians(h1-h2)),", "= self.hsvDiff(init,cluster) hue = self.hueW * hueDiff sat = self.satW * satDiff val", "tl = (t.translation.x, t.translation.y, 
t.translation.z) r = (t.rotation.x, t.rotation.y, t.rotation.z, t.rotation.w) self.br.sendTransform(tl, r,", "size) return total def getMatchingLabels(self, expected, labels, clusters): ## Evaluate all possible cluster-label", "return self.outMsg = LabeledObjects() msgTime = rospy.Time.now() head = Header() head.stamp = msgTime", "that minimizes total error minMatch = None minError = -1 objList = range(max(len(expected),", "[] for x,y in pts: r = cangle * (complex(x,y)-offset) + offset rot.append((-r.real", "hlpr_feature_extraction.msg import PcFeatureArray from hlpr_object_labeling.msg import LabeledObjects from std_msgs.msg import String pf =", "len(clusters) is 0 or self.tracked is None: return self.outMsg = LabeledObjects() msgTime =", "5 self.waitCount = 0 def startDrawing(self,labeling): self.drawClusters(labeling.tracked,labeling.ids) self.master.after(10, self.startDrawing, labeling) def drawClusters(self,clusters,ids): if", "0 or self.tracked is None: return self.outMsg = LabeledObjects() msgTime = rospy.Time.now() head", "abs(hsv2[0][0][2]-float(hsv1[2])) return abs(huediff), abs(hsv2[0][0][1]-float(hsv1[1])), abs(hsv2[0][0][2]-float(hsv1[2])) def sizeDiff(self, c1,c2): size = c2.bb_dims.x * c2.bb_dims.y", "clusterArr = ros_data clusters = ros_data.objects self.transforms = ros_data.transforms #Classify clusters #self.labeled, self.tracked,", "None self.labels = None self.initialized = False self.run = True self.br = tf.TransformBroadcaster()", "= float(hue + sat + val + size) return total def getMatchingLabels(self, expected,", "queue_size = 1) def cbFile(self, ros_data): if self.filename is not ros_data.data: self.filename =", "labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the label assignment that minimizes total error minMatch =", "self.labeled = None self.tracked = None self.errors = None self.ids = None self.labels", "range(len(minMatch)): #if minMatch[i] is -1: # match.append(None) # ordered.append(None) #else: if minMatch[i] is", 
"[] for i in range(len(minMatch)): #if minMatch[i] is -1: # match.append(None) # ordered.append(None)", "self.labels, clusters) self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels if len(clusters)", "a in assn: e = 0 i = 0 newAssn = [] for", "e = self.calculateError(l, c) labelErrors.append(e) errorMatrix.append(labelErrors) ## Find the label assignment that minimizes", "= range(max(len(expected), len(clusters))) assn = itertools.permutations(objList, len(clusters)) for a in assn: e =", "in range(len(minMatch)): #if minMatch[i] is -1: # match.append(None) # ordered.append(None) #else: if minMatch[i]", "t.rotation.w) self.br.sendTransform(tl, r, self.outMsg.header.stamp, l.data, 'kinect_ir_optical_frame') idx += 1 def loadObjects(self): self.initX =", "std_msgs.msg import String pf = None display = None initX = None def", "self.loadObjects() self.initialized = True print \"Reading object features from \" + self.filename def", "initX = None def get_param(name, value=None): private = \"~%s\" % name if rospy.has_param(private):", "Header import numpy as np import cv2 import roslib import rospy import pdb", "ros_data.objects self.transforms = ros_data.transforms #Classify clusters #self.labeled, self.tracked, self.errors, self.ids = self.run_filter(self.initX, self.labels,", "objects loaded' def hsvDiff(self, c1,c2): hsv1 = c1[1:4] r2 = c2.rgba_color.r g2 =", "cbClusters(self, ros_data): #Wait for filename to be received (if receiving from rostopic) if", "cangle = 0 # cmath.exp(c.angle*1j) rot = [] for x,y in pts: r", "= [] for x,y in pts: r = cangle * (complex(x,y)-offset) + offset", "is False: self.pubMessages() return #Initialize object feature values if self.initialized is False: print", "clusters) self.tracked, self.ids, self.error = self.getMatchingLabels(self.initX, self.labels, clusters) #Publish labels if len(clusters) is", "cluster): size = self.sizeW * self.sizeDiff(init,cluster) hueDiff, satDiff, 
valDiff = self.hsvDiff(init,cluster) hue =", "ros_data): if self.filename is not ros_data.data: self.filename = ros_data.data self.loadObjects() self.initialized = True", "from std_msgs.msg import String, Header import numpy as np import cv2 import roslib" ]
[ "in train mode to finetune) # tokens = roberta.encode('Hello world!') # print(tokens) #", "to finetune) # tokens = roberta.encode('Hello world!') # print(tokens) # assert tokens.tolist() ==", "roberta.encode('Hello world!') # print(tokens) # assert tokens.tolist() == [0, 31414, 232, 328, 2]", "'roberta.base') # roberta.eval() # disable dropout (or leave in train mode to finetune)", "# roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() # disable dropout (or leave in", "print(tokens) # assert tokens.tolist() == [0, 31414, 232, 328, 2] # roberta.decode(tokens) #", "fairseq import torch from fairseq.models.roberta import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() #", "roberta.eval() # disable dropout (or leave in train mode to finetune) tokens =", "the model in fairseq import torch from fairseq.models.roberta import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base',", "train mode to finetune) # tokens = roberta.encode('Hello world!') # print(tokens) # assert", "RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable dropout (or leave in train mode to finetune)", "# disable dropout (or leave in train mode to finetune) tokens = roberta.encode('Hello", "world!') print(tokens) # import torch # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() #", "# assert tokens.tolist() == [0, 31414, 232, 328, 2] # roberta.decode(tokens) # 'Hello", "disable dropout (or leave in train mode to finetune) tokens = roberta.encode('Hello world!')", "# roberta.eval() # disable dropout (or leave in train mode to finetune) #", "= torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() # disable dropout (or leave in train mode", "<filename>main.py # Load the model in fairseq import torch from fairseq.models.roberta import RobertaModel", "(or 
leave in train mode to finetune) # tokens = roberta.encode('Hello world!') #", "(or leave in train mode to finetune) tokens = roberta.encode('Hello world!') print(tokens) #", "# disable dropout (or leave in train mode to finetune) # tokens =", "# Load the model in fairseq import torch from fairseq.models.roberta import RobertaModel roberta", "fairseq.models.roberta import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable dropout (or leave", "model in fairseq import torch from fairseq.models.roberta import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt')", "from fairseq.models.roberta import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable dropout (or", "import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable dropout (or leave in", "= RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable dropout (or leave in train mode to", "mode to finetune) # tokens = roberta.encode('Hello world!') # print(tokens) # assert tokens.tolist()", "torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() # disable dropout (or leave in train mode to", "dropout (or leave in train mode to finetune) tokens = roberta.encode('Hello world!') print(tokens)", "checkpoint_file='model.pt') roberta.eval() # disable dropout (or leave in train mode to finetune) tokens", "# import torch # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() # disable dropout", "roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable dropout (or leave in train mode", "to finetune) tokens = roberta.encode('Hello 
world!') print(tokens) # import torch # roberta =", "= roberta.encode('Hello world!') print(tokens) # import torch # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') #", "disable dropout (or leave in train mode to finetune) # tokens = roberta.encode('Hello", "# tokens = roberta.encode('Hello world!') # print(tokens) # assert tokens.tolist() == [0, 31414,", "torch # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() # disable dropout (or leave", "dropout (or leave in train mode to finetune) # tokens = roberta.encode('Hello world!')", "assert tokens.tolist() == [0, 31414, 232, 328, 2] # roberta.decode(tokens) # 'Hello world!'", "import torch # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() # disable dropout (or", "world!') # print(tokens) # assert tokens.tolist() == [0, 31414, 232, 328, 2] #", "mode to finetune) tokens = roberta.encode('Hello world!') print(tokens) # import torch # roberta", "train mode to finetune) tokens = roberta.encode('Hello world!') print(tokens) # import torch #", "roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() # disable dropout (or leave in train", "leave in train mode to finetune) # tokens = roberta.encode('Hello world!') # print(tokens)", "roberta.encode('Hello world!') print(tokens) # import torch # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval()", "print(tokens) # import torch # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base') # roberta.eval() # disable", "Load the model in fairseq import torch from fairseq.models.roberta import RobertaModel roberta =", "finetune) # tokens = roberta.encode('Hello world!') # print(tokens) # assert tokens.tolist() == [0,", "import torch from fairseq.models.roberta import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable", "RobertaModel roberta = 
RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable dropout (or leave in train", "finetune) tokens = roberta.encode('Hello world!') print(tokens) # import torch # roberta = torch.hub.load('pytorch/fairseq',", "tokens = roberta.encode('Hello world!') print(tokens) # import torch # roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')", "roberta.eval() # disable dropout (or leave in train mode to finetune) # tokens", "in fairseq import torch from fairseq.models.roberta import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval()", "in train mode to finetune) tokens = roberta.encode('Hello world!') print(tokens) # import torch", "leave in train mode to finetune) tokens = roberta.encode('Hello world!') print(tokens) # import", "torch from fairseq.models.roberta import RobertaModel roberta = RobertaModel.from_pretrained(model_name_or_path='./roberta.base', checkpoint_file='model.pt') roberta.eval() # disable dropout", "tokens = roberta.encode('Hello world!') # print(tokens) # assert tokens.tolist() == [0, 31414, 232,", "= roberta.encode('Hello world!') # print(tokens) # assert tokens.tolist() == [0, 31414, 232, 328,", "# print(tokens) # assert tokens.tolist() == [0, 31414, 232, 328, 2] # roberta.decode(tokens)" ]
[ "load_arts_all(): \"\"\"Helper function to load ArticlesAll object for testing.\"\"\" arts = load_arts(add_data=True, n_data=2)", "for testing.\"\"\" arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if add_data: for ind in", "os.path import join as pjoin from lisc.objects.base import Base from lisc.data import Articles,", "testing of plotting functions. Notes ----- This decorator closes all plots prior to", "Parameters ---------- dependency : str The name of an optional dependency to test", "dependency : str The name of an optional dependency to test import of.", "import safe_import from lisc.utils.db import SCDB, create_file_structure, check_directory plt = safe_import('.pyplot', 'matplotlib') ###################################################################################################", "functions for testing lisc.\"\"\" import pkg_resources as pkg from functools import wraps from", "for tests.\"\"\" def __init__(self): # Initialize from normal database object base = pkg.resource_filename(__name__,", "[('A', 'B', 'C', 'D')]) arts.add_data('words', 'Lots of words data.') arts.add_data('keywords', ['lots', 'of', 'keywords'])", "load Base object for testing.\"\"\" base = Base() if set_terms: base.add_terms([['test1', 'test sin'],", "testing lisc.\"\"\" import pkg_resources as pkg from functools import wraps from os.path import", "['science', 'sc']) arts.add_data('authors', [('A', 'B', 'C', 'D')]) arts.add_data('words', 'Lots of words data.') arts.add_data('keywords',", "testing.\"\"\" arts = load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts) return arts_all def plot_test(func): \"\"\"Decorator", "dependency to test import of. \"\"\" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if", "base def load_arts(add_data=False, n_data=1): \"\"\"Helper function to load Articles object for testing.\"\"\" arts", "Notes ----- This decorator closes all plots prior to the test. 
After running", "SCDB object as database object for tests.\"\"\" def __init__(self): # Initialize from normal", "= load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts) return arts_all def plot_test(func): \"\"\"Decorator for simple", "TestDB(SCDB): \"\"\"Overloads the SCDB object as database object for tests.\"\"\" def __init__(self): #", "plt.gca() assert ax.has_data() return wrapper def optional_test(dependency): \"\"\"Decorator to only run a test", "check_directory plt = safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads the SCDB object", "'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads the SCDB object as database object for", "'test sin'], ['test2', 'uh oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1',", "def optional_test(dependency): \"\"\"Decorator to only run a test if the specified optional dependency", "functools import wraps from os.path import join as pjoin from lisc.objects.base import Base", "for testing.\"\"\" base = Base() if set_terms: base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']])", "of words data.') arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return arts", "\"\"\"Decorator for simple testing of plotting functions. Notes ----- This decorator closes all", "as pkg from functools import wraps from os.path import join as pjoin from", "of. 
\"\"\" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if safe_import(dependency): return func(*args, **kwargs)", "'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions') return base def load_arts(add_data=False,", "def create_files(directory): \"\"\"Creates some test term files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w')", "some test term files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close()", "arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A', 'B', 'C', 'D')]) arts.add_data('words',", "object for testing.\"\"\" base = Base() if set_terms: base.add_terms([['test1', 'test sin'], ['test2', 'uh", "decorator closes all plots prior to the test. After running the test function,", "open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w')", "if add_data: for ind in range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc'])", "__init__(self): # Initialize from normal database object base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base)", "for ind in range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A',", "'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A', 'B', 'C', 'D')]) arts.add_data('words', 'Lots of words", "an optional dependency to test import of. 
\"\"\" def decorator(func): @wraps(func) def wrapper(*args,", "excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to load Base object for testing.\"\"\"", "Initialize from normal database object base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def create_files(directory):", "SCDB, create_file_structure, check_directory plt = safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads the", "the test function, it checks an axis was created with data. It therefore", "Articles object for testing.\"\"\" arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if add_data: for", "axis was created with data. It therefore performs a minimal test - asserting", "as pjoin from lisc.objects.base import Base from lisc.data import Articles, ArticlesAll, Term from", "plots exists, with no accuracy checking. 
\"\"\" @wraps(func) def wrapper(*args, **kwargs): plt.close('all') func(*args,", "from lisc.utils.db import SCDB, create_file_structure, check_directory plt = safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class", "open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to load", "from functools import wraps from os.path import join as pjoin from lisc.objects.base import", "for testing lisc.\"\"\" import pkg_resources as pkg from functools import wraps from os.path", "set_terms: base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']],", "Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if add_data: for ind in range(n_data): arts.add_data('ids', 1) arts.add_data('titles',", "arts.add_data('dois', 'doi_str') return arts def load_arts_all(): \"\"\"Helper function to load ArticlesAll object for", "'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return arts def load_arts_all(): \"\"\"Helper function to", "wrapper def optional_test(dependency): \"\"\"Decorator to only run a test if the specified optional", "run a test if the specified optional dependency is present. 
Parameters ---------- dependency", "= plt.gca() assert ax.has_data() return wrapper def optional_test(dependency): \"\"\"Decorator to only run a", "performs a minimal test - asserting the plots exists, with no accuracy checking.", "decorator(func): @wraps(func) def wrapper(*args, **kwargs): if safe_import(dependency): return func(*args, **kwargs) return wrapper return", "base=base) def create_files(directory): \"\"\"Creates some test term files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'),", "test. After running the test function, it checks an axis was created with", "'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to load Base object", "asserting the plots exists, with no accuracy checking. \"\"\" @wraps(func) def wrapper(*args, **kwargs):", "testing.\"\"\" arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if add_data: for ind in range(n_data):", "if set_terms: base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'], ['need',", "a test if the specified optional dependency is present. Parameters ---------- dependency :", "plt.close('all') func(*args, **kwargs) ax = plt.gca() assert ax.has_data() return wrapper def optional_test(dependency): \"\"\"Decorator", "object base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates some test term", "import Articles, ArticlesAll, Term from lisc.core.modutils import safe_import from lisc.utils.db import SCDB, create_file_structure,", "'D')]) arts.add_data('words', 'Lots of words data.') arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois',", "running the test function, it checks an axis was created with data. 
It", "sin'], ['test2', 'uh oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'],", "excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to load Base object for testing.\"\"\" base", "plt = safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads the SCDB object as", "test - asserting the plots exists, with no accuracy checking. \"\"\" @wraps(func) def", "ArticlesAll object for testing.\"\"\" arts = load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts) return arts_all", "if the specified optional dependency is present. Parameters ---------- dependency : str The", "lisc.utils.db import SCDB, create_file_structure, check_directory plt = safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB):", "closes all plots prior to the test. 
After running the test function, it", "wraps from os.path import join as pjoin from lisc.objects.base import Base from lisc.data", "as database object for tests.\"\"\" def __init__(self): # Initialize from normal database object", "'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close()", "load_arts(add_data=False, n_data=1): \"\"\"Helper function to load Articles object for testing.\"\"\" arts = Articles(Term('label',", "object for testing.\"\"\" arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if add_data: for ind", "= safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads the SCDB object as database", "only run a test if the specified optional dependency is present. Parameters ----------", "\"\"\"Helper function to load ArticlesAll object for testing.\"\"\" arts = load_arts(add_data=True, n_data=2) arts_all", "prior to the test. After running the test function, it checks an axis", "['test2', 'uh oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2',", "it checks an axis was created with data. It therefore performs a minimal", "all plots prior to the test. After running the test function, it checks", "was created with data. It therefore performs a minimal test - asserting the", "load ArticlesAll object for testing.\"\"\" arts = load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts) return", "created with data. 
It therefore performs a minimal test - asserting the plots", "\"\"\"Helper function to load Base object for testing.\"\"\" base = Base() if set_terms:", "pjoin from lisc.objects.base import Base from lisc.data import Articles, ArticlesAll, Term from lisc.core.modutils", "################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads the SCDB object as database object for tests.\"\"\"", "lisc.objects.base import Base from lisc.data import Articles, ArticlesAll, Term from lisc.core.modutils import safe_import", "class TestDB(SCDB): \"\"\"Overloads the SCDB object as database object for tests.\"\"\" def __init__(self):", "files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close() excl_file = open(pjoin(check_directory(directory,", "a minimal test - asserting the plots exists, with no accuracy checking. \"\"\"", "plots prior to the test. After running the test function, it checks an", "- asserting the plots exists, with no accuracy checking. \"\"\" @wraps(func) def wrapper(*args,", "function, it checks an axis was created with data. It therefore performs a", "ax.has_data() return wrapper def optional_test(dependency): \"\"\"Decorator to only run a test if the", "dependency is present. Parameters ---------- dependency : str The name of an optional", "function to load Articles object for testing.\"\"\" arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion']))", "data.') arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return arts def load_arts_all():", "the test. 
After running the test function, it checks an axis was created", "lisc.\"\"\" import pkg_resources as pkg from functools import wraps from os.path import join", "base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates some test term files.\"\"\"", "create_file_structure, check_directory plt = safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads the SCDB", "set_clusions=False): \"\"\"Helper function to load Base object for testing.\"\"\" base = Base() if", "= Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if add_data: for ind in range(n_data): arts.add_data('ids', 1)", "from lisc.data import Articles, ArticlesAll, Term from lisc.core.modutils import safe_import from lisc.utils.db import", "oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions')", "arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A', 'B', 'C', 'D')]) arts.add_data('words', 'Lots of", "plot_test(func): \"\"\"Decorator for simple testing of plotting functions. Notes ----- This decorator closes", "Base object for testing.\"\"\" base = Base() if set_terms: base.add_terms([['test1', 'test sin'], ['test2',", "The name of an optional dependency to test import of. 
\"\"\" def decorator(func):", "load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to load Base object for testing.\"\"\" base = Base()", "object for testing.\"\"\" arts = load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts) return arts_all def", "'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to load Base object for", "from normal database object base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates", "**kwargs) ax = plt.gca() assert ax.has_data() return wrapper def optional_test(dependency): \"\"\"Decorator to only", "arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return arts def load_arts_all(): \"\"\"Helper function to load ArticlesAll", "test import of. \"\"\" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if safe_import(dependency): return", "data. It therefore performs a minimal test - asserting the plots exists, with", "optional dependency is present. Parameters ---------- dependency : str The name of an", "'C', 'D')]) arts.add_data('words', 'Lots of words data.') arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years', 2112)", "After running the test function, it checks an axis was created with data.", "ArticlesAll(arts) return arts_all def plot_test(func): \"\"\"Decorator for simple testing of plotting functions. 
Notes", "to load Articles object for testing.\"\"\" arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if", "arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if add_data: for ind in range(n_data): arts.add_data('ids',", "database object base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates some test", "arts def load_arts_all(): \"\"\"Helper function to load ArticlesAll object for testing.\"\"\" arts =", "base = Base() if set_terms: base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']]) if set_clusions:", "= Base() if set_terms: base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']]) if set_clusions: base.add_terms([['yeh',", "base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions')", "arts.add_data('words', 'Lots of words data.') arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str')", "def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if safe_import(dependency): return func(*args, **kwargs) return wrapper", "@wraps(func) def wrapper(*args, **kwargs): if safe_import(dependency): return func(*args, **kwargs) return wrapper return decorator", "term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'),", "load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts) return arts_all def plot_test(func): \"\"\"Decorator for simple testing", "'sc']) arts.add_data('authors', [('A', 'B', 'C', 'D')]) arts.add_data('words', 'Lots of words data.') arts.add_data('keywords', ['lots',", "assert ax.has_data() return wrapper def optional_test(dependency): \"\"\"Decorator to only run a test if", "the SCDB object as database object for tests.\"\"\" def 
__init__(self): # Initialize from", "import SCDB, create_file_structure, check_directory plt = safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads", "'B', 'C', 'D')]) arts.add_data('words', 'Lots of words data.') arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years',", "the specified optional dependency is present. Parameters ---------- dependency : str The name", "open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid')", "optional_test(dependency): \"\"\"Decorator to only run a test if the specified optional dependency is", "ArticlesAll, Term from lisc.core.modutils import safe_import from lisc.utils.db import SCDB, create_file_structure, check_directory plt", "create_files(directory): \"\"\"Creates some test term files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing,", "**kwargs): plt.close('all') func(*args, **kwargs) ax = plt.gca() assert ax.has_data() return wrapper def optional_test(dependency):", "pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates some test term files.\"\"\" term_file =", "= ArticlesAll(arts) return arts_all def plot_test(func): \"\"\"Decorator for simple testing of plotting functions.", "This decorator closes all plots prior to the test. 
After running the test", "\"\"\" @wraps(func) def wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs) ax = plt.gca() assert ax.has_data()", "base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions') return base def load_arts(add_data=False, n_data=1): \"\"\"Helper function to", "wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs) ax = plt.gca() assert ax.has_data() return wrapper def", "the plots exists, with no accuracy checking. \"\"\" @wraps(func) def wrapper(*args, **kwargs): plt.close('all')", "normal database object base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates some", "base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions') return base def", "excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper", "n_data=1): \"\"\"Helper function to load Articles object for testing.\"\"\" arts = Articles(Term('label', ['search'],", "to the test. 
After running the test function, it checks an axis was", "'Lots of words data.') arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return", "1) arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A', 'B', 'C', 'D')]) arts.add_data('words', 'Lots", "'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired')", "excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function", "str The name of an optional dependency to test import of. \"\"\" def", "from os.path import join as pjoin from lisc.objects.base import Base from lisc.data import", "pkg_resources as pkg from functools import wraps from os.path import join as pjoin", "'doi_str') return arts def load_arts_all(): \"\"\"Helper function to load ArticlesAll object for testing.\"\"\"", "with no accuracy checking. \"\"\" @wraps(func) def wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs) ax", ": str The name of an optional dependency to test import of. \"\"\"", "to only run a test if the specified optional dependency is present. 
Parameters", "def load_arts(add_data=False, n_data=1): \"\"\"Helper function to load Articles object for testing.\"\"\" arts =", "['inclusion'], ['exclusion'])) if add_data: for ind in range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals',", "import wraps from os.path import join as pjoin from lisc.objects.base import Base from", "import join as pjoin from lisc.objects.base import Base from lisc.data import Articles, ArticlesAll,", "testing.\"\"\" base = Base() if set_terms: base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']]) if", "for simple testing of plotting functions. Notes ----- This decorator closes all plots", "'uh oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']],", "'w') excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False,", "\"\"\"Helper function to load Articles object for testing.\"\"\" arts = Articles(Term('label', ['search'], ['inclusion'],", "from lisc.objects.base import Base from lisc.data import Articles, ArticlesAll, Term from lisc.core.modutils import", "to load ArticlesAll object for testing.\"\"\" arts = load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts)", "test function, it checks an axis was created with data. It therefore performs", "an axis was created with data. 
It therefore performs a minimal test -", "'blehh'], ['exc2', 'meh']], 'exclusions') return base def load_arts(add_data=False, n_data=1): \"\"\"Helper function to load", "\"\"\" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if safe_import(dependency): return func(*args, **kwargs) return", "arts_all = ArticlesAll(arts) return arts_all def plot_test(func): \"\"\"Decorator for simple testing of plotting", "['exc2', 'meh']], 'exclusions') return base def load_arts(add_data=False, n_data=1): \"\"\"Helper function to load Articles", "= open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'),", "for testing.\"\"\" arts = load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts) return arts_all def plot_test(func):", "range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A', 'B', 'C', 'D')])", "words data.') arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return arts def", "['exclusion'])) if add_data: for ind in range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals', ['science',", "'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions') return base def load_arts(add_data=False, n_data=1): \"\"\"Helper function", "arts_all def plot_test(func): \"\"\"Decorator for simple testing of plotting functions. Notes ----- This", "def load_arts_all(): \"\"\"Helper function to load ArticlesAll object for testing.\"\"\" arts = load_arts(add_data=True,", "checks an axis was created with data. 
It therefore performs a minimal test", "arts.add_data('authors', [('A', 'B', 'C', 'D')]) arts.add_data('words', 'Lots of words data.') arts.add_data('keywords', ['lots', 'of',", "@wraps(func) def wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs) ax = plt.gca() assert ax.has_data() return", "def wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs) ax = plt.gca() assert ax.has_data() return wrapper", "return base def load_arts(add_data=False, n_data=1): \"\"\"Helper function to load Articles object for testing.\"\"\"", "import of. \"\"\" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if safe_import(dependency): return func(*args,", "therefore performs a minimal test - asserting the plots exists, with no accuracy", "safe_import('.pyplot', 'matplotlib') ################################################################################################### ################################################################################################### class TestDB(SCDB): \"\"\"Overloads the SCDB object as database object", "object as database object for tests.\"\"\" def __init__(self): # Initialize from normal database", "minimal test - asserting the plots exists, with no accuracy checking. \"\"\" @wraps(func)", "['search'], ['inclusion'], ['exclusion'])) if add_data: for ind in range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title')", "arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A', 'B', 'C', 'D')]) arts.add_data('words', 'Lots of words data.')", "if set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions') return", "['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions') return base def load_arts(add_data=False, n_data=1):", "optional dependency to test import of. 
\"\"\" def decorator(func): @wraps(func) def wrapper(*args, **kwargs):", "object for tests.\"\"\" def __init__(self): # Initialize from normal database object base =", "same') term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory,", "lisc.core.modutils import safe_import from lisc.utils.db import SCDB, create_file_structure, check_directory plt = safe_import('.pyplot', 'matplotlib')", "= pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates some test term files.\"\"\" term_file", "'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions') return base def load_arts(add_data=False, n_data=1): \"\"\"Helper", "to test import of. \"\"\" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if safe_import(dependency):", "import pkg_resources as pkg from functools import wraps from os.path import join as", "Base from lisc.data import Articles, ArticlesAll, Term from lisc.core.modutils import safe_import from lisc.utils.db", "test if the specified optional dependency is present. Parameters ---------- dependency : str", "no accuracy checking. \"\"\" @wraps(func) def wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs) ax =", "# Initialize from normal database object base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self, base=base) def", "simple testing of plotting functions. 
Notes ----- This decorator closes all plots prior", "return wrapper def optional_test(dependency): \"\"\"Decorator to only run a test if the specified", "join as pjoin from lisc.objects.base import Base from lisc.data import Articles, ArticlesAll, Term", "'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def", "accuracy checking. \"\"\" @wraps(func) def wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs) ax = plt.gca()", "term_file.write('word\\nthing, same') term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file =", "'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to load Base", "ind in range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A', 'B',", "Term from lisc.core.modutils import safe_import from lisc.utils.db import SCDB, create_file_structure, check_directory plt =", "n_data=2) arts_all = ArticlesAll(arts) return arts_all def plot_test(func): \"\"\"Decorator for simple testing of", "of an optional dependency to test import of. \"\"\" def decorator(func): @wraps(func) def", "It therefore performs a minimal test - asserting the plots exists, with no", "is present. Parameters ---------- dependency : str The name of an optional dependency", "arts = load_arts(add_data=True, n_data=2) arts_all = ArticlesAll(arts) return arts_all def plot_test(func): \"\"\"Decorator for", "test term files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close() excl_file", "functions. 
Notes ----- This decorator closes all plots prior to the test. After", "arts.add_data('keywords', ['lots', 'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return arts def load_arts_all(): \"\"\"Helper", "database object for tests.\"\"\" def __init__(self): # Initialize from normal database object base", "'test_db') SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates some test term files.\"\"\" term_file = open(pjoin(check_directory(directory,", "----- This decorator closes all plots prior to the test. After running the", "excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False):", "Articles, ArticlesAll, Term from lisc.core.modutils import safe_import from lisc.utils.db import SCDB, create_file_structure, check_directory", "specified optional dependency is present. Parameters ---------- dependency : str The name of", "load Articles object for testing.\"\"\" arts = Articles(Term('label', ['search'], ['inclusion'], ['exclusion'])) if add_data:", "with data. 
It therefore performs a minimal test - asserting the plots exists,", "= open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w')", "set_clusions: base.add_terms([['yeh', 'definitely'], ['need', 'required']], 'inclusions') base.add_terms([['exc1', 'blehh'], ['exc2', 'meh']], 'exclusions') return base", "= open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'), 'w') excl_file.write('not\\navoid') excl_file.close() def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to", "def __init__(self): # Initialize from normal database object base = pkg.resource_filename(__name__, 'test_db') SCDB.__init__(self,", "excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_exclusions.txt'),", "'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close()", "def load_base(set_terms=False, set_clusions=False): \"\"\"Helper function to load Base object for testing.\"\"\" base =", "tests.\"\"\" def __init__(self): # Initialize from normal database object base = pkg.resource_filename(__name__, 'test_db')", "['lots', 'of', 'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return arts def load_arts_all(): \"\"\"Helper function", "function to load Base object for testing.\"\"\" base = Base() if set_terms: base.add_terms([['test1',", "function to load ArticlesAll object for testing.\"\"\" arts = load_arts(add_data=True, n_data=2) arts_all =", "pkg from functools import wraps from os.path import join as pjoin from lisc.objects.base", "lisc.data import Articles, 
ArticlesAll, Term from lisc.core.modutils import safe_import from lisc.utils.db import SCDB,", "SCDB.__init__(self, base=base) def create_files(directory): \"\"\"Creates some test term files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'),", "term files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same') term_file.close() excl_file =", "exists, with no accuracy checking. \"\"\" @wraps(func) def wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs)", "\"\"\"Helper functions for testing lisc.\"\"\" import pkg_resources as pkg from functools import wraps", "safe_import from lisc.utils.db import SCDB, create_file_structure, check_directory plt = safe_import('.pyplot', 'matplotlib') ################################################################################################### ###################################################################################################", "'w') term_file.write('word\\nthing, same') term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file", "'exclusions') return base def load_arts(add_data=False, n_data=1): \"\"\"Helper function to load Articles object for", "<reponame>jasongfleischer/lisc \"\"\"Helper functions for testing lisc.\"\"\" import pkg_resources as pkg from functools import", "name of an optional dependency to test import of. 
\"\"\" def decorator(func): @wraps(func)", "\"\"\"Creates some test term files.\"\"\" term_file = open(pjoin(check_directory(directory, 'terms'), 'test_terms.txt'), 'w') term_file.write('word\\nthing, same')", "################################################################################################### class TestDB(SCDB): \"\"\"Overloads the SCDB object as database object for tests.\"\"\" def", "import Base from lisc.data import Articles, ArticlesAll, Term from lisc.core.modutils import safe_import from", "term_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'), 'test_inclusions.txt'), 'w') excl_file.write('need\\nrequired') excl_file.close() excl_file = open(pjoin(check_directory(directory, 'terms'),", "---------- dependency : str The name of an optional dependency to test import", "func(*args, **kwargs) ax = plt.gca() assert ax.has_data() return wrapper def optional_test(dependency): \"\"\"Decorator to", "def plot_test(func): \"\"\"Decorator for simple testing of plotting functions. Notes ----- This decorator", "return arts def load_arts_all(): \"\"\"Helper function to load ArticlesAll object for testing.\"\"\" arts", "'meh']], 'exclusions') return base def load_arts(add_data=False, n_data=1): \"\"\"Helper function to load Articles object", "return arts_all def plot_test(func): \"\"\"Decorator for simple testing of plotting functions. Notes -----", "checking. \"\"\" @wraps(func) def wrapper(*args, **kwargs): plt.close('all') func(*args, **kwargs) ax = plt.gca() assert", "plotting functions. 
Notes ----- This decorator closes all plots prior to the test.", "add_data: for ind in range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors',", "in range(n_data): arts.add_data('ids', 1) arts.add_data('titles', 'title') arts.add_data('journals', ['science', 'sc']) arts.add_data('authors', [('A', 'B', 'C',", "\"\"\"Overloads the SCDB object as database object for tests.\"\"\" def __init__(self): # Initialize", "ax = plt.gca() assert ax.has_data() return wrapper def optional_test(dependency): \"\"\"Decorator to only run", "\"\"\"Decorator to only run a test if the specified optional dependency is present.", "present. Parameters ---------- dependency : str The name of an optional dependency to", "Base() if set_terms: base.add_terms([['test1', 'test sin'], ['test2', 'uh oh']]) if set_clusions: base.add_terms([['yeh', 'definitely'],", "to load Base object for testing.\"\"\" base = Base() if set_terms: base.add_terms([['test1', 'test", "'keywords']) arts.add_data('years', 2112) arts.add_data('dois', 'doi_str') return arts def load_arts_all(): \"\"\"Helper function to load", "2112) arts.add_data('dois', 'doi_str') return arts def load_arts_all(): \"\"\"Helper function to load ArticlesAll object", "from lisc.core.modutils import safe_import from lisc.utils.db import SCDB, create_file_structure, check_directory plt = safe_import('.pyplot',", "of plotting functions. Notes ----- This decorator closes all plots prior to the" ]
[ "auxiliary_c]) h = Dense(4 * 4 * 128, activation = 'relu')(h) h =", "epsilon # mean and variance of the prior distribution # mean_train_sup = np.zeros((1,128))", "= np.asarray(FrameNum) x = f['images'] x = np.asarray(x); x = np.transpose(x, [3,2,1,0]) #", "= 123 input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif')", "xrange(0,1): idx1 = 4300 idx2 = 7423 img1 = np.squeeze(x_ori[idx1, :, :, :])", "ordering to python ordering print('x shape:', x.shape) idx_train = np.asarray(np.where(label3 == 0)) idx_test", "= Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name = 'Encoder')", "h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h)", "64, 64 clipvalue = 20 noise_dim = 10 c_dim = num_pp n_dim =", "## batch_size = 256 num_ep = 7 num_pp = 6 epochs = 1000", "dropout=0.3): k = 5 x = Input(input_shape) h = Conv2D(units/8 , (k, k),", "xx in xrange(0,1): idx1 = 4300 idx2 = 7423 img1 = np.squeeze(x_ori[idx1, :,", "num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:', x_train.shape)", "+ z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def sampling(args): z_mean, z_log_var", "LeakyReLU from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import *", "keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D,", "= 'same', activation = 'relu')(h) # 64*64*64 # h = Dropout(dropout)(h) h =", "keras.utils.to_categorical(y_test2, num_ep) 
############################### print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label 1 train', y_train1.shape)", "img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary() decoder", "= (2,2), padding = 'same', activation = 'relu')(h) # 32*32*64 # h =", "h = Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same', activation = 'tanh')(h)", "name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim = z_dim, input_shape =(img_rows,", "'same', activation = 'relu')(h) # 64*64*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h)", "'relu')(h) # 8*6*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3,", "scale=.02) ## load and preprocess the dataset (use FERG for example) ## batch_size", "LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h)", "matplotlib.pyplot as plt import cPickle, random, sys, keras from keras.models import Model from", "MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k), strides = (2,2),", "padding = 'same', activation = 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\") #", "= encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1", "dopt = RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std = 1.0 def KL_loss(y_true, y_pred):", "= f['imdb'] label1 = f['id'] label1 = np.asarray(label1) label1 -= 1 label2 =", "np.asarray(label3) FrameNum = f['fn'] FrameNum = np.asarray(FrameNum) x = f['images'] x = np.asarray(x);", "dataset (use FERG for example) ## batch_size = 256 num_ep = 7 num_pp", "200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = 
Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim", "in xrange(0,1000): c = np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp", "h = Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h)", "np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0 #", "z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec", "keras.models import Model from functools import partial normal = partial(initializers.normal, scale=.02) ## load", "as plt import cPickle, random, sys, keras from keras.models import Model from functools", "clipvalue = 20 noise_dim = 10 c_dim = num_pp n_dim = 10 z_dim", "c = keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c]) img = np.squeeze(img) img =", "x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 =", "name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 * 4 * 128, activation", "(k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 8*6*64 #", "= 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\") # #### reload the trained", "Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim = z_dim, input_shape", "Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 64*64*64", "model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss =", "activation = 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\") 
# #### reload the", "encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary() decoder = model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5')", "= np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5)", "= model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary() #####", "units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary() decoder = model_decoder(z_dim =", "32*32*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h", "3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary() decoder =", "= encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1,", ", units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary() decoder = model_decoder(z_dim", "RMSprop(lr = 0.0003,decay = 1e-6) dopt = RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std", "return Model(x,[z, h2], name = 'Encoder') def model_decoder(z_dim, c_dim): k = 5 x", "images without input image ### def sampling_np( z_mean, z_log_var ): epsilon = np.random.normal(loc=0.,", "= BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides = (2,2),", "num_pp n_dim = 10 z_dim = 128 date = 2018 # print ('Loading", "img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr = np.linspace(0.0,", 
"import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import * from keras.optimizers import", "= keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5)", "= label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori =", "logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name", "= (x_train- 127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test = x_test.astype('float16')", "encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5')", "x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2", "/ 2, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h =", "256 num_ep = 7 num_pp = 6 epochs = 1000 img_rows, img_cols =", "(x - 127.5)/127.5 opt = RMSprop(lr = 0.0003,decay = 1e-6) dopt = RMSprop(lr", "Image from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import numpy as np", "= np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1", "2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h)", "keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image", "def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) 
return", "= np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image", "# h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units , (k,", "batch_size = 256 num_ep = 7 num_pp = 6 epochs = 1000 img_rows,", "return K.mean(kl_loss) def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0.,", "z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) return z_mean +", "= (2,2), padding = 'same', activation = 'relu')(h) # 8*6*64 # h =", "# h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding =", "numpy as np from keras.layers import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations", "date = 2018 # print ('Loading data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished loading....')", "img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without input", "############### Image impanting ############## loc = 'bottom' for pp in xrange(0,1): for xx", "= K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var) / 2) * epsilon", "and variance of the prior distribution # mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128))", "i in xrange(0,num_pp): for xx in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) print(z.shape) c", "random, sys, keras from keras.models import Model from functools import partial normal =", "128 date = 2018 # print ('Loading data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished", "epochs = 1000 img_rows, img_cols = 64, 64 clipvalue = 20 noise_dim =", "sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], 
z_dim), mean=0., stddev=epsilon_std) return z_mean", "name=\"Decoder\") # #### reload the trained weights to implement the anticipated applications#### input_img", "= x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2,", "label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori = y_test1", "= f['images'] x = np.asarray(x); x = np.transpose(x, [3,2,1,0]) # matlab ordering to", "6 epochs = 1000 img_rows, img_cols = 64, 64 clipvalue = 20 noise_dim", "Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units /", "1000 img_rows, img_cols = 64, 64 clipvalue = 20 noise_dim = 10 c_dim", "anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim = 128 units = 256 ee =", "impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate", "import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import", "args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var) / 2)", "k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h", "= num_pp n_dim = 10 z_dim = 128 date = 2018 # print", "h = Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h) logvar = Dense(z_dim, name=\"encoder_sigma\", activation", "= Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h = Flatten()(h) #", "h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k), strides", "distribution # 
mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i in xrange(0,num_pp): for", "'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\") # #### reload the trained weights", "variance of the prior distribution # mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128)) for", "z_dim = 128 date = 2018 # print ('Loading data...') f = h5py.File('FERG_64_64_color.mat')", "BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same', activation =", "1.0, num=1000) for ii in xrange(0,1000): c = np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp)", "decoder.predict([z, c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('synthesis_no_input_'+'pp_'+str(i)+'.tif')", "k = 5 x = Input(input_shape) h = Conv2D(units/8 , (k, k), strides", "label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori = y_test2 x_train = (x_train- 127.5)/127.5 x_test =", "Dense(z_dim, name=\"encoder_mean\")(h) logvar = Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h) # meansigma = Model(x,", "): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean + np.exp(z_log_var /", "Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import * from keras.optimizers import *", "z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var)", "2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 =", "= np.squeeze(x_ori[idx1, :, :, :]) img2 = np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp", "= 1.0 def KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2", "= 0.0003,decay = 1e-6) epsilon_std = 1.0 def KL_loss(y_true, y_pred): z_mean = y_pred[:,", "= 
'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression morphing #####x for xx in xrange(0,1):", "= np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp =", "= Input(input_shape) h = Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x) h", "keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name = 'Encoder') def model_decoder(z_dim, c_dim): k = 5", "Reshape((4, 4, 128))(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides =", "from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from", "= LeakyReLU(0.2)(h) h = Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h) h", "c_dim = num_pp n_dim = 10 z_dim = 128 date = 2018 #", "y_test2_ori = y_test2 x_train = (x_train- 127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train =", "import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import numpy as np from keras.layers import", "= BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same', activation", "import numpy as np from keras.layers import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from", "Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h =", "= BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h)", "z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean + np.exp(z_log_var", "keras.optimizers import * from keras import initializers import matplotlib.pyplot as plt import cPickle,", "image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000) for ii in xrange(0,1000): c = np.ones((1,))*0", 
"c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression morphing #####x for", "y_train2.shape) print('label 2 test', y_test2.shape) # x_ori = (x - 127.5)/127.5 opt =", ", units=units, dropout=0.3) encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) ,", "x_train.astype('float16') x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2", "loc = 'bottom' for pp in xrange(0,1): for xx in xrange(0,8): idx =", "h = Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h)", "keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:',", "127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test = x_test.astype('float16') y_train1 =", "epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean + np.exp(z_log_var / 2)", ":]) img2 = np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2,", "GlobalAveragePooling2D import h5py import numpy as np from keras.layers import Input,merge,Lambda from keras.layers.core", "y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2", "print(z.shape) c = np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c]) img", "/ 2) * epsilon # mean and variance of the prior distribution #", "# mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i in xrange(0,num_pp): for xx", "image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) 
impanted_img[40:55,18:47,:] = 0 # mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp =", "= Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z')", "'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif')", "y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2 * z_dim] kl_loss =", "meansigma = Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar])", "np.asarray(label2) label2 -= 1 label3 = f['set'] label3 = np.asarray(label3) FrameNum = f['fn']", "= Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides = (2,2), padding", "return Model([x,auxiliary_c], h, name=\"Decoder\") # #### reload the trained weights to implement the", "encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary() decoder = model_decoder(z_dim = z_dim, c_dim=c_dim)", "Build the GAN architecture ################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k = 5", "= (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h) #", "image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') ####", "Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import Convolution2D,", "xrange(0,1): for xx in xrange(0,8): idx = 123 input_img = np.squeeze(x_ori[idx,:,:,:]) img =", "arr = 
np.linspace(0.0, 1.0, num=1000) for ii in xrange(0,1000): c = np.ones((1,))*0 c", "= Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0 # mouth blocked", "img1 = np.squeeze(x_ori[idx1, :, :, :]) img2 = np.squeeze(x_ori[idx2, :, :, :]) z_1,", "morphing #####x for xx in xrange(0,1): idx1 = 4300 idx2 = 7423 img1", "= 'same', activation = 'relu')(h) # 32*32*64 # h = Dropout(dropout)(h) h =", "GAN architecture ################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k = 5 x =", "################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k = 5 x = Input(input_shape) h", "z_mean, z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean +", "#####x for xx in xrange(0,1): idx1 = 4300 idx2 = 7423 img1 =", "128))(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides = (2,2), padding", "np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) #", "print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 =", "loading....') f = f['imdb'] label1 = f['id'] label1 = np.asarray(label1) label1 -= 1", "8*6*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides", "= 1e-6) dopt = RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std = 1.0 def", "image.save('test_replaced_pp1'+'.tif') #### Generate images without input image ### def sampling_np( z_mean, z_log_var ):", "z_mean + np.exp(z_log_var / 2) * epsilon # mean and variance of the", "noise_dim = 10 c_dim = num_pp n_dim = 10 z_dim = 128 date", "= f['set'] label3 = np.asarray(label3) FrameNum = f['fn'] FrameNum = np.asarray(FrameNum) x =", "label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] 
y_test1_ori = y_test1 y_test2_ori = y_test2 x_train = (x_train-", "0)) idx_test = np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test =", "import LeakyReLU from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import", "np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 =", "############################### print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label 1 train', y_train1.shape) print('label 1", "xrange(0,1000): c = np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp =", "in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) print(z.shape) c = np.ones(1,)*i c = keras.utils.to_categorical(c,", "= 256 num_ep = 7 num_pp = 6 epochs = 1000 img_rows, img_cols", "= Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units", "= np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering print('x shape:', x.shape) idx_train", "0 # mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c", "('Loading data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f = f['imdb'] label1 =", "np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image", "256 ee = 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') #", "import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional 
import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D,", "K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def sampling(args): z_mean, z_log_var = args epsilon", "'same', activation = 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\") # #### reload", "= y_pred[:, z_dim:2 * z_dim] kl_loss = - 0.5 * K.sum(1 + z_log_var", "name = 'Encoder') def model_decoder(z_dim, c_dim): k = 5 x = Input(shape =", "= np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c]) img = np.squeeze(img)", "logvar = Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h) # meansigma = Model(x, [mean, logsigma],name='encoder')", "xx in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) print(z.shape) c = np.ones(1,)*i c =", "# auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 *", "= Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return", "model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression", "idx_train = np.asarray(np.where(label3 == 0)) idx_test = np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train", "= LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h = Flatten()(h) # h = Dense(latent_dim,", "image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ############## loc = 'bottom' for pp in xrange(0,1):", "num_pp) img = decoder.predict([z, c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image =", "np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c]) img = np.squeeze(img) img", "= 'relu')(h) h = Reshape((4, 4, 128))(h) # h = LeakyReLU(0.2)(h) h =", "= np.asarray(np.where(label3 == 0)) idx_test = 
np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train =", "= model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3) encoder =", "img_cols, 1) , units=units, dropout=0.3) encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols,", "= model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss", "the dataset (use FERG for example) ## batch_size = 256 num_ep = 7", "image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2)", "123 input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img", "LeakyReLU(0.2)(h) h = Conv2D(units / 2, (k, k), strides = (2,2), border_mode='same')(h) h", "matlab ordering to python ordering print('x shape:', x.shape) idx_train = np.asarray(np.where(label3 == 0))", "(k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) #", "= Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 * 4 *", "= np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0", "ii in xrange(0,1000): c = np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii])", "h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides = (2,2),", "* epsilon ############ Build the GAN architecture ################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3):", "- 127.5)/127.5 opt = RMSprop(lr = 0.0003,decay = 1e-6) dopt = RMSprop(lr =", "Model(x,[z, h2], name = 'Encoder') def model_decoder(z_dim, c_dim): k = 
5 x =", "mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:])", "Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB')", "h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h =", "y_test2 x_train = (x_train- 127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test", "= z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression morphing", "= - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return", "implement the anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim = 128 units = 256", "= BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h", "KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2 * z_dim] kl_loss", "z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5)", "Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z,", "c_dim): k = 5 x = Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c')", "var_train_sup) print(z.shape) c = np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c])", "np.asarray(FrameNum) x = f['images'] x = np.asarray(x); x = np.transpose(x, [3,2,1,0]) # matlab", "os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" from PIL import Image from keras.layers import Conv2D, MaxPooling2D, 
GlobalAveragePooling2D", "= Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ############## loc = 'bottom' for", "Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h =", "decoder.summary() ##### expression morphing #####x for xx in xrange(0,1): idx1 = 4300 idx2", "import * from keras import initializers import matplotlib.pyplot as plt import cPickle, random,", "z_dim] kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var),", "# 64*64*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h)", "[3,2,1,0]) # matlab ordering to python ordering print('x shape:', x.shape) idx_train = np.asarray(np.where(label3", "k), strides = (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h", "h = LeakyReLU(0.2)(h) h = Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h)", "var_train_sup = np.ones((1,128)) for i in xrange(0,num_pp): for xx in xrange(0,100): z =", "import Model from functools import partial normal = partial(initializers.normal, scale=.02) ## load and", "keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization", "= keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 =", "x_test = (x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1,", "= keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5)", "functools import partial normal = partial(initializers.normal, scale=.02) ## load and preprocess the dataset", "= Reshape((4, 4, 128))(h) # h = 
LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides", "= np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img", "prior distribution # mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i in xrange(0,num_pp):", "FERG for example) ## batch_size = 256 num_ep = 7 num_pp = 6", "7 num_pp = 6 epochs = 1000 img_rows, img_cols = 64, 64 clipvalue", "z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer =", "= AveragePooling2D((6,6))(h) h = Flatten()(h) # h = Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim,", "= np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5)", "'binary_crossentropy',optimizer = opt) encoder.summary() decoder = model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss =", "z_dim), ) return z_mean + np.exp(z_log_var / 2) * epsilon # mean and", "MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import * from keras.optimizers import * from", "k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h =", "strides = (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h =", "f['images'] x = np.asarray(x); x = np.transpose(x, [3,2,1,0]) # matlab ordering to python", "2 train', y_train2.shape) print('label 2 test', y_test2.shape) # x_ori = (x - 127.5)/127.5", "(use FERG for example) ## batch_size = 256 num_ep = 7 num_pp =", "import * from keras.optimizers import * from keras import initializers import matplotlib.pyplot as", "= Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 
'relu')(h) #", "img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ###############", "= img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images", "= np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ############## loc", "z_mean + K.exp((z_log_var) / 2) * epsilon ############ Build the GAN architecture #################", "= opt) encoder.summary() decoder = model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer", "BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h =", "epsilon ############ Build the GAN architecture ################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k", "= partial(initializers.normal, scale=.02) ## load and preprocess the dataset (use FERG for example)", "# 8*6*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k),", "n_dim = 10 z_dim = 128 date = 2018 # print ('Loading data...')", "'bottom' for pp in xrange(0,1): for xx in xrange(0,8): idx = 123 input_img", "np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without input image ###", "keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import numpy as np from keras.layers", "1e-6) dopt = RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std = 1.0 def KL_loss(y_true,", "(k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 32*32*64 #", "Image impanting ############## loc = 'bottom' for pp in xrange(0,1): for xx in", "x_test.shape) print('label 1 train', y_train1.shape) print('label 1 test', y_test1.shape) 
print('label 2 train', y_train2.shape)", "ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import * from keras.optimizers import * from keras", "2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units , (k, k), strides = (2,2),", "h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2,", "= (2,2), padding = 'same', activation = 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h,", "= Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h", "of the prior distribution # mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i", "= encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec =", "border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h", "= RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std = 1.0 def KL_loss(y_true, y_pred): z_mean", "shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]]", "# matlab ordering to python ordering print('x shape:', x.shape) idx_train = np.asarray(np.where(label3 ==", "image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0 # mouth", "mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c = keras.utils.to_categorical(c,", "- K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def sampling(args): z_mean, z_log_var = args", "from keras.layers.normalization import * from keras.optimizers import * from keras import initializers import", "Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h) 
logvar = Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h)", "= Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1)", "# ############### Image impanting ############## loc = 'bottom' for pp in xrange(0,1): for", "input image ### def sampling_np( z_mean, z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0],", "Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without input image ### def sampling_np( z_mean,", "border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h =", "= 256 ee = 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z')", "(2,2), padding = 'same', activation = 'relu')(h) # 8*6*64 # h = Dropout(dropout)(h)", "auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols,", "=(img_rows, img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary()", "# plt.imshow(img2) image = Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000) for", "# generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3)", "(2,2), padding = 'same', activation = 'relu')(h) # 64*64*64 # h = Dropout(dropout)(h)", "(k,k), strides = (2,2), padding = 'same', activation = 'tanh')(h) # 8*6*64 return", "border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h", "= 'binary_crossentropy',optimizer = opt) encoder.summary() decoder = model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss", "= RMSprop(lr = 0.0003,decay = 1e-6) dopt = 
RMSprop(lr = 0.0003,decay = 1e-6)", "image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without input image ### def", "z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name =", "impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0 # mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis", "= np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec)", "print('label 2 test', y_test2.shape) # x_ori = (x - 127.5)/127.5 opt = RMSprop(lr", "xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) print(z.shape) c = np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp)", "Input(input_shape) h = Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x) h =", "'same', activation = 'relu')(h) # 8*6*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h)", "in xrange(0,1): for xx in xrange(0,8): idx = 123 input_img = np.squeeze(x_ori[idx,:,:,:]) img", "keras.layers import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional", ":, :, :]) img2 = np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1,", "strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h =", "# h = Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h) logvar = Dense(z_dim, name=\"encoder_sigma\",", "= 'relu')(h) # 64*64*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h", "f = f['imdb'] label1 = f['id'] label1 = np.asarray(label1) label1 -= 1 label2", "MaxPooling2D, GlobalAveragePooling2D import h5py import numpy as 
np from keras.layers import Input,merge,Lambda from", "output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name = 'Encoder') def model_decoder(z_dim,", "h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same',", "= np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr", "from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import numpy as np from", "c = np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c]) img =", "(x_train- 127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test = x_test.astype('float16') y_train1", "Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h =", "Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import numpy as np from keras.layers import Input,merge,Lambda", "(2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h)", "1 label2 = f['ep'] label2 = np.asarray(label2) label2 -= 1 label3 = f['set']", "strides = (2,2), padding = 'same', activation = 'relu')(h) # 64*64*64 # h", "plt import cPickle, random, sys, keras from keras.models import Model from functools import", "print ('Finished loading....') f = f['imdb'] label1 = f['id'] label1 = np.asarray(label1) label1", "= (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h =", "name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h = Dense(4", "for i in xrange(0,num_pp): for xx in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) print(z.shape)", "Model from functools import partial normal = partial(initializers.normal, scale=.02) 
## load and preprocess", "BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h", "encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 =", "= np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5)", "* from keras import initializers import matplotlib.pyplot as plt import cPickle, random, sys,", "0.0003,decay = 1e-6) epsilon_std = 1.0 def KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim]", "= 6 epochs = 1000 img_rows, img_cols = 64, 64 clipvalue = 20", "= MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units , (k, k), strides", "y_pred[:, z_dim:2 * z_dim] kl_loss = - 0.5 * K.sum(1 + z_log_var -", "64 clipvalue = 20 noise_dim = 10 c_dim = num_pp n_dim = 10", ") return z_mean + np.exp(z_log_var / 2) * epsilon # mean and variance", "'relu')(h) # 64*64*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h =", "img = np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] =", "generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3) encoder", "impanting ############## loc = 'bottom' for pp in xrange(0,1): for xx in xrange(0,8):", "=(img_rows, img_cols, 1) , units=units, dropout=0.3) encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows,", "FrameNum = np.asarray(FrameNum) x = f['images'] x = np.asarray(x); x = np.transpose(x, [3,2,1,0])", "= 'bottom' for pp in xrange(0,1): for xx in xrange(0,8): idx = 123", "= np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean + np.exp(z_log_var / 2) *", "auxiliary_c = 
Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim =", "architecture ################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k = 5 x = Input(input_shape)", "k = 5 x = Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') #", "= 'relu')(h) # 32*32*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h", "def model_decoder(z_dim, c_dim): k = 5 x = Input(shape = (z_dim,)) auxiliary_c =", "= 1e-6) epsilon_std = 1.0 def KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var", "logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name = 'Encoder') def model_decoder(z_dim, c_dim):", "np.asarray(np.where(label3 == 0)) idx_test = np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:]", "keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2,", "z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img = np.squeeze(img) img", "the trained weights to implement the anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim =", "os,random os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" from PIL import Image from keras.layers import Conv2D, MaxPooling2D,", "= h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f = f['imdb'] label1 = f['id'] label1 =", "= 128 units = 256 ee = 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z", "'same', activation = 'relu')(h) # 32*32*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h)", "example) ## batch_size = 256 num_ep = 7 num_pp = 6 epochs =", "LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h = Flatten()(h) # h = Dense(latent_dim, 
name=\"encoder_mu\")(h)", "num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep)", "LeakyReLU(0.2)(h) h = Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h) h =", "print ('Loading data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f = f['imdb'] label1", "activation = 'relu')(h) h = Reshape((4, 4, 128))(h) # h = LeakyReLU(0.2)(h) h", "print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label 1 train', y_train1.shape) print('label 1 test',", "units=512, dropout=0.3): k = 5 x = Input(input_shape) h = Conv2D(units/8 , (k,", "##### expression morphing #####x for xx in xrange(0,1): idx1 = 4300 idx2 =", "axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif')", "mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image =", "(2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h", "= np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without input image", "scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean + np.exp(z_log_var / 2) * epsilon #", "h = AveragePooling2D((6,6))(h) h = Flatten()(h) # h = Dense(latent_dim, name=\"encoder_mu\")(h) mean =", "FrameNum = f['fn'] FrameNum = np.asarray(FrameNum) x = f['images'] x = np.asarray(x); x", "dropout=0.3) encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3)", "label3 = f['set'] label3 = np.asarray(label3) FrameNum = f['fn'] FrameNum = np.asarray(FrameNum) x", "K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), 
axis=-1) return K.mean(kl_loss) def sampling(args): z_mean,", "= Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h", "= 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') # generator =", "0.0003,decay = 1e-6) dopt = RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std = 1.0", "applications#### input_img = Input((img_rows,img_cols,3)) z_dim = 128 units = 256 ee = 200", "h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name = 'Encoder') def model_decoder(z_dim, c_dim): k", "=np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2", "and preprocess the dataset (use FERG for example) ## batch_size = 256 num_ep", "1.0 def KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2 *", "label2 -= 1 label3 = f['set'] label3 = np.asarray(label3) FrameNum = f['fn'] FrameNum", "x = np.asarray(x); x = np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering", "y_test1.shape) print('label 2 train', y_train2.shape) print('label 2 test', y_test2.shape) # x_ori = (x", "the GAN architecture ################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k = 5 x", "print('x shape:', x.shape) idx_train = np.asarray(np.where(label3 == 0)) idx_test = np.asarray(np.where(label3 == 1))", "img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img =", "h = Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h)", "model_decoder(z_dim, c_dim): k = 5 x = Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,),", "# mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = 
encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c =", "in xrange(0,8): idx = 123 input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image =", "= \"tensorflow\" from PIL import Image from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import", "* K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def sampling(args):", "Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides", "units=units, dropout=0.3) encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) , units=units,", "ordering print('x shape:', x.shape) idx_train = np.asarray(np.where(label3 == 0)) idx_test = np.asarray(np.where(label3 ==", "- 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(kl_loss)", "= LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h) h =", "Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h) # meansigma = Model(x, [mean, logsigma],name='encoder') z =", "K.mean(kl_loss) def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std)", "activation = 'relu')(h) # 64*64*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) #", "5 x = Input(input_shape) h = Conv2D(units/8 , (k, k), strides = (2,2),", "h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h =", "Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units/4, (k,", "MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units , (k, k), strides =", ":]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2))", "opt) decoder.summary() ##### expression morphing #####x for xx in 
xrange(0,1): idx1 = 4300", "input_shape, units=512, dropout=0.3): k = 5 x = Input(input_shape) h = Conv2D(units/8 ,", "'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ############## loc = 'bottom' for pp in", "z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image", "data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f = f['imdb'] label1 = f['id']", "f['set'] label3 = np.asarray(label3) FrameNum = f['fn'] FrameNum = np.asarray(FrameNum) x = f['images']", "padding = 'same', activation = 'relu')(h) # 8*6*64 # h = Dropout(dropout)(h) h", "x_train.shape) print('x_test shape:', x_test.shape) print('label 1 train', y_train1.shape) print('label 1 test', y_test1.shape) print('label", "keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape)", "input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3) encoder = model_encoder(z_dim = z_dim, input_shape", "np.asarray(x); x = np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering print('x shape:',", "auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c])", "keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import * from keras.optimizers", "= y_test1 y_test2_ori = y_test2 x_train = (x_train- 127.5)/127.5 x_test = (x_test- 127.5)/127.5", "Flatten()(h) # h = Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h) logvar = Dense(z_dim,", "keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 * 4 * 128, activation = 'relu')(h) h", "python ordering print('x shape:', x.shape) idx_train = 
np.asarray(np.where(label3 == 0)) idx_test = np.asarray(np.where(label3", "= 2018 # print ('Loading data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f", "= z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img = np.squeeze(img) img =", "def sampling_np( z_mean, z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return", "import initializers import matplotlib.pyplot as plt import cPickle, random, sys, keras from keras.models", "= keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name = 'Encoder') def model_decoder(z_dim, c_dim): k =", "1e-6) epsilon_std = 1.0 def KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var =", "(k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 64*64*64 #", "img_rows, img_cols = 64, 64 clipvalue = 20 noise_dim = 10 c_dim =", "np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ############## loc =", "= Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k),", "x = Input(input_shape) h = Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x)", "= MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k), strides =", "h = Reshape((4, 4, 128))(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k),", "np.ones((1,128)) for i in xrange(0,num_pp): for xx in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup)", "y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori = y_test2 x_train", "= np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img,", "mean and variance of the prior distribution # mean_train_sup = 
np.zeros((1,128)) var_train_sup =", "y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ###############################", "## load and preprocess the dataset (use FERG for example) ## batch_size =", "# #### reload the trained weights to implement the anticipated applications#### input_img =", "Conv2D(units / 2, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h", "= np.asarray(x); x = np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering print('x", "load and preprocess the dataset (use FERG for example) ## batch_size = 256", "BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding", "name=\"encoder_sigma\", activation = 'sigmoid')(h) # meansigma = Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling,", "= 'same', activation = 'relu')(h) # 8*6*64 # h = Dropout(dropout)(h) h =", "ee = 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') # generator", "Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) ,", "img_cols = 64, 64 clipvalue = 20 noise_dim = 10 c_dim = num_pp", "= 0.0003,decay = 1e-6) dopt = RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std =", "test', y_test2.shape) # x_ori = (x - 127.5)/127.5 opt = RMSprop(lr = 0.0003,decay", "= y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2 * z_dim] kl_loss = - 0.5", "= np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image =", "img = decoder.predict([z, c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img,", "= 128 date = 2018 # print ('Loading data...') f = h5py.File('FERG_64_64_color.mat') print", "np.squeeze(img_rec) img = 
np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image", "y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2 * z_dim] kl_loss = - 0.5 *", "z_log_var = y_pred[:, z_dim:2 * z_dim] kl_loss = - 0.5 * K.sum(1 +", "= LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation", "num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image =", "* z_dim] kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) -", "############## loc = 'bottom' for pp in xrange(0,1): for xx in xrange(0,8): idx", "= args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var) /", "kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)", "PIL import Image from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import numpy", "('Finished loading....') f = f['imdb'] label1 = f['id'] label1 = np.asarray(label1) label1 -=", "label1 = np.asarray(label1) label1 -= 1 label2 = f['ep'] label2 = np.asarray(label2) label2", "np.exp(z_log_var / 2) * epsilon # mean and variance of the prior distribution", "impanted_img[40:55,18:47,:] = 0 # mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c =", "activation = 'relu')(h) # 32*32*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) #", "image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif')", "Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h = Flatten()(h) # h", "# print ('Loading data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f = f['imdb']", "z 
= sampling_np(mean_train_sup, var_train_sup) print(z.shape) c = np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img", "label1 -= 1 label2 = f['ep'] label2 = np.asarray(label2) label2 -= 1 label3", "decoder = model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary()", "(k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h", "= x_train.astype('float16') x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp)", "= keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 * 4 * 128, activation = 'relu')(h)", "10 c_dim = num_pp n_dim = 10 z_dim = 128 date = 2018", "keras import initializers import matplotlib.pyplot as plt import cPickle, random, sys, keras from", "#### Generate images without input image ### def sampling_np( z_mean, z_log_var ): epsilon", "y_test1 y_test2_ori = y_test2 x_train = (x_train- 127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train", "Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h)", "= MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units / 2, (k, k),", "initializers import matplotlib.pyplot as plt import cPickle, random, sys, keras from keras.models import", "h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h = Flatten()(h)", "= LeakyReLU(0.2)(h) h = Conv2D(units / 2, (k, k), strides = (2,2), border_mode='same')(h)", "from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization import * from", "MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units / 2, (k, k), strides", "num_pp = 6 epochs = 1000 img_rows, img_cols = 64, 64 clipvalue =", 
"name=\"encoder_mean\")(h) logvar = Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h) # meansigma = Model(x, [mean,", "c = np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim))", "for xx in xrange(0,8): idx = 123 input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5)", "(2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h)", "z_dim:2 * z_dim] kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean)", "K.exp((z_log_var) / 2) * epsilon ############ Build the GAN architecture ################# def model_encoder(z_dim,", "(z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x,", "LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation =", ":, :, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0))", "expression morphing #####x for xx in xrange(0,1): idx1 = 4300 idx2 = 7423", "7423 img1 = np.squeeze(x_ori[idx1, :, :, :]) img2 = np.squeeze(x_ori[idx2, :, :, :])", "decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression morphing #####x for xx", "partial(initializers.normal, scale=.02) ## load and preprocess the dataset (use FERG for example) ##", "Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h", "h5py import numpy as np from keras.layers import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten", "np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec = 
np.squeeze(img_rec) img", "img = decoder.predict([z_interp,c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB')", "z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression morphing #####x", "h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same',", "'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0 # mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp", ":, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2,", "from keras.models import Model from functools import partial normal = partial(initializers.normal, scale=.02) ##", "2018 # print ('Loading data...') f = h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f =", "np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering print('x shape:', x.shape) idx_train =", "RMSprop(lr = 0.0003,decay = 1e-6) epsilon_std = 1.0 def KL_loss(y_true, y_pred): z_mean =", "img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img =", "20 noise_dim = 10 c_dim = num_pp n_dim = 10 z_dim = 128", "= keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label 1 train',", "h = LeakyReLU(0.2)(h) h = Conv2D(units / 2, (k, k), strides = (2,2),", "num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img = np.squeeze(img)", "'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression morphing #####x for xx in xrange(0,1): idx1", "np.squeeze(x_ori[idx,:,:,:]) img = 
np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:]", "= 7423 img1 = np.squeeze(x_ori[idx1, :, :, :]) img2 = np.squeeze(x_ori[idx2, :, :,", "stddev=epsilon_std) return z_mean + K.exp((z_log_var) / 2) * epsilon ############ Build the GAN", "img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without", "= decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif')", "x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]]", "np.linspace(0.0, 1.0, num=1000) for ii in xrange(0,1000): c = np.ones((1,))*0 c = keras.utils.to_categorical(c,", "image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image =", "z_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var) / 2) * epsilon ############ Build", "encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB')", "units = 256 ee = 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,),", "import partial normal = partial(initializers.normal, scale=.02) ## load and preprocess the dataset (use", "Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 * 4 * 128,", "encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c])", "strides = (2,2), padding = 
'same', activation = 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c],", "= np.asarray(label2) label2 -= 1 label3 = f['set'] label3 = np.asarray(label3) FrameNum =", "decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img", "opt) encoder.summary() decoder = model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer =", "= f['fn'] FrameNum = np.asarray(FrameNum) x = f['images'] x = np.asarray(x); x =", "= np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0 # mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0))", "### def sampling_np( z_mean, z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), )", "h = Flatten()(h) # h = Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h) logvar", "partial normal = partial(initializers.normal, scale=.02) ## load and preprocess the dataset (use FERG", "= x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 =", "= 5 x = Input(input_shape) h = Conv2D(units/8 , (k, k), strides =", "'Encoder') def model_decoder(z_dim, c_dim): k = 5 x = Input(shape = (z_dim,)) auxiliary_c", "h = keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 * 4 * 128, activation =", "# h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides = (2,2), padding =", "in xrange(0,num_pp): for xx in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) print(z.shape) c =", "0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def", "Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:]) 
impanted_img[40:55,18:47,:] = 0 # mouth blocked print('impanted_img',impanted_img.shape)", "np from keras.layers import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU", "preprocess the dataset (use FERG for example) ## batch_size = 256 num_ep =", "x_ori = (x - 127.5)/127.5 opt = RMSprop(lr = 0.0003,decay = 1e-6) dopt", "= 20 noise_dim = 10 c_dim = num_pp n_dim = 10 z_dim =", "mean = Dense(z_dim, name=\"encoder_mean\")(h) logvar = Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h) # meansigma", "h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h)", "np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img =", "x = np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering print('x shape:', x.shape)", "label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori = y_test2", "= decoder.predict([z, c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB')", "= LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same', activation", "= z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3) encoder = model_encoder(z_dim =", "np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB')", "h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f = f['imdb'] label1 = f['id'] label1 = np.asarray(label1)", "127.5)/127.5 x_train = x_train.astype('float16') x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 =", "img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB') 
image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image =", "= 'same', activation = 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\") # ####", "* 4 * 128, activation = 'relu')(h) h = Reshape((4, 4, 128))(h) #", "image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ############## loc = 'bottom'", "= Dense(4 * 4 * 128, activation = 'relu')(h) h = Reshape((4, 4,", "0:z_dim] z_log_var = y_pred[:, z_dim:2 * z_dim] kl_loss = - 0.5 * K.sum(1", "to python ordering print('x shape:', x.shape) idx_train = np.asarray(np.where(label3 == 0)) idx_test =", "= Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) #", "= np.asarray(label3) FrameNum = f['fn'] FrameNum = np.asarray(FrameNum) x = f['images'] x =", "z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def sampling(args): z_mean, z_log_var =", "def KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2 * z_dim]", "print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c)", "= Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img,", "axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5)", "label1 = f['id'] label1 = np.asarray(label1) label1 -= 1 label2 = f['ep'] label2", "h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same',", "blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1 c = keras.utils.to_categorical(c, 
num_pp)", "f['fn'] FrameNum = np.asarray(FrameNum) x = f['images'] x = np.asarray(x); x = np.transpose(x,", "+ K.exp((z_log_var) / 2) * epsilon ############ Build the GAN architecture ################# def", "= Flatten()(h) # h = Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h) logvar =", "4300 idx2 = 7423 img1 = np.squeeze(x_ori[idx1, :, :, :]) img2 = np.squeeze(x_ori[idx2,", "sampling_np(mean_train_sup, var_train_sup) print(z.shape) c = np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img = decoder.predict([z,", "= Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image", "y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori", "mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var) / 2) * epsilon ############ Build the", "h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units , (k, k),", "64*64*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h", "np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2,", "Conv2DTranspose from keras.layers.normalization import * from keras.optimizers import * from keras import initializers", "Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units ,", "img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:])", "keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img =", "import cPickle, random, sys, keras from keras.models import Model from functools import partial", "idx = 123 
input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB')", "# h = AveragePooling2D((6,6))(h) h = Flatten()(h) # h = Dense(latent_dim, name=\"encoder_mu\")(h) mean", "Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 8*6*64", "* 128, activation = 'relu')(h) h = Reshape((4, 4, 128))(h) # h =", "z_mean = y_pred[:, 0:z_dim] z_log_var = y_pred[:, z_dim:2 * z_dim] kl_loss = -", "= 1000 img_rows, img_cols = 64, 64 clipvalue = 20 noise_dim = 10", "(x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1", "num_ep = 7 num_pp = 6 epochs = 1000 img_rows, img_cols = 64,", "= Input((img_rows,img_cols,3)) z_dim = 128 units = 256 ee = 200 auxiliary_c =", "return z_mean + np.exp(z_log_var / 2) * epsilon # mean and variance of", "# mean and variance of the prior distribution # mean_train_sup = np.zeros((1,128)) var_train_sup", "4 * 128, activation = 'relu')(h) h = Reshape((4, 4, 128))(h) # h", "y_test2.shape) # x_ori = (x - 127.5)/127.5 opt = RMSprop(lr = 0.0003,decay =", "\"tensorflow\" from PIL import Image from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py", "10 z_dim = 128 date = 2018 # print ('Loading data...') f =", "1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1", "axis=-1) return K.mean(kl_loss) def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim),", "Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 32*32*64", "=0)) c = np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec", "# h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h =", "f = 
h5py.File('FERG_64_64_color.mat') print ('Finished loading....') f = f['imdb'] label1 = f['id'] label1", "image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = Image.fromarray(img2, 'RGB')", "= f['id'] label1 = np.asarray(label1) label1 -= 1 label2 = f['ep'] label2 =", "np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting", "sampling_np( z_mean, z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean", "from keras.layers import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from", "= (x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp)", "= Dense(z_dim, name=\"encoder_mean\")(h) logvar = Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h) # meansigma =", "2) * epsilon ############ Build the GAN architecture ################# def model_encoder(z_dim, input_shape, units=512,", "# 8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\") # #### reload the trained weights to", "2) * epsilon # mean and variance of the prior distribution # mean_train_sup", "= 5 x = Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z", "Model([x,auxiliary_c], h, name=\"Decoder\") # #### reload the trained weights to implement the anticipated", "= (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2,", "x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep)", "np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = 
Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000)", "x_train = x_train.astype('float16') x_test = x_test.astype('float16') y_train1 = keras.utils.to_categorical(y_train1, num_pp) y_test1 = keras.utils.to_categorical(y_test1,", "reload the trained weights to implement the anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim", "the anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim = 128 units = 256 ee", "xx in xrange(0,8): idx = 123 input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image", "= Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h =", "from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import Convolution2D, MaxPooling2D,", "x = Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z = Input(shape=(n_dim,),", "h2], name = 'Encoder') def model_decoder(z_dim, c_dim): k = 5 x = Input(shape", "x = f['images'] x = np.asarray(x); x = np.transpose(x, [3,2,1,0]) # matlab ordering", "epsilon_std = 1.0 def KL_loss(y_true, y_pred): z_mean = y_pred[:, 0:z_dim] z_log_var = y_pred[:,", "from keras import initializers import matplotlib.pyplot as plt import cPickle, random, sys, keras", "import os,random os.environ[\"KERAS_BACKEND\"] = \"tensorflow\" from PIL import Image from keras.layers import Conv2D,", "np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img,", "from functools import partial normal = partial(initializers.normal, scale=.02) ## load and preprocess the", "= keras.utils.to_categorical(y_test1, num_pp) y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### 
print('x_train", "for pp in xrange(0,1): for xx in xrange(0,8): idx = 123 input_img =", "num=1000) for ii in xrange(0,1000): c = np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp", "/ 2) * epsilon ############ Build the GAN architecture ################# def model_encoder(z_dim, input_shape,", "cPickle, random, sys, keras from keras.models import Model from functools import partial normal", "image ### def sampling_np( z_mean, z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim),", "= Input(shape=(c_dim,), name='aux_input_c') auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') # generator = model_generator(z_dim = z_dim,", "label2 = np.asarray(label2) label2 -= 1 label3 = f['set'] label3 = np.asarray(label3) FrameNum", "= Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000) for ii in xrange(0,1000):", "img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ##############", "-= 1 label2 = f['ep'] label2 = np.asarray(label2) label2 -= 1 label3 =", "# h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k),", "img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 =", "h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units / 2, (k,", "weights to implement the anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim = 128 units", "xrange(0,num_pp): for xx in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) print(z.shape) c = np.ones(1,)*i", "# h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides =", "to implement the anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim = 128 units =", "2 test', y_test2.shape) # x_ori = (x - 127.5)/127.5 opt = 
RMSprop(lr =", "############ Build the GAN architecture ################# def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k =", "+ np.exp(z_log_var / 2) * epsilon # mean and variance of the prior", ", (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h)", "= 7 num_pp = 6 epochs = 1000 img_rows, img_cols = 64, 64", "= Image.fromarray(img, 'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without input image ### def sampling_np(", "shape:', x.shape) idx_train = np.asarray(np.where(label3 == 0)) idx_test = np.asarray(np.where(label3 == 1)) print('idx_test", "1 label3 = f['set'] label3 = np.asarray(label3) FrameNum = f['fn'] FrameNum = np.asarray(FrameNum)", "xrange(0,8): idx = 123 input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image = Image.fromarray(img,", "trained weights to implement the anticipated applications#### input_img = Input((img_rows,img_cols,3)) z_dim = 128", "Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image =", "train', y_train2.shape) print('label 2 test', y_test2.shape) # x_ori = (x - 127.5)/127.5 opt", "= Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h) logvar = Dense(z_dim, name=\"encoder_sigma\", activation =", "= np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img = np.squeeze(x_ori[idx,:,:,:])", "= 10 z_dim = 128 date = 2018 # print ('Loading data...') f", "np.asarray(label1) label1 -= 1 label2 = f['ep'] label2 = np.asarray(label2) label2 -= 1", "h = LeakyReLU(0.2)(h) # h = AveragePooling2D((6,6))(h) h = Flatten()(h) # h =", "from keras.optimizers import * from keras import initializers import matplotlib.pyplot as plt import", "for xx in xrange(0,100): z = sampling_np(mean_train_sup, var_train_sup) 
print(z.shape) c = np.ones(1,)*i c", "z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image", "= Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same', activation = 'tanh')(h) #", "strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) h = LeakyReLU(0.2)(h)", "print('label 1 test', y_test1.shape) print('label 2 train', y_train2.shape) print('label 2 test', y_test2.shape) #", "keras.layers.normalization import * from keras.optimizers import * from keras import initializers import matplotlib.pyplot", "for ii in xrange(0,1000): c = np.ones((1,))*0 c = keras.utils.to_categorical(c, num_pp) z_interp =", "np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), ) return z_mean + np.exp(z_log_var / 2) * epsilon", "- K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def sampling(args): z_mean, z_log_var = args epsilon =", "= y_test2 x_train = (x_train- 127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train = x_train.astype('float16')", "Dense(4 * 4 * 128, activation = 'relu')(h) h = Reshape((4, 4, 128))(h)", ":, :]) img2 = np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0))", "Input((img_rows,img_cols,3)) z_dim = 128 units = 256 ee = 200 auxiliary_c = Input(shape=(c_dim,),", "import matplotlib.pyplot as plt import cPickle, random, sys, keras from keras.models import Model", "shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label 1 train', y_train1.shape) print('label 1 test', y_test1.shape)", "for example) ## batch_size = 256 num_ep = 7 num_pp = 6 epochs", "* epsilon # mean and variance of the prior distribution # mean_train_sup =", ", (k, k), strides = (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h)", "UpSampling2D,AveragePooling2D, Conv2DTranspose from keras.layers.normalization 
import * from keras.optimizers import * from keras import", "(k, k), strides = (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) #", "= np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr = np.linspace(0.0, 1.0,", "= opt) decoder.summary() ##### expression morphing #####x for xx in xrange(0,1): idx1 =", "in xrange(0,1): idx1 = 4300 idx2 = 7423 img1 = np.squeeze(x_ori[idx1, :, :,", "normal = partial(initializers.normal, scale=.02) ## load and preprocess the dataset (use FERG for", "Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h = Conv2DTranspose(3, (k,k), strides = (2,2), padding =", "y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label 1", "# 32*32*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h)", "f['id'] label1 = np.asarray(label1) label1 -= 1 label2 = f['ep'] label2 = np.asarray(label2)", "decoder.compile(loss = 'binary_crossentropy',optimizer = opt) decoder.summary() ##### expression morphing #####x for xx in", "decoder.predict([z_interp,c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') #", "opt = RMSprop(lr = 0.0003,decay = 1e-6) dopt = RMSprop(lr = 0.0003,decay =", "auxiliary_z = Input(shape=(n_dim,), name='aux_input_z') h = keras.layers.concatenate([x, auxiliary_c]) h = Dense(4 * 4", "input_img = Input((img_rows,img_cols,3)) z_dim = 128 units = 256 ee = 200 auxiliary_c", "mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i in xrange(0,num_pp): for xx in", "print('x_test shape:', x_test.shape) print('label 1 train', y_train1.shape) print('label 1 test', y_test1.shape) print('label 2", "128, activation = 'relu')(h) h = Reshape((4, 4, 128))(h) # h = LeakyReLU(0.2)(h)", 
"print('label 2 train', y_train2.shape) print('label 2 test', y_test2.shape) # x_ori = (x -", "x.shape) idx_train = np.asarray(np.where(label3 == 0)) idx_test = np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape)", "padding = 'same', activation = 'relu')(h) # 64*64*64 # h = Dropout(dropout)(h) h", "encoder.summary() decoder = model_decoder(z_dim = z_dim, c_dim=c_dim) decoder.load_weights('trained_weight_2.h5') decoder.compile(loss = 'binary_crossentropy',optimizer = opt)", "image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:] img", "-= 1 label3 = f['set'] label3 = np.asarray(label3) FrameNum = f['fn'] FrameNum =", "2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units / 2, (k, k), strides =", "dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt) encoder.summary() decoder = model_decoder(z_dim = z_dim,", "y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori", "z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0)) plt.figure(figsize=(2, 2)) img1", "plt.imshow(img2) image = Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000) for ii", "== 1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]]", "= label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori = y_test2 x_train = (x_train- 127.5)/127.5 x_test", "y_train2 = keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:', x_train.shape) print('x_test", "= 0 # mouth blocked 
print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c = np.ones((1,))*1", "y_test2 = label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori = y_test2 x_train = (x_train- 127.5)/127.5", "= 'sigmoid')(h) # meansigma = Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar])", "strides = (2,2), padding = 'same', activation = 'relu')(h) # 32*32*64 # h", "K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var) / 2) * epsilon ############", "4, 128))(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides = (2,2),", "input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer = opt)", "'RGB') image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000) for ii in xrange(0,1000): c =", "h = LeakyReLU(0.2)(h) h = Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h) h", "return z_mean + K.exp((z_log_var) / 2) * epsilon ############ Build the GAN architecture", "without input image ### def sampling_np( z_mean, z_log_var ): epsilon = np.random.normal(loc=0., scale=epsilon_std,", "import h5py import numpy as np from keras.layers import Input,merge,Lambda from keras.layers.core import", "Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000) for ii in xrange(0,1000): c", "LeakyReLU(0.2)(h) h = Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same', activation =", "keras.utils.to_categorical(c, num_pp) img = decoder.predict([z, c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image", "np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i in xrange(0,num_pp): for xx in xrange(0,100): z", "model_encoder(z_dim, input_shape, units=512, dropout=0.3): k = 5 x = Input(input_shape) h = Conv2D(units/8", "x_train = (x_train- 
127.5)/127.5 x_test = (x_test- 127.5)/127.5 x_train = x_train.astype('float16') x_test =", "def model_encoder(z_dim, input_shape, units=512, dropout=0.3): k = 5 x = Input(input_shape) h =", "label2 = f['ep'] label2 = np.asarray(label2) label2 -= 1 label3 = f['set'] label3", "img2 = np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0)) z_2, mean_var_imp", "= (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2,", "test', y_test1.shape) print('label 2 train', y_train2.shape) print('label 2 test', y_test2.shape) # x_ori =", "= 'relu')(h) # 8*6*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h =", "print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img,", "= Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:]", "Generate images without input image ### def sampling_np( z_mean, z_log_var ): epsilon =", "= 10 c_dim = num_pp n_dim = 10 z_dim = 128 date =", "Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2], name = 'Encoder') def", "= 4300 idx2 = 7423 img1 = np.squeeze(x_ori[idx1, :, :, :]) img2 =", "# h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units / 2,", "= 64, 64 clipvalue = 20 noise_dim = 10 c_dim = num_pp n_dim", "= (x - 127.5)/127.5 opt = RMSprop(lr = 0.0003,decay = 1e-6) dopt =", "128 units = 256 ee = 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') auxiliary_z =", "f['imdb'] label1 = f['id'] label1 = np.asarray(label1) label1 -= 1 label2 = f['ep']", "f['ep'] label2 = np.asarray(label2) label2 -= 1 label3 = f['set'] label3 = np.asarray(label3)", "# x_ori = (x - 127.5)/127.5 
opt = RMSprop(lr = 0.0003,decay = 1e-6)", "plt.figure(figsize=(2, 2)) img1 =np.squeeze(x_ori[idx1,:,:,:]) img1 = np.uint8(img1*127.5+127.5) image = Image.fromarray(img1, 'RGB') image.save('ori_1.tif') img2", "'relu')(h) h = Reshape((4, 4, 128))(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units,", "Image.fromarray(img, 'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] =", "1) , units=units, dropout=0.3) encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3)", "Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose", "activation = 'relu')(h) # 8*6*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) h", "= sampling_np(mean_train_sup, var_train_sup) print(z.shape) c = np.ones(1,)*i c = keras.utils.to_categorical(c, num_pp) img =", "= Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x) h = BatchNormalization(momentum=0.8)(h) h", "(2,2), padding = 'same', activation = 'tanh')(h) # 8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\")", "8*6*64 return Model([x,auxiliary_c], h, name=\"Decoder\") # #### reload the trained weights to implement", "np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = Image.fromarray(img2, 'RGB') image.save('ori_2.tif') arr =", "np.squeeze(x_ori[idx,:,:,:]) impanted_img[40:55,18:47,:] = 0 # mouth blocked print('impanted_img',impanted_img.shape) z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0)) c", "for xx in xrange(0,1): idx1 = 4300 idx2 = 7423 img1 = np.squeeze(x_ori[idx1,", "from PIL import Image from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import", "1 test', y_test1.shape) print('label 
2 train', y_train2.shape) print('label 2 test', y_test2.shape) # x_ori", "= f['ep'] label2 = np.asarray(label2) label2 -= 1 label3 = f['set'] label3 =", "name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h) logvar = Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h) #", "input_img = np.squeeze(x_ori[idx,:,:,:]) img = np.uint8(input_img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('original.tif') impanted_img =", "Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same', activation = 'tanh')(h) # 8*6*64", "= np.asarray(label1) label1 -= 1 label2 = f['ep'] label2 = np.asarray(label2) label2 -=", "# meansigma = Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 =", "AveragePooling2D((6,6))(h) h = Flatten()(h) # h = Dense(latent_dim, name=\"encoder_mu\")(h) mean = Dense(z_dim, name=\"encoder_mean\")(h)", "h = Conv2D(units / 2, (k, k), strides = (2,2), border_mode='same')(h) h =", "import Image from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D import h5py import numpy as", "x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2", "Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif') # ############### Image impanting ############## loc = 'bottom' for pp", "h, name=\"Decoder\") # #### reload the trained weights to implement the anticipated applications####", "sys, keras from keras.models import Model from functools import partial normal = partial(initializers.normal,", "5 x = Input(shape = (z_dim,)) auxiliary_c = Input(shape=(c_dim,), name='aux_input_c') # auxiliary_z =", "= Dropout(dropout)(h) # h = MaxPooling2D(pool_size=(2, 2))(h) h = LeakyReLU(0.2)(h) h = Conv2D(units/4,", "#### reload the trained weights to implement the anticipated applications#### input_img = Input((img_rows,img_cols,3))", "image = Image.fromarray(img2, 'RGB') 
image.save('ori_2.tif') arr = np.linspace(0.0, 1.0, num=1000) for ii in", "= Dense(z_dim, name=\"encoder_sigma\", activation = 'sigmoid')(h) # meansigma = Model(x, [mean, logsigma],name='encoder') z", "model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3) encoder = model_encoder(z_dim", "= label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori = y_test2 x_train =", "= 'Encoder') def model_decoder(z_dim, c_dim): k = 5 x = Input(shape = (z_dim,))", "'RGB') image.save('test_blocked_pp1'+'.tif') img = np.uint8(img_rec*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('test_rec_pp1'+'.tif') impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:]", "keras from keras.models import Model from functools import partial normal = partial(initializers.normal, scale=.02)", "h = Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h", "idx2 = 7423 img1 = np.squeeze(x_ori[idx1, :, :, :]) img2 = np.squeeze(x_ori[idx2, :,", "2, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h) h = Dropout(dropout)(h)", "img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = Image.fromarray(img2, 'RGB') image.save('ori_2.tif')", "h = Dense(4 * 4 * 128, activation = 'relu')(h) h = Reshape((4,", "z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3) encoder = model_encoder(z_dim = z_dim,", "idx_test = np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:]", "print('label 1 train', y_train1.shape) print('label 1 test', y_test1.shape) print('label 2 train', y_train2.shape) print('label", "= keras.utils.to_categorical(y_train2, num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:', x_train.shape) print('x_test shape:',", "= 
x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 =", "h = BatchNormalization(momentum=0.8)(h) # h = LeakyReLU(0.2)(h) h = Conv2DTranspose(units/2, (k,k), strides =", "= label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]] y_test2 = label2[:,idx_test[1,:]] y_test1_ori = y_test1 y_test2_ori =", "= (2,2), padding = 'same', activation = 'relu')(h) # 64*64*64 # h =", "K.exp(z_log_var), axis=-1) return K.mean(kl_loss) def sampling(args): z_mean, z_log_var = args epsilon = K.random_normal(shape=(K.shape(z_mean)[0],", "= Conv2D(units / 2, (k, k), strides = (2,2), border_mode='same')(h) h = BatchNormalization(momentum=0.8)(h)", "c = np.ones((1,))*1 c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec =", "strides = (2,2), padding = 'same', activation = 'relu')(h) # 8*6*64 # h", "num_ep) y_test2 = keras.utils.to_categorical(y_test2, num_ep) ############################### print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label", "z_dim = 128 units = 256 ee = 200 auxiliary_c = Input(shape=(c_dim,), name='aux_input_c')", "c = keras.utils.to_categorical(c, num_pp) print('c',c) img_rec = decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img =", "'RGB') image.save('test_replaced_pp1'+'.tif') #### Generate images without input image ### def sampling_np( z_mean, z_log_var", "= np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i in xrange(0,num_pp): for xx in xrange(0,100):", "label3 = np.asarray(label3) FrameNum = f['fn'] FrameNum = np.asarray(FrameNum) x = f['images'] x", "y_test1_ori = y_test1 y_test2_ori = y_test2 x_train = (x_train- 127.5)/127.5 x_test = (x_test-", "img_rec = decoder.predict([z_impanted,c]) img_rec = np.squeeze(img_rec) img = np.uint8(impanted_img*127.5+127.5) image = Image.fromarray(img, 'RGB')", "idx1 = 4300 idx2 = 7423 img1 = np.squeeze(x_ori[idx1, :, :, :]) img2", 
"size=(z_mean.shape[0], z_dim), ) return z_mean + np.exp(z_log_var / 2) * epsilon # mean", "= keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c]) img", "activation = 'sigmoid')(h) # meansigma = Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean,", "shape:', x_test.shape) print('label 1 train', y_train1.shape) print('label 1 test', y_test1.shape) print('label 2 train',", "* from keras.optimizers import * from keras import initializers import matplotlib.pyplot as plt", "= decoder.predict([z_interp,c]) img = np.squeeze(img) img = np.uint8(img*127.5+127.5) image = Image.fromarray(img, 'RGB') image.save('interp_'+str(ii)+'.tif')", "num_ep) ############################### print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) print('label 1 train', y_train1.shape) print('label", "padding = 'same', activation = 'relu')(h) # 32*32*64 # h = Dropout(dropout)(h) h", "x[idx_train[1,:],:,:,:] x_test = x[idx_test[1,:],:,:,:] y_train1 = label1[:,idx_train[1,:]] y_test1 = label1[:,idx_test[1,:]] y_train2 = label2[:,idx_train[1,:]]", "'relu')(h) # 32*32*64 # h = Dropout(dropout)(h) h = BatchNormalization(momentum=0.8)(h) # h =", "'RGB') image.save('ori_1.tif') img2 = np.squeeze(x_ori[idx2,:,:,:]) img2 = np.uint8(img2*127.5+127.5) # plt.imshow(img2) image = Image.fromarray(img2,", "y_train1.shape) print('label 1 test', y_test1.shape) print('label 2 train', y_train2.shape) print('label 2 test', y_test2.shape)", "= np.linspace(0.0, 1.0, num=1000) for ii in xrange(0,1000): c = np.ones((1,))*0 c =", "= np.ones((1,128)) for i in xrange(0,num_pp): for xx in xrange(0,100): z = sampling_np(mean_train_sup,", "train', y_train1.shape) print('label 1 test', y_test1.shape) print('label 2 train', y_train2.shape) print('label 2 test',", "'sigmoid')(h) # meansigma = Model(x, [mean, logsigma],name='encoder') z = Lambda(sampling, 
output_shape=(z_dim,))([mean, logvar]) h2", "(2,2), padding = 'same', activation = 'relu')(h) # 32*32*64 # h = Dropout(dropout)(h)", "pp in xrange(0,1): for xx in xrange(0,8): idx = 123 input_img = np.squeeze(x_ori[idx,:,:,:])", "== 0)) idx_test = np.asarray(np.where(label3 == 1)) print('idx_test shape',idx_test.shape) x_train = x[idx_train[1,:],:,:,:] x_test", "[mean, logsigma],name='encoder') z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar]) h2 = keras.layers.concatenate([mean,logvar]) return Model(x,[z, h2],", "epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0., stddev=epsilon_std) return z_mean + K.exp((z_log_var) / 2) *", "np.squeeze(x_ori[idx1, :, :, :]) img2 = np.squeeze(x_ori[idx2, :, :, :]) z_1, mean_var_imp =", "as np from keras.layers import Input,merge,Lambda from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten from keras.layers.advanced_activations import", "name='aux_input_z') # generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) , units=units,", "127.5)/127.5 opt = RMSprop(lr = 0.0003,decay = 1e-6) dopt = RMSprop(lr = 0.0003,decay", "= z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3) encoder.load_weights('trained_weight_1.h5') encoder.compile(loss = 'binary_crossentropy',optimizer", "1 train', y_train1.shape) print('label 1 test', y_test1.shape) print('label 2 train', y_train2.shape) print('label 2", "the prior distribution # mean_train_sup = np.zeros((1,128)) var_train_sup = np.ones((1,128)) for i in", "c = keras.utils.to_categorical(c, num_pp) z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii]) z_interp = np.reshape(z_interp,(1,z_dim)) img = decoder.predict([z_interp,c])" ]
[]
[]
[ "bits == 8: print(f\"{byte:02X}\", end=' ') bits = 0 byte = 0x00 print()", "int) -> bool: return abs(0x10 - x) < 2 def is_long(x: int) ->", "is_short(x: int) -> bool: return abs(0x10 - x) < 2 def is_long(x: int)", "= (byte >> 1) bits += 1 elif is_short(pair[0]) and is_long(pair[1]): byte =", "bits += 1 elif is_short(pair[0]) and is_long(pair[1]): byte = (byte >> 1) |", "< 2 def parse(f: TextIO) -> None: for line in f: try: data", "abs(0x10 - x) < 2 def is_long(x: int) -> bool: return abs(0x30 -", "= data[6:] # Remove header and lead in burst for i in range(0,", "-> bool: return abs(0x30 - x) < 2 def parse(f: TextIO) -> None:", "byte = (byte >> 1) bits += 1 elif is_short(pair[0]) and is_long(pair[1]): byte", "and lead in burst for i in range(0, len(data), 2): pair = data[i],", "abs(0x30 - x) < 2 def parse(f: TextIO) -> None: for line in", "pair = data[i], data[i+1] if is_short(pair[0]) and is_short(pair[1]): byte = (byte >> 1)", "1) bits += 1 elif is_short(pair[0]) and is_long(pair[1]): byte = (byte >> 1)", "byte = 0x00 bits = 0 data = data[6:] # Remove header and", "1 elif is_short(pair[0]) and is_long(pair[1]): byte = (byte >> 1) | 0x80 bits", "2 def is_long(x: int) -> bool: return abs(0x30 - x) < 2 def", "burst for i in range(0, len(data), 2): pair = data[i], data[i+1] if is_short(pair[0])", "bits = 0 data = data[6:] # Remove header and lead in burst", "print(f\"{byte:02X}\", end=' ') bits = 0 byte = 0x00 print() with open(sys.argv[1], 'r')", "1 else: break if bits == 8: print(f\"{byte:02X}\", end=' ') bits = 0", "')] except ValueError: print(line, end='') continue byte = 0x00 bits = 0 data", "import sys from typing import TextIO def is_short(x: int) -> bool: return abs(0x10", "16) for h in line.split(' ')] except ValueError: print(line, end='') continue byte =", "[int(h, 16) for h in line.split(' ')] except ValueError: print(line, end='') continue byte", "f: try: data = [int(h, 16) for h in line.split(' ')] except ValueError:", "lead in burst for i in range(0, 
len(data), 2): pair = data[i], data[i+1]", "(byte >> 1) | 0x80 bits += 1 else: break if bits ==", "TextIO) -> None: for line in f: try: data = [int(h, 16) for", "end=' ') bits = 0 byte = 0x00 print() with open(sys.argv[1], 'r') as", "except ValueError: print(line, end='') continue byte = 0x00 bits = 0 data =", "elif is_short(pair[0]) and is_long(pair[1]): byte = (byte >> 1) | 0x80 bits +=", "== 8: print(f\"{byte:02X}\", end=' ') bits = 0 byte = 0x00 print() with", "# Remove header and lead in burst for i in range(0, len(data), 2):", "= 0 data = data[6:] # Remove header and lead in burst for", "def is_short(x: int) -> bool: return abs(0x10 - x) < 2 def is_long(x:", "line.split(' ')] except ValueError: print(line, end='') continue byte = 0x00 bits = 0", "for i in range(0, len(data), 2): pair = data[i], data[i+1] if is_short(pair[0]) and", "line in f: try: data = [int(h, 16) for h in line.split(' ')]", "in range(0, len(data), 2): pair = data[i], data[i+1] if is_short(pair[0]) and is_short(pair[1]): byte", "data[i+1] if is_short(pair[0]) and is_short(pair[1]): byte = (byte >> 1) bits += 1", "None: for line in f: try: data = [int(h, 16) for h in", "in line.split(' ')] except ValueError: print(line, end='') continue byte = 0x00 bits =", "for line in f: try: data = [int(h, 16) for h in line.split('", "print(line, end='') continue byte = 0x00 bits = 0 data = data[6:] #", "data[i], data[i+1] if is_short(pair[0]) and is_short(pair[1]): byte = (byte >> 1) bits +=", "len(data), 2): pair = data[i], data[i+1] if is_short(pair[0]) and is_short(pair[1]): byte = (byte", "(byte >> 1) bits += 1 elif is_short(pair[0]) and is_long(pair[1]): byte = (byte", "x) < 2 def parse(f: TextIO) -> None: for line in f: try:", "2): pair = data[i], data[i+1] if is_short(pair[0]) and is_short(pair[1]): byte = (byte >>", "= 0x00 bits = 0 data = data[6:] # Remove header and lead", "0x80 bits += 1 else: break if bits == 8: print(f\"{byte:02X}\", end=' ')", "= [int(h, 16) for h in line.split(' ')] except 
ValueError: print(line, end='') continue", "2 def parse(f: TextIO) -> None: for line in f: try: data =", "end='') continue byte = 0x00 bits = 0 data = data[6:] # Remove", "in burst for i in range(0, len(data), 2): pair = data[i], data[i+1] if", "1) | 0x80 bits += 1 else: break if bits == 8: print(f\"{byte:02X}\",", "break if bits == 8: print(f\"{byte:02X}\", end=' ') bits = 0 byte =", "+= 1 elif is_short(pair[0]) and is_long(pair[1]): byte = (byte >> 1) | 0x80", "-> None: for line in f: try: data = [int(h, 16) for h", "return abs(0x30 - x) < 2 def parse(f: TextIO) -> None: for line", "is_short(pair[1]): byte = (byte >> 1) bits += 1 elif is_short(pair[0]) and is_long(pair[1]):", "= (byte >> 1) | 0x80 bits += 1 else: break if bits", "if bits == 8: print(f\"{byte:02X}\", end=' ') bits = 0 byte = 0x00", "x) < 2 def is_long(x: int) -> bool: return abs(0x30 - x) <", "is_long(pair[1]): byte = (byte >> 1) | 0x80 bits += 1 else: break", "and is_long(pair[1]): byte = (byte >> 1) | 0x80 bits += 1 else:", "range(0, len(data), 2): pair = data[i], data[i+1] if is_short(pair[0]) and is_short(pair[1]): byte =", "sys from typing import TextIO def is_short(x: int) -> bool: return abs(0x10 -", "is_short(pair[0]) and is_long(pair[1]): byte = (byte >> 1) | 0x80 bits += 1", "data[6:] # Remove header and lead in burst for i in range(0, len(data),", "8: print(f\"{byte:02X}\", end=' ') bits = 0 byte = 0x00 print() with open(sys.argv[1],", "- x) < 2 def is_long(x: int) -> bool: return abs(0x30 - x)", "0 data = data[6:] # Remove header and lead in burst for i", "< 2 def is_long(x: int) -> bool: return abs(0x30 - x) < 2", "i in range(0, len(data), 2): pair = data[i], data[i+1] if is_short(pair[0]) and is_short(pair[1]):", "python3 import sys from typing import TextIO def is_short(x: int) -> bool: return", "in f: try: data = [int(h, 16) for h in line.split(' ')] except", "#!/usr/bin/env python3 import sys from typing import TextIO def is_short(x: int) -> bool:", "is_short(pair[0]) and 
is_short(pair[1]): byte = (byte >> 1) bits += 1 elif is_short(pair[0])", "for h in line.split(' ')] except ValueError: print(line, end='') continue byte = 0x00", "byte = (byte >> 1) | 0x80 bits += 1 else: break if", "and is_short(pair[1]): byte = (byte >> 1) bits += 1 elif is_short(pair[0]) and", ">> 1) bits += 1 elif is_short(pair[0]) and is_long(pair[1]): byte = (byte >>", "| 0x80 bits += 1 else: break if bits == 8: print(f\"{byte:02X}\", end='", "continue byte = 0x00 bits = 0 data = data[6:] # Remove header", "0x00 bits = 0 data = data[6:] # Remove header and lead in", "if is_short(pair[0]) and is_short(pair[1]): byte = (byte >> 1) bits += 1 elif", "is_long(x: int) -> bool: return abs(0x30 - x) < 2 def parse(f: TextIO)", "Remove header and lead in burst for i in range(0, len(data), 2): pair", "import TextIO def is_short(x: int) -> bool: return abs(0x10 - x) < 2", "try: data = [int(h, 16) for h in line.split(' ')] except ValueError: print(line,", "data = data[6:] # Remove header and lead in burst for i in", "return abs(0x10 - x) < 2 def is_long(x: int) -> bool: return abs(0x30", "int) -> bool: return abs(0x30 - x) < 2 def parse(f: TextIO) ->", "bool: return abs(0x10 - x) < 2 def is_long(x: int) -> bool: return", "header and lead in burst for i in range(0, len(data), 2): pair =", "') bits = 0 byte = 0x00 print() with open(sys.argv[1], 'r') as f:", "bits = 0 byte = 0x00 print() with open(sys.argv[1], 'r') as f: parse(f)", "TextIO def is_short(x: int) -> bool: return abs(0x10 - x) < 2 def", "+= 1 else: break if bits == 8: print(f\"{byte:02X}\", end=' ') bits =", "- x) < 2 def parse(f: TextIO) -> None: for line in f:", ">> 1) | 0x80 bits += 1 else: break if bits == 8:", "from typing import TextIO def is_short(x: int) -> bool: return abs(0x10 - x)", "parse(f: TextIO) -> None: for line in f: try: data = [int(h, 16)", "def parse(f: TextIO) -> None: for line in f: try: data = [int(h,", "data = [int(h, 16) for h in line.split(' ')] except ValueError: print(line, end='')", 
"bits += 1 else: break if bits == 8: print(f\"{byte:02X}\", end=' ') bits", "ValueError: print(line, end='') continue byte = 0x00 bits = 0 data = data[6:]", "= data[i], data[i+1] if is_short(pair[0]) and is_short(pair[1]): byte = (byte >> 1) bits", "def is_long(x: int) -> bool: return abs(0x30 - x) < 2 def parse(f:", "typing import TextIO def is_short(x: int) -> bool: return abs(0x10 - x) <", "bool: return abs(0x30 - x) < 2 def parse(f: TextIO) -> None: for", "else: break if bits == 8: print(f\"{byte:02X}\", end=' ') bits = 0 byte", "-> bool: return abs(0x10 - x) < 2 def is_long(x: int) -> bool:", "h in line.split(' ')] except ValueError: print(line, end='') continue byte = 0x00 bits" ]
[ "http://example.com/) the response will be returned according # to the Index class in", "all available # options, refer to waltz/setup.py app = waltz.setup.dancefloor(urls, globals(), sessions=sessions, env=env)", "the Templator markup language env = {'split': lambda s, delim: s.split(delim) } #", "None, 'logged': False} # These environment variables will be made accessible within the", "for more details. \"\"\" import waltz # These are web.py url tuples which", "a regex url route to the Class # responsible for implementing its response.", "These are web.py url tuples which map a regex url route to the", "# options, refer to waltz/setup.py app = waltz.setup.dancefloor(urls, globals(), sessions=sessions, env=env) if __name__", "within the routes directory # (e.g routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login',", "# of html files via the Templator markup language env = {'split': lambda", "Main waltz application. :copyright: (c) Authentication Dance by Waltz. :license: GPLv3, see LICENSE", "# to the Index class in the file home.py within the routes directory", "coding: utf-8 -*- \"\"\" main.py ~~~~~~~ Main waltz application. :copyright: (c) Authentication Dance", "made accessible within the scope # of html files via the Templator markup", "urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') #", "a HTTP request to the base server # (e.g. 
http://example.com/) the response will", "options, refer to waltz/setup.py app = waltz.setup.dancefloor(urls, globals(), sessions=sessions, env=env) if __name__ ==", "directory # (e.g routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?',", "These environment variables will be made accessible within the scope # of html", ":license: GPLv3, see LICENSE for more details. \"\"\" import waltz # These are", "lambda s, delim: s.split(delim) } # Setting up and configuring the waltz application.", "application. :copyright: (c) Authentication Dance by Waltz. :license: GPLv3, see LICENSE for more", "implementing its response. In other words, when the # client issues/submits a HTTP", "up and configuring the waltz application. To see all available # options, refer", "be returned according # to the Index class in the file home.py within", "Index class in the file home.py within the routes directory # (e.g routes.home.Index)", "the Index class in the file home.py within the routes directory # (e.g", "in the file home.py within the routes directory # (e.g routes.home.Index) urls =", "\"\"\" import waltz # These are web.py url tuples which map a regex", "# These are web.py url tuples which map a regex url route to", "# (e.g. http://example.com/) the response will be returned according # to the Index", "of html files via the Templator markup language env = {'split': lambda s,", "('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') # Default values", "for a user's session sessions = {'email': None, 'logged': False} # These environment", "and configuring the waltz application. To see all available # options, refer to", "its response. 
In other words, when the # client issues/submits a HTTP request", "be made accessible within the scope # of html files via the Templator", "response. In other words, when the # client issues/submits a HTTP request to", "'/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') # Default values for a", "accessible within the scope # of html files via the Templator markup language", "which map a regex url route to the Class # responsible for implementing", "a user's session sessions = {'email': None, 'logged': False} # These environment variables", "client issues/submits a HTTP request to the base server # (e.g. http://example.com/) the", "class in the file home.py within the routes directory # (e.g routes.home.Index) urls", "configuring the waltz application. To see all available # options, refer to waltz/setup.py", "markup language env = {'split': lambda s, delim: s.split(delim) } # Setting up", "map a regex url route to the Class # responsible for implementing its", "'routes.auth.Logout', '/?', 'routes.home.Index') # Default values for a user's session sessions = {'email':", "details. \"\"\" import waltz # These are web.py url tuples which map a", "\"\"\" main.py ~~~~~~~ Main waltz application. :copyright: (c) Authentication Dance by Waltz. 
:license:", "= ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') # Default", "In other words, when the # client issues/submits a HTTP request to the", "{'split': lambda s, delim: s.split(delim) } # Setting up and configuring the waltz", "available # options, refer to waltz/setup.py app = waltz.setup.dancefloor(urls, globals(), sessions=sessions, env=env) if", "'/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') # Default values for a user's session", "'/?', 'routes.home.Index') # Default values for a user's session sessions = {'email': None,", "HTTP request to the base server # (e.g. http://example.com/) the response will be", "LICENSE for more details. \"\"\" import waltz # These are web.py url tuples", "'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') # Default values for", "responsible for implementing its response. In other words, when the # client issues/submits", "Setting up and configuring the waltz application. To see all available # options,", ":copyright: (c) Authentication Dance by Waltz. :license: GPLv3, see LICENSE for more details.", "Class # responsible for implementing its response. In other words, when the #", "the file home.py within the routes directory # (e.g routes.home.Index) urls = ('/analytics/?',", "env = {'split': lambda s, delim: s.split(delim) } # Setting up and configuring", "words, when the # client issues/submits a HTTP request to the base server", "by Waltz. :license: GPLv3, see LICENSE for more details. \"\"\" import waltz #", "waltz application. :copyright: (c) Authentication Dance by Waltz. :license: GPLv3, see LICENSE for", "base server # (e.g. 
http://example.com/) the response will be returned according # to", "'/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') # Default values for a user's session sessions =", "according # to the Index class in the file home.py within the routes", "-*- \"\"\" main.py ~~~~~~~ Main waltz application. :copyright: (c) Authentication Dance by Waltz.", "via the Templator markup language env = {'split': lambda s, delim: s.split(delim) }", "the routes directory # (e.g routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?',", "waltz application. To see all available # options, refer to waltz/setup.py app =", "main.py ~~~~~~~ Main waltz application. :copyright: (c) Authentication Dance by Waltz. :license: GPLv3,", "#-*- coding: utf-8 -*- \"\"\" main.py ~~~~~~~ Main waltz application. :copyright: (c) Authentication", "{'email': None, 'logged': False} # These environment variables will be made accessible within", "see all available # options, refer to waltz/setup.py app = waltz.setup.dancefloor(urls, globals(), sessions=sessions,", "other words, when the # client issues/submits a HTTP request to the base", "web.py url tuples which map a regex url route to the Class #", "the # client issues/submits a HTTP request to the base server # (e.g.", "more details. \"\"\" import waltz # These are web.py url tuples which map", "Default values for a user's session sessions = {'email': None, 'logged': False} #", "issues/submits a HTTP request to the base server # (e.g. 
http://example.com/) the response", "file home.py within the routes directory # (e.g routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics',", "routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index')", "waltz # These are web.py url tuples which map a regex url route", "# (e.g routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout',", "python #-*- coding: utf-8 -*- \"\"\" main.py ~~~~~~~ Main waltz application. :copyright: (c)", "home.py within the routes directory # (e.g routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?',", "the scope # of html files via the Templator markup language env =", "user's session sessions = {'email': None, 'logged': False} # These environment variables will", "Dance by Waltz. :license: GPLv3, see LICENSE for more details. \"\"\" import waltz", "sessions = {'email': None, 'logged': False} # These environment variables will be made", "False} # These environment variables will be made accessible within the scope #", "'logged': False} # These environment variables will be made accessible within the scope", "refer to waltz/setup.py app = waltz.setup.dancefloor(urls, globals(), sessions=sessions, env=env) if __name__ == \"__main__\":", "to the Class # responsible for implementing its response. In other words, when", "routes directory # (e.g routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register',", "route to the Class # responsible for implementing its response. 
In other words,", "when the # client issues/submits a HTTP request to the base server #", "within the scope # of html files via the Templator markup language env", "variables will be made accessible within the scope # of html files via", "utf-8 -*- \"\"\" main.py ~~~~~~~ Main waltz application. :copyright: (c) Authentication Dance by", "import waltz # These are web.py url tuples which map a regex url", "# responsible for implementing its response. In other words, when the # client", "# Setting up and configuring the waltz application. To see all available #", "(e.g. http://example.com/) the response will be returned according # to the Index class", "GPLv3, see LICENSE for more details. \"\"\" import waltz # These are web.py", "are web.py url tuples which map a regex url route to the Class", "url tuples which map a regex url route to the Class # responsible", "see LICENSE for more details. \"\"\" import waltz # These are web.py url", "delim: s.split(delim) } # Setting up and configuring the waltz application. To see", "language env = {'split': lambda s, delim: s.split(delim) } # Setting up and", "# client issues/submits a HTTP request to the base server # (e.g. http://example.com/)", "#!/usr/bin/env python #-*- coding: utf-8 -*- \"\"\" main.py ~~~~~~~ Main waltz application. :copyright:", "returned according # to the Index class in the file home.py within the", "~~~~~~~ Main waltz application. :copyright: (c) Authentication Dance by Waltz. :license: GPLv3, see", "# Default values for a user's session sessions = {'email': None, 'logged': False}", "will be made accessible within the scope # of html files via the", "the waltz application. To see all available # options, refer to waltz/setup.py app", "(e.g routes.home.Index) urls = ('/analytics/?', 'waltz.modules.Analytics', '/login/?', 'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?',", "tuples which map a regex url route to the Class # responsible for", "Waltz. 
:license: GPLv3, see LICENSE for more details. \"\"\" import waltz # These", "session sessions = {'email': None, 'logged': False} # These environment variables will be", "request to the base server # (e.g. http://example.com/) the response will be returned", "# These environment variables will be made accessible within the scope # of", "server # (e.g. http://example.com/) the response will be returned according # to the", "Templator markup language env = {'split': lambda s, delim: s.split(delim) } # Setting", "values for a user's session sessions = {'email': None, 'logged': False} # These", "response will be returned according # to the Index class in the file", "(c) Authentication Dance by Waltz. :license: GPLv3, see LICENSE for more details. \"\"\"", "Authentication Dance by Waltz. :license: GPLv3, see LICENSE for more details. \"\"\" import", "regex url route to the Class # responsible for implementing its response. In", "to the Index class in the file home.py within the routes directory #", "the base server # (e.g. http://example.com/) the response will be returned according #", "for implementing its response. In other words, when the # client issues/submits a", "files via the Templator markup language env = {'split': lambda s, delim: s.split(delim)", "<reponame>mekarpeles/waltz<gh_stars>0 #!/usr/bin/env python #-*- coding: utf-8 -*- \"\"\" main.py ~~~~~~~ Main waltz application.", "'routes.home.Index') # Default values for a user's session sessions = {'email': None, 'logged':", "s.split(delim) } # Setting up and configuring the waltz application. To see all", "html files via the Templator markup language env = {'split': lambda s, delim:", "to the base server # (e.g. http://example.com/) the response will be returned according", "to waltz/setup.py app = waltz.setup.dancefloor(urls, globals(), sessions=sessions, env=env) if __name__ == \"__main__\": app.run()", "the Class # responsible for implementing its response. 
In other words, when the", "= {'split': lambda s, delim: s.split(delim) } # Setting up and configuring the", "'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') # Default values for a user's session sessions", "application. To see all available # options, refer to waltz/setup.py app = waltz.setup.dancefloor(urls,", "s, delim: s.split(delim) } # Setting up and configuring the waltz application. To", "the response will be returned according # to the Index class in the", "= {'email': None, 'logged': False} # These environment variables will be made accessible", "environment variables will be made accessible within the scope # of html files", "scope # of html files via the Templator markup language env = {'split':", "'routes.auth.Login', '/register/?', 'routes.auth.Register', '/logout/?', 'routes.auth.Logout', '/?', 'routes.home.Index') # Default values for a user's", "url route to the Class # responsible for implementing its response. In other", "will be returned according # to the Index class in the file home.py", "} # Setting up and configuring the waltz application. To see all available", "To see all available # options, refer to waltz/setup.py app = waltz.setup.dancefloor(urls, globals()," ]
[ "Import by factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws def test_s3class_real(tmpdir): \"\"\"S3Storage in real", "coding=utf-8 \"\"\"apyfal.storage.aws tests\"\"\" import pytest from tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import():", "import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test: Import by factory without errors import_from_generic_test('AWS')", "factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws def test_s3class_real(tmpdir): \"\"\"S3Storage in real case\"\"\" run_full_real_test_sequence('AWS',", "def test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test: Import by factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp", "run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test: Import by factory without errors", "tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test: Import by", "( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test: Import by factory without", "import ( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test: Import by factory", "Test: Import by factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws def test_s3class_real(tmpdir): \"\"\"S3Storage in", "import\"\"\" # Test: Import by factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws def test_s3class_real(tmpdir):", "\"\"\"S3Storage import\"\"\" # Test: Import by factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws def", 
"\"\"\"apyfal.storage.aws tests\"\"\" import pytest from tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage", "test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test: Import by factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws", "# coding=utf-8 \"\"\"apyfal.storage.aws tests\"\"\" import pytest from tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test) def", "# Test: Import by factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws def test_s3class_real(tmpdir): \"\"\"S3Storage", "import pytest from tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\" #", "without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws def test_s3class_real(tmpdir): \"\"\"S3Storage in real case\"\"\" run_full_real_test_sequence('AWS', tmpdir)", "tests\"\"\" import pytest from tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\"", "<reponame>Accelize/apyfal # coding=utf-8 \"\"\"apyfal.storage.aws tests\"\"\" import pytest from tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test)", "by factory without errors import_from_generic_test('AWS') @pytest.mark.need_csp @pytest.mark.need_csp_aws def test_s3class_real(tmpdir): \"\"\"S3Storage in real case\"\"\"", "from tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test: Import", "pytest from tests.test_storage import ( run_full_real_test_sequence, import_from_generic_test) def test_s3class_import(): \"\"\"S3Storage import\"\"\" # Test:" ]
[ "size. if sum(self.item_list) > page_size and not page_size == 1.0: self.right_btn.show() def change_node(self,", "event of type gtk.gdk.Event. ''' self.in_event_box = True def leave_notify(self, widget, event): '''", "arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border", "Crumb list for different types of inputs. @param crumb: Support inputs are: [\"a", "''' label = widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self, widget, event): '''", "by # the Free Software Foundation, either version 3 of the License, or", "= 0 if not value == 0: self.right_btn.show() for i in xrange(len(self.item_list)): temp", "self.padding_x + self.menu_min, self.btn_min + self.menu_min), self.height) self.button_width = self.get_size_request()[0] - self.menu_min self.queue_draw()", "self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def enter_cb(self, widget): ''' Internal callback function", "redraw_bg(self, widget, event): ''' Internal callback function to \"expose-event\" signal. @param widget: gtk.EventBox", "(disable_bg, 1.0))] # Draw background. if not widget.state == gtk.STATE_NORMAL: # Draw button", "self.button_width = self.get_size_request()[0] - self.menu_min self.queue_draw() def expose_cb(self, widget, event): ''' Internal expose", "''' Internal callback function to \"button-press-event\" signal. 
@param widget: Crumb @param event: An", "return True gobject.type_register(Crumb) if __name__ == \"__main__\": import gtk def add_panel(widget): crumb =", "h -1) # right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x + 1 , y", "widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x + menu_width - arrow_button_width, wy", "self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH # for left & right buttons self.in_event_box = False", "for right arrow, default is \\\"treeview/arrow_right.png\\\" from ui theme. @param arrow_down: Dynamic pixbuf", "of the License, or # any later version. # # This program is", "# Should move this part to Bread class since app_theme is golobalized. arrow_right", "''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask", "[Crumb1, Crumb2], by using change_node(1, [Crumb3, Crumb4]), previous list will be change to", "should have received a copy of the GNU General Public License # along", "if button_color: draw_rectangle(cr, x + 2, y + 2, self.button_width - 3, h", "width, height): ''' Set Bread size. @param width: Width of Bread. @param height:", "shift_value): self.right_btn.hide() def move_left(self, widget): ''' Internal callback function to \"clicked\" signal. @param", "@param label: Label of the crumb. ''' if not self.show_others: for i in", "container which can hold crumbs widget. 
@undocumented: create_crumb @undocumented: enter_notify @undocumented: leave_notify @undocumented:", "# Init Hbox self.hbox = gtk.HBox(False, 0) self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if", "self.menu_press = True def button_clicked(self, widget): ''' Intenal callback function to \"clicked\" signal.", "label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' crumbs = self.create_crumb(crumbs) for", "@param widget: Crumb @param event: an event of gtk.gdk.event ''' in_menu = event.x", "padding_x self.menu = self.create_menu(menu_items) if self.menu != None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False", "+ 2, self.menu_min - 3, h -4) cr.fill() if self.menu != None: #", "== 0: self.left_btn.hide() def set_size(self, width, height): ''' Set Bread size. @param width:", "arrow_right if self.in_menu: button_color = None menu_color = inner_border else: button_color = inner_border", "win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox() ###################################### #", "self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb): ''' Internal function to create a", "= rect.x, rect.y, rect.width, rect.height # Should move this part to Bread class", "if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win =", "gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color = inner_border menu_color = None arrow_pixbuf", "@undocumented: move_left ''' __gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : 
(gobject.SIGNAL_RUN_LAST,", "= None self.arrow_down = None self.menu_min = 18 # menu bar width self.btn_min", "isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb): return crumb else: return [Crumb(c[0],", "text_color, alignment = pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if __name__ == \"__main__\": import gtk", "is 15 pixels. ''' super(Crumb, self).__init__() self.arrow_right = None self.arrow_down = None self.menu_min", "instances @param arrow_right: Dynamic pixbuf for right arrow, default is \\\"treeview/arrow_right.png\\\" from ui", "in_menu = event.x > self.button_width if self.in_menu !=in_menu: self.in_menu = in_menu self.queue_draw() def", "Label of the crumb. ''' if not self.show_others: for i in self.hbox.get_children()[(index +", "widget: Gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box =", "function to \"clicked\" signal. @param widget: Crumb ''' if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label)", "arrow_down self.item_list = list() self.show_others = show_others self.show_entry = show_entry self.crumb = self.create_crumb(crumb)", "self.height = 24 # crumb height self.font_size = font_size self.padding_x = padding_x self.menu", "def set_label(self, label, font_size = DEFAULT_FONT_SIZE): ''' Set label for left button. @param", "self.left_btn.hide() def set_size(self, width, height): ''' Set Bread size. @param width: Width of", "= self.create_crumb(crumbs) for crumb in crumbs: crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down = self.arrow_down", "self.right_btn.show() for i in xrange(len(self.item_list)): temp += self.item_list[i] if temp >= value: shift_value", "pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if __name__ == \"__main__\": import gtk def add_panel(widget): crumb", "Bread. ''' # Init. 
super(Bread, self).__init__(spacing = 0) self.arrow_right = arrow_right self.arrow_down =", "arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min - arrow_width) / 2, y + (h", "@param widget: Crumb instance. @param index: The index value of clicked crumb. @param", "-1) # right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x + 1 , y +", ") self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x,", "''' Internal callback function to \"clicked\" signal. @param widget: Left button. ''' upper,", "False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox()", "= len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size # Show", "2 * self.padding_x + self.menu_min, self.btn_min + self.menu_min), self.height) self.button_width = self.get_size_request()[0] -", "@param widget: gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box", "[(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] # Draw background. if not widget.state ==", "create_menu @undocumented: hide_cb @undocumented: button_press_cb @undocumented: button_clicked @undocumented: expose_cb ''' __gsignals__= { \"item_clicked\"", "@undocumented: motion_notify_cb @undocumented: create_menu @undocumented: hide_cb @undocumented: button_press_cb @undocumented: button_clicked @undocumented: expose_cb '''", "Crumb instance. @param event: An event of gtk.gdk.Event. 
''' if self.menu == None:", "arrow_pixbuf = arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg,", "label = [] for o in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\",", "= [] self.emit(\"item_clicked\", index, label) def move_right(self, widget): ''' Internal callback function to", "self.adj.page_size # Show right button if crumbs exceed scrolled window size. if sum(self.item_list)", "shown left && right box # at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width,", "DEFAULT_FONT_SIZE from draw import (draw_line, draw_text, draw_pixbuf) from utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo,", "BreadMenu(Poplist): ''' Popup menu for bread. @undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame '''", "# Add Bread Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb): ''' Internal", "Crumb class. @param label: Crumb item label @param menu_items: Crumb menu, could be", "self.height) self.button_width = self.get_size_request()[0] else: self.set_size_request( max(self.label_w + 2 * self.padding_x + self.menu_min,", "@param widget: Right button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value", "None self.menu_min = 18 # menu bar width self.btn_min = 50 # button", "widget, event): cr = widget.window.cairo_create() rect = widget.allocation with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\",", "def hide_cb(self, widget): ''' Internal callback function to Menu's \"\"hide\" signal. @param widget:", "to create a Crumb list for different types of inputs. 
@param crumb: Support", "with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill() return False def add(self,", "# You should have received a copy of the GNU General Public License", "An event of gtk.gdk.Event ''' if self.menu == None: self.in_button = True self.menu_press", "cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y, w, h) cr.fill() def shape_bread_menu_frame(self, widget, event): pass", "motion_notify_cb(self, widget, event): ''' Internal callback function to Crumb \"motion-notify-event\" signal. @param widget:", "def change_root_node( widget): crumb1 = Crumb(\"Yet Another Root\", menu) crumb2 = Crumb(\"Yet Another", "self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True)", "@param crumb: Crumb instance or a list of crumb instances @param arrow_right: Dynamic", "@undocumented: enter_button @undocumented: motion_notify_cb @undocumented: create_menu @undocumented: hide_cb @undocumented: button_press_cb @undocumented: button_clicked @undocumented:", "Draw text. draw_text(cr, self.label, x, y , self.button_width, h, self.font_size, text_color, alignment =", "function to \"clicked\" signal. @param widget: Crumb instance. @param index: The index value", "menu_items @return: Menu instance ''' if menu_items != None and isinstance(menu_items, list): return", "expose_bread_menu_frame ''' def __init__(self, items, max_height=None, max_width=None, ): ''' Initialize BreadMenu class. @param", "w, h) cr.fill() def shape_bread_menu_frame(self, widget, event): pass def expose_bread_menu_frame(self, widget, event): cr", "and right button. 
self.show_left_right_box = show_left_right_box left_box = gtk.HBox(spacing = 0) right_box =", "bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False, False, 0) # Test Item add_path_button", "add(self, crumbs): ''' Add crumbs. Can accept Crumb instance or a list of", "self.scroll_win = ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox, True, True) # Add Bread Items", "signal. @param widget: gtk.Entry widget instance. ''' label = widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\",", "y, w, h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y, w, h) cr.fill() def shape_bread_menu_frame(self,", "- 3, h -3) elif menu_color: draw_rectangle(cr, x + self.button_width + 1, y", "self.in_menu = True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button)", "widget, event): ''' Internal callback function to \"leave-notify-event\" signal. @param widget: Gtk.EventBox. @param", "self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x, y, w,", "None def hide_cb(self, widget): ''' Internal callback function to Menu's \"\"hide\" signal. @param", "+ h, x + w, y + h) # bottom draw_line(cr, x ,", "pixbuf for right arrow, default is \\\"treeview/arrow_right.png\\\" from ui theme. @param arrow_down: Dynamic", "operate crumbs ''' objects = self.hbox.get_children() for i in objects[index: -1]: i.destroy() self.item_list[index:]", "a list, default is None @param font_size: Font size, default is DEFAULT_FONT_SIZE. @param", "Internal callback function to \"clicked\" signal. @param widget: Left button. 
''' upper, page_size,", "@return: Menu instance ''' if menu_items != None and isinstance(menu_items, list): return BreadMenu(menu_items)", "DEFAULT_FONT_SIZE, padding_x = 15, ): ''' Initialize Crumb class. @param label: Crumb item", "by default is None. ''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2,", "leave_notify(self, widget, event): ''' Internal callback function to \"leave-notify-event\" signal. @param widget: Gtk.EventBox.", "elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color()", "gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def enter_cb(self,", "active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL: text_color =", "Internal callback function to Menu's \"\"hide\" signal. @param widget: Menu ''' if self.menu_press:", "not value == 0: self.right_btn.show() for i in xrange(len(self.item_list)): temp += self.item_list[i] if", "= 0 if upper > (page_size + value): self.left_btn.show() for i in xrange(len(self.item_list)+1):", "motion_notify_cb @undocumented: create_menu @undocumented: hide_cb @undocumented: button_press_cb @undocumented: button_clicked @undocumented: expose_cb ''' __gsignals__=", "signal. @param widget: Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False self.set_state(gtk.STATE_NORMAL)", "for bread. 
@undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def __init__(self, items, max_height=None,", "rect.y + 1, rect.width - 2, rect.height - 2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button):", "button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp", "@undocumented: move_right @undocumented: move_left ''' __gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\"", "+ value): shift_value = temp - (page_size + value) #play animation ani =", "} def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ): ''' Initialize Bread", "left button and right button. self.show_left_right_box = show_left_right_box left_box = gtk.HBox(spacing = 0)", "cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 2, rect.height - 2) cr.fill()", "when click space area in Bread. ''' # Init. super(Bread, self).__init__(spacing = 0)", "value) break #play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start()", "@param padding_x: Horizontal padding, default is 15 pixels. 
''' super(Crumb, self).__init__() self.arrow_right =", "= alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color()", "# it under the terms of the GNU General Public License as published", "Supported inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance", "h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y, w, h) cr.fill() def shape_bread_menu_frame(self, widget, event):", "[] self.emit(\"item_clicked\", index, label) def move_right(self, widget): ''' Internal callback function to \"clicked\"", "def expose_cb(self, widget, event): ''' Internal expose callback function. @param widget: Crumb instance.", "= arrow_right self.arrow_down = arrow_down self.item_list = list() self.show_others = show_others self.show_entry =", "# right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x + 1 , y + 1", "v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value - shift_value) == 0: self.left_btn.hide() def set_size(self,", "ARROW_BUTTON_WIDTH # for left & right buttons self.in_event_box = False # Init left", "temp = 0 if not value == 0: self.right_btn.show() for i in xrange(len(self.item_list)):", "2 * self.padding_x, self.btn_min), self.height) self.button_width = self.get_size_request()[0] else: self.set_size_request( max(self.label_w + 2", "temp > (page_size + value): shift_value = temp - (page_size + value) #play", "menu_width - arrow_button_width, wy + offset_y + menu_height, ), (0, 0)) def set_label(self,", "inner_border arrow_pixbuf = arrow_down elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color =", "else: return [Crumb(c[0], c[1]) for c in crumb] def enter_notify(self, widget, event): '''", "(None, \"测试2\", None), ], shadow_visible = False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda", 
"animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value -", "Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb): ''' Internal function to create", "and isinstance(menu_items, list): return BreadMenu(menu_items) else: return None def hide_cb(self, widget): ''' Internal", "Deepin, Inc. # 2011 ~ 2012 Zeng Zhi # # Author: <NAME> <<EMAIL>>", "of type gtk.gdk.Event. ''' self.in_event_box = True def leave_notify(self, widget, event): ''' Internal", "arrow_right self.arrow_down = arrow_down self.item_list = list() self.show_others = show_others self.show_entry = show_entry", "arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg, 1.0)), (1,", "to \"clicked\" signal. @param widget: Crumb ''' if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else:", "cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb class . @undocumented: enter_button @undocumented: motion_notify_cb @undocumented:", "ui theme. @param show_others: If True, crumbs will not be destroyed, otherwise all", "is better to consider whether or not shown left && right box #", "offset_y + menu_height, ), (0, 0)) def set_label(self, label, font_size = DEFAULT_FONT_SIZE): '''", "self.arrow_right arrow_down = self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border", "function to \"expose-event\" signal. @param widget: gtk.EventBox @param event: event of type gtk.gdk.event", "\"press-return\" signal. @param widget: gtk.Entry widget instance. 
''' label = widget.get_text() widget.destroy() self.eventbox.show()", "self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb): ''' Internal function to create a Crumb list", "True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def", "Font size, default is DEFAULT_FONT_SIZE. ''' self.label = label (self.label_w, self.label_h) = get_content_size(self.label,", "widget, event): in_menu = event.x > self.button_width self.in_menu =in_menu def motion_notify_cb(self, widget, event):", "warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #", "alignment = pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if __name__ == \"__main__\": import gtk def", "arrow_height) / 2) # Draw text. draw_text(cr, self.label, x, y , self.button_width, h,", "self.item_list[(index + 1):] = [] self.emit(\"item_clicked\", index, label) def move_right(self, widget): ''' Internal", "4, h -4) cr.fill() elif menu_color: cr.rectangle( x + self.button_width + 1, y", "= path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu) for i in path_list]) menu = Menu([", "= 0) right_box = gtk.HBox(spacing = 0) # FIXME: left && right box", "max(self.label_w + 2 * self.padding_x, self.btn_min), self.height) self.button_width = self.get_size_request()[0] else: self.set_size_request( max(self.label_w", "ScrolledWindow from button import Button from theme import ui_theme from menu import Menu", "def change_entry(widget, path): # Application can check if path is valid or not", "\"leave-notify-event\" signal. @param widget: Gtk.EventBox. 
@param event: The pointer event of type gtk.gdk.Event.", "draw_treeview_mask(self, cr, x, y, w, h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y, w, h)", "= arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg, 1.0)),", "= Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn,", "instance or a list, default is None @param font_size: Font size, default is", "self.menu_show: arrow_pixbuf = arrow_down else: arrow_pixbuf = arrow_right if self.in_menu: button_color = None", "self.menu_show = not self.menu_show if self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y) =", "be destroyed. @param show_entry: If True, an entry will pop up when click", "button_press_cb @undocumented: button_clicked @undocumented: expose_cb ''' __gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))}", "of gtk.gdk.event ''' in_menu = event.x > self.button_width if self.in_menu !=in_menu: self.in_menu =", "callback function to Crumb \"motion-notify-event\" signal. @param widget: Crumb @param event: an event", "test breadcrumb widget bread = Bread([(\"Root\", menu), (\"Level1\", menu)], show_others = False, show_entry", "= False self.menu_show = not self.menu_show if self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x,", "self.button_width - 3, h -3) elif menu_color: draw_rectangle(cr, x + self.button_width + 1,", "button and right button. 
self.show_left_right_box = show_left_right_box left_box = gtk.HBox(spacing = 0) right_box", "Horizontal padding, default is 15 pixels. ''' super(Crumb, self).__init__() self.arrow_right = None self.arrow_down", "create menu. @param menu_items: menu_items @return: Menu instance ''' if menu_items != None", "@undocumented: expose_cb ''' __gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label,", "= get_content_size(self.label, font_size) if self.menu == None: self.set_size_request( max(self.label_w + 2 * self.padding_x,", "type gtk.gdk.Event. ''' self.in_event_box = False def event_box_press(self, widget, event): ''' Internal callback", "menu_color: draw_rectangle(cr, x + self.button_width + 1, y + 2, self.menu_min - 2,", "self.label = label (self.label_w, self.label_h) = get_content_size(self.label, font_size) if self.menu == None: self.set_size_request(", "to \"clicked\" signal. @param widget: Right button. ''' upper, page_size, value = self.adj.upper,", "python #-*- coding:utf-8 -*- # Copyright (C) 2011 ~ 2012 Deepin, Inc. #", "Crumb4]. In this way, application can operate crumbs ''' objects = self.hbox.get_children() for", "draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min - arrow_width) / 2, y +", "not be destroyed, otherwise all crumbs on the right side will be destroyed.", "+ (h - arrow_height) / 2) # Draw text. draw_text(cr, self.label, x, y", "+= self.item_list[i] if temp > (page_size + value): shift_value = temp - (page_size", "widget: gtk.EventBox @param event: event of type gtk.gdk.event ''' cr = widget.window.cairo_create() rect", "temp += self.item_list[i] if temp >= value: shift_value = self.item_list[i] - (temp -", "@param index: The index value of clicked crumb. 
@param label: Label of the", "or a list of crumb instances @param arrow_right: Dynamic pixbuf for right arrow,", "@param menu_items: menu_items @return: Menu instance ''' if menu_items != None and isinstance(menu_items,", "self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if not value ==", ", y + h -1) # right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x +", "gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup menu for bread. @undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented:", "== gtk.STATE_NORMAL: # Draw button border. def draw_rectangle(cr, x, y , w, h):", "for left & right buttons self.in_event_box = False # Init left button and", "after given index. @param index: To specified remove after given index. ''' for", "- 4, h -4) cr.fill() elif menu_color: cr.rectangle( x + self.button_width + 1,", "# Init left button and right button. self.show_left_right_box = show_left_right_box left_box = gtk.HBox(spacing", "instance, there exist a list contain [Crumb1, Crumb2], by using change_node(1, [Crumb3, Crumb4]),", "__init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ): ''' Initialize Bread class. @param", "Init left button and right button. 
self.show_left_right_box = show_left_right_box left_box = gtk.HBox(spacing =", "Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False,", "= False self.index_id = 0 self.set_label(label) self.in_button = True self.in_menu = True self.connect(\"expose_event\",", "self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x, y, w, h):", "Crumb @param event: An event of gtk.gdk.Event ''' if self.menu == None: self.in_button", "1, self.menu_min, h - 1) # Draw innner border. cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr,", "Crumb menu, could be a Menu instance or a list, default is None", "value = self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if not", "<<EMAIL>> # # This program is free software: you can redistribute it and/or", "''' Internal callback function to \"press-return\" signal. @param widget: gtk.Entry widget instance. '''", "crumbs ''' objects = self.hbox.get_children() for i in objects[index: -1]: i.destroy() self.item_list[index:] =", "self.item_list[i] - (temp - value) break #play animation ani = Animation(self.adj, lambda widget,", "> self.button_width self.in_menu =in_menu def motion_notify_cb(self, widget, event): ''' Internal callback function to", "button_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] menu_color = [(0, (disable_bg, 1.0)),", "for more details. 
# # You should have received a copy of the", "h) cr.fill() def shape_bread_menu_frame(self, widget, event): pass def expose_bread_menu_frame(self, widget, event): cr =", "not shown left && right box # at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1)", "self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False self.menu_show = False self.index_id = 0 self.set_label(label) self.in_button", "@param crumbs: Supported inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])]", "= Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value - shift_value) ==", "to \"expose-event\" signal. @param widget: gtk.EventBox @param event: event of type gtk.gdk.event '''", "will pop up when click space area in Bread. ''' # Init. super(Bread,", "''' self.in_event_box = False def event_box_press(self, widget, event): ''' Internal callback function to", "left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False, False) # Init Hbox self.hbox = gtk.HBox(False, 0)", "signal. @param widget: Gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event. '''", "Inc. # 2011 ~ 2012 Zeng Zhi # # Author: <NAME> <<EMAIL>> #", "# Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False, False, 0) #", "enter_notify @undocumented: leave_notify @undocumented: event_box_press @undocumented: enter_cb @undocumented: redraw_bg @undocumented: click_cb @undocumented: move_right", "Crumb instance or a list of crumb instances @param arrow_right: Dynamic pixbuf for", "list For instance, there exist a list contain [Crumb1, Crumb2], by using change_node(1,", "menu, by default is None. 
''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame,", "class Crumb(gtk.Button): ''' Crumb class . @undocumented: enter_button @undocumented: motion_notify_cb @undocumented: create_menu @undocumented:", "1, y + 2, self.menu_min - 2, h -3) if widget.state == gtk.STATE_ACTIVE:", "of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU", "3, h -4) cr.fill() if self.menu != None: # Draw an arrow. draw_pixbuf(cr,", "menu_height, ), (0, 0)) def set_label(self, label, font_size = DEFAULT_FONT_SIZE): ''' Set label", "@undocumented: event_box_press @undocumented: enter_cb @undocumented: redraw_bg @undocumented: click_cb @undocumented: move_right @undocumented: move_left '''", "0 self.set_label(label) self.in_button = True self.in_menu = True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\",", "True) self.scroll_win = ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox, True, True) # Add Bread", "arrow, default is \\\"treeview/arrow_down.png\\\" from ui theme. @param show_others: If True, crumbs will", "self.adj.value shift_value = 0 temp = 0 if not value == 0: self.right_btn.show()", "animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if not", "rect = widget.allocation # Draw backgroud. 
with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width,", "specified index @param index: Start index @param crumbs: Crumb instance or Crumb list", "value == 0: self.right_btn.show() for i in xrange(len(self.item_list)): temp += self.item_list[i] if temp", "label = widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self, widget, event): ''' Internal", "widget, event): ''' Internal callback function to Crumb \"motion-notify-event\" signal. @param widget: Crumb", "= self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x, y, w, h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x,", "~ 2012 Deepin, Inc. # 2011 ~ 2012 Zeng Zhi # # Author:", "Font size, default is DEFAULT_FONT_SIZE. @param padding_x: Horizontal padding, default is 15 pixels.", "self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if upper > (page_size", "width self.btn_min = 50 # button width self.height = 24 # crumb height", "will not be destroyed, otherwise all crumbs on the right side will be", "self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if upper > (page_size +", "published by # the Free Software Foundation, either version 3 of the License,", "x + self.button_width + 1, y + 2, self.menu_min - 3, h -4)", "DEFAULT_FONT_SIZE. ''' self.label = label (self.label_w, self.label_h) = get_content_size(self.label, font_size) if self.menu ==", "widget.allocation with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y", "+ 2, self.button_width - 4, h -4) cr.fill() elif menu_color: cr.rectangle( x +", "widget.state == gtk.STATE_NORMAL: # Draw button border. 
def draw_rectangle(cr, x, y , w,", "self.hbox.pack_start(self.entry, True, True) def enter_cb(self, widget): ''' Internal callback function to \"press-return\" signal.", "outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y + 1, rect.width", "valid or not path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu) for i in", "-1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False, False) # Init Hbox self.hbox = gtk.HBox(False,", "function to \"button-press-event\" signal. @param widget: Crumb @param event: An event of gtk.gdk.Event", "y, w, h) cr.fill() def shape_bread_menu_frame(self, widget, event): pass def expose_bread_menu_frame(self, widget, event):", "x + self.button_width, y + 1, self.menu_min, h - 1) # Draw innner", "len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size # Show right", "self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win = ScrolledWindow()", "ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf = arrow_down else: arrow_pixbuf = arrow_right if self.in_menu: button_color", "button_color: draw_rectangle(cr, x + 2, y + 2, self.button_width - 3, h -3)", "#play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if", "of the GNU General Public License as published by # the Free Software", "-1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False, False, 0) # Test Item add_path_button 
=", "this part to Bread class since app_theme is golobalized. arrow_right = self.arrow_right arrow_down", "= ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color = inner_border menu_color = None arrow_pixbuf = arrow_right", "(gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False,", "x + self.button_width + (self.menu_min - arrow_width) / 2, y + (h -", "not self.show_others: for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):]", ", y , x , y + h) # left draw_line(cr, x +", "inputs. @param crumb: Support inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\",", "self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox,", "- (temp - value) break #play animation ani = Animation(self.adj, lambda widget, v1:", "cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 2, rect.height -", "specified remove after given index. 
''' for i in self.hbox.get_children()[(index + 1): -1]:", "break if not upper > (page_size + self.adj.value + shift_value): self.right_btn.hide() def move_left(self,", "c[1]) for c in crumb] def enter_notify(self, widget, event): ''' Internal callback function", "text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color = inner_border menu_color = None arrow_pixbuf =", "move_left ''' __gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,", "If not, see <http://www.gnu.org/licenses/>. from animation import Animation from scrolled_window import ScrolledWindow from", "Author: <NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>> # # This program is free", "None arrow_pixbuf = arrow_right else: button_color = None menu_color = inner_border arrow_pixbuf =", "if (self.adj.value - shift_value) == 0: self.left_btn.hide() def set_size(self, width, height): ''' Set", "set_size(self, width, height): ''' Set Bread size. @param width: Width of Bread. @param", "0: self.right_btn.show() for i in xrange(len(self.item_list)): temp += self.item_list[i] if temp >= value:", "label): ''' Internal callback function to \"clicked\" signal. @param widget: Crumb instance. @param", "Internal callback function to \"clicked\" signal. @param widget: Right button. ''' upper, page_size,", "widget): crumb1 = Crumb(\"Yet Another Root\", menu) crumb2 = Crumb(\"Yet Another Child\", menu)", "Crumb): return [crumb,] elif isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb): return", "Dynamic pixbuf for down arrow, default is \\\"treeview/arrow_down.png\\\" from ui theme. 
@param show_others:", "elif isinstance(crumb[0], Crumb): return crumb else: return [Crumb(c[0], c[1]) for c in crumb]", "@param crumbs: Crumb instance or Crumb list For instance, there exist a list", "= gtk.VBox() ###################################### # test breadcrumb widget bread = Bread([(\"Root\", menu), (\"Level1\", menu)],", "types of inputs. @param crumb: Support inputs are: [\"a label\", Menu] [(\"a label\",[(None,", "in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] = [] self.emit(\"item_clicked\", index,", "This program is distributed in the hope that it will be useful, #", "cr.fill() elif menu_color: cr.rectangle( x + self.button_width + 1, y + 2, self.menu_min", "label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' if isinstance(crumb, Crumb): return", "cr.rectangle(x + 2, y + 2, self.button_width - 4, h -4) cr.fill() elif", "event_box_press(self, widget, event): ''' Internal callback function to \"button-press-event\" signal. @param widget: gtk.eventbox.", ", y , x + w , y + h -1) # right", "Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if not upper > (page_size", "received a copy of the GNU General Public License # along with this", "self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event): in_menu = event.x", "Internal expose callback function. @param widget: Crumb instance. 
@param event: An event of", "self.crumb = self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH # for left & right buttons self.in_event_box", "event: event of type gtk.gdk.event ''' cr = widget.window.cairo_create() rect = widget.allocation #", "self.show_left_right_box = show_left_right_box left_box = gtk.HBox(spacing = 0) right_box = gtk.HBox(spacing = 0)", ", y + 1 , self.button_width -1 , h -1) elif menu_color: draw_rectangle(cr,", "def __init__(self, items, max_height=None, max_width=None, ): ''' Initialize BreadMenu class. @param items: Item", "copy of the GNU General Public License # along with this program. If", "y + (h - arrow_height) / 2) # Draw text. draw_text(cr, self.label, x,", "+ self.button_width + 1, y + 2, self.menu_min - 2, h -3) if", "-1 , y , x + w, y) # top draw_line(cr, x ,", "set_label(self, label, font_size = DEFAULT_FONT_SIZE): ''' Set label for left button. @param label:", "arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask", "self.in_event_box = False def event_box_press(self, widget, event): ''' Internal callback function to \"button-press-event\"", "else: button_color = inner_border menu_color = None elif widget.state == gtk.STATE_ACTIVE: text_color =", "size, default is DEFAULT_FONT_SIZE. 
''' self.label = label (self.label_w, self.label_h) = get_content_size(self.label, font_size)", "crumb else: return [Crumb(c[0], c[1]) for c in crumb] def enter_notify(self, widget, event):", "False self.index_id = 0 self.set_label(label) self.in_button = True self.in_menu = True self.connect(\"expose_event\", self.expose_cb)", "y + h) # bottom draw_line(cr, x , y , x , y", "be a Menu instance or a list, default is None @param font_size: Font", "+ w, y + h) # bottom draw_line(cr, x , y , x", "exist a list contain [Crumb1, Crumb2], by using change_node(1, [Crumb3, Crumb4]), previous list", "== gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color = None arrow_pixbuf =", "= self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size =", "''' Initialize Crumb class. @param label: Crumb item label @param menu_items: Crumb menu,", "i.destroy() self.item_list[(index + 1):] = [] def click_cb(self, widget, index, label): ''' Internal", "Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget,", "@param widget: Left button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value", "[Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb): return crumb else: return [Crumb(c[0], c[1]) for c", "None: # Draw an arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min -", "is distributed in the hope that it will be useful, # but WITHOUT", "button_color = inner_border menu_color = None arrow_pixbuf = arrow_right else: button_color = None", "not upper > (page_size + self.adj.value + shift_value): self.right_btn.hide() def move_left(self, widget): '''", "function to \"button-press-event\" signal. 
@param widget: gtk.eventbox. @param event: event of type gtk.gdk.event.", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public", "h -4) cr.fill() if self.menu != None: # Draw an arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(),", "not self.menu_show if self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0,", "in the hope that it will be useful, # but WITHOUT ANY WARRANTY;", ": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True", "size. @param width: Width of Bread. @param height: Height of Bread. ''' self.scroll_win.set_size_request(width", "FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for", "from draw import (draw_line, draw_text, draw_pixbuf) from utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state)", "!=in_menu: self.in_menu = in_menu self.queue_draw() def create_menu(self, menu_items): ''' Internal function to create", "''' Internal callback function to \"clicked\" signal. @param widget: Right button. ''' upper,", "a container which can hold crumbs widget. @undocumented: create_crumb @undocumented: enter_notify @undocumented: leave_notify", "= gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox() ###################################### # test", "= widget.allocation # Draw backgroud. with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height)", "Bread class since app_theme is golobalized. 
arrow_right = self.arrow_right arrow_down = self.arrow_down arrow_width,", "self.menu_press = False self.menu_show = not self.menu_show if self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin()", "h) # left draw_line(cr, x + w , y , x + w", "if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True)", "self.index_id, self.label) else: self.menu_press = False self.menu_show = not self.menu_show if self.menu_show: (wx,", "None: self.set_size_request( max(self.label_w + 2 * self.padding_x, self.btn_min), self.height) self.button_width = self.get_size_request()[0] else:", "button_color = None menu_color = None arrow_pixbuf = arrow_right elif widget.state == gtk.STATE_PRELIGHT:", ", y + h, x + w, y + h) # bottom draw_line(cr,", "if not self.show_others: for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index +", "= None elif widget.state == gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color =", "crumbs will not be destroyed, otherwise all crumbs on the right side will", "w, h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y, w, h) cr.fill() def shape_bread_menu_frame(self, widget,", "menu_items: Crumb menu, could be a Menu instance or a list, default is", "x, y, w, h = rect.x, rect.y, rect.width, rect.height # Should move this", "with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y +", ", self.button_width -1 , h -1) elif menu_color: draw_rectangle(cr, x + self.button_width, y", "create a Crumb list for different types of inputs. @param crumb: Support inputs", "Draw backgroud. 
with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill() return False", "button. @param label: Label @param font_size: Label's Font size, default is DEFAULT_FONT_SIZE. '''", "can redistribute it and/or modify # it under the terms of the GNU", "= 0 temp = 0 if upper > (page_size + value): self.left_btn.show() for", "= show_entry self.crumb = self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH # for left & right", "= widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self, widget, event): ''' Internal callback", "arrow_pixbuf = arrow_down elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color()", "Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask =", "show_entry: If True, an entry will pop up when click space area in", "__gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) }", "of bread menu, by default is None. @param max_width: Maximum width of bread", "True, True) # Add Bread Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb):", "+ h) # bottom draw_line(cr, x , y , x , y +", "arrow_pixbuf = arrow_right else: button_color = None menu_color = inner_border arrow_pixbuf = arrow_down", "2) # Draw text. draw_text(cr, self.label, x, y , self.button_width, h, self.font_size, text_color,", "coding:utf-8 -*- # Copyright (C) 2011 ~ 2012 Deepin, Inc. 
# 2011 ~", "Menu instance or a list, default is None @param font_size: Font size, default", "def create_crumb(self, crumb): ''' Internal function to create a Crumb list for different", "GNU General Public License as published by # the Free Software Foundation, either", "from specified index @param index: Start index @param crumbs: Crumb instance or Crumb", "list, default is None @param font_size: Font size, default is DEFAULT_FONT_SIZE. @param padding_x:", "''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp =", "of the GNU General Public License # along with this program. If not,", "License for more details. # # You should have received a copy of", "-1 , h -1) elif menu_color: draw_rectangle(cr, x + self.button_width, y + 1,", "\"\"hide\" signal. @param widget: Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False", "= False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox =", "True, crumbs will not be destroyed, otherwise all crumbs on the right side", "signal. @param widget: Crumb @param event: an event of gtk.gdk.event ''' in_menu =", "1 , self.button_width -1 , h -1) elif menu_color: draw_rectangle(cr, x + self.button_width,", "redistribute it and/or modify # it under the terms of the GNU General", "#-*- coding:utf-8 -*- # Copyright (C) 2011 ~ 2012 Deepin, Inc. 
# 2011", "rect.width - 2, rect.height - 2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb class", "self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self,", "= self.get_size_request()[0] else: self.set_size_request( max(self.label_w + 2 * self.padding_x + self.menu_min, self.btn_min +", "\"clicked\" signal. @param widget: Left button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size,", "3, h -3) elif menu_color: draw_rectangle(cr, x + self.button_width + 1, y +", "index @param index: Start index @param crumbs: Crumb instance or Crumb list For", "signal. @param widget: Crumb ''' if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press =", "Item for TreeView. @param max_height: Maximum height of bread menu, by default is", "page_size == 1.0: self.right_btn.show() def change_node(self, index, crumbs): ''' Change any nodes start", ", y + h) # left draw_line(cr, x + w , y ,", "= font_size self.padding_x = padding_x self.menu = self.create_menu(menu_items) if self.menu != None: self.menu.connect(\"hide\",", "1.0))] # Draw background. if not widget.state == gtk.STATE_NORMAL: # Draw button border.", "draw_text, draw_pixbuf) from utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk import gobject", "Crumb ''' if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press = False self.menu_show =", "any nodes after given index. 
@param index: To specified remove after given index.", "''' for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] =", "- 3, h -4) cr.fill() if self.menu != None: # Draw an arrow.", "version 3 of the License, or # any later version. # # This", "None. ''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True)", "2012 Deepin, Inc. # 2011 ~ 2012 Zeng Zhi # # Author: <NAME>", "): ''' Initialize Bread class. @param crumb: Crumb instance or a list of", "import Button from theme import ui_theme from menu import Menu from constant import", "@undocumented: button_press_cb @undocumented: button_clicked @undocumented: expose_cb ''' __gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,", "@param font_size: Font size, default is DEFAULT_FONT_SIZE. 
@param padding_x: Horizontal padding, default is", "y) # top draw_line(cr, x , y + h, x + w, y", "self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self, widget, event): ''' Internal callback function to \"expose-event\"", "# top draw_line(cr, x , y + h, x + w, y +", "cr.fill() def shape_bread_menu_frame(self, widget, event): pass def expose_bread_menu_frame(self, widget, event): cr = widget.window.cairo_create()", "= self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x, y, w, h): cr.set_source_rgb(1,", "y , x + w , y + h -1) # right cr.set_source_rgba(*outside_border)", "self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event): in_menu = event.x > self.button_width self.in_menu =in_menu", "None), ], shadow_visible = False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit())", "# Draw an arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min - arrow_width)", "remove after given index. ''' for i in self.hbox.get_children()[(index + 1): -1]: i.destroy()", "1, rect.y + 1, rect.width - 2, rect.height - 2) cr.fill() gobject.type_register(BreadMenu) class", "= Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget): crumb1 = Crumb(\"Yet Another Root\", menu) crumb2", "to Crumb \"motion-notify-event\" signal. @param widget: Crumb @param event: an event of gtk.gdk.event", "@undocumented: hide_cb @undocumented: button_press_cb @undocumented: button_clicked @undocumented: expose_cb ''' __gsignals__= { \"item_clicked\" :", "to \"button-press-event\" signal. 
@param widget: Crumb @param event: An event of gtk.gdk.Event '''", "arrow_right = self.arrow_right arrow_down = self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf =", "h): draw_line(cr, x -1 , y , x + w, y) # top", "= arrow_right elif widget.state == gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf =", "index value of clicked crumb. @param label: Label of the crumb. ''' if", "Bread(gtk.HBox): ''' Bread widget is a container which can hold crumbs widget. @undocumented:", "* self.padding_x, self.btn_min), self.height) self.button_width = self.get_size_request()[0] else: self.set_size_request( max(self.label_w + 2 *", "gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node) vbox.pack_start(test_change_node, True, False , 0) win.add(vbox) win.show_all() gtk.main()", "- self.menu_min self.queue_draw() def expose_cb(self, widget, event): ''' Internal expose callback function. @param", "widget, event): ''' Internal expose callback function. @param widget: Crumb instance. @param event:", "show_others self.show_entry = show_entry self.crumb = self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH # for left", "button_color: draw_rectangle(cr, x + 1 , y + 1 , self.button_width -1 ,", "False, True) self.pack_start(self.hbox, True, True) # Add Bread Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb)", "crumb in crumbs: crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id = len(self.item_list)", "None arrow_pixbuf = arrow_right elif widget.state == gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show:", "scrolled window size. if sum(self.item_list) > page_size and not page_size == 1.0: self.right_btn.show()", "(h - arrow_height) / 2) # Draw text. 
draw_text(cr, self.label, x, y ,", "self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] = [] self.emit(\"item_clicked\", index, label)", "# for left & right buttons self.in_event_box = False # Init left button", "20 class Bread(gtk.HBox): ''' Bread widget is a container which can hold crumbs", "''' __gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))", "widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value - shift_value) == 0: self.left_btn.hide() def set_size(self, width,", "height: Height of Bread. ''' self.scroll_win.set_size_request(width - 2 * self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height)", "better to consider whether or not shown left && right box # at", "change_node(self, index, crumbs): ''' Change any nodes start from specified index @param index:", "else: self.menu_press = False self.menu_show = not self.menu_show if self.menu_show: (wx, wy) =", "default is 15 pixels. ''' super(Crumb, self).__init__() self.arrow_right = None self.arrow_down = None", "self.menu_press = False self.menu_show = False self.index_id = 0 self.set_label(label) self.in_button = True", "check if path is valid or not path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i ,", "@param index: Start index @param crumbs: Crumb instance or Crumb list For instance,", "width: Width of Bread. @param height: Height of Bread. ''' self.scroll_win.set_size_request(width - 2", "self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup menu for bread. 
@undocumented:", "if temp >= value: shift_value = self.item_list[i] - (temp - value) break #play", "self.item_list = list() self.show_others = show_others self.show_entry = show_entry self.crumb = self.create_crumb(crumb) self.button_width", "None)])] Crumb instance [Crumb, Crumb] ''' if isinstance(crumb, Crumb): return [crumb,] elif isinstance(crumb[0],", "poplist import Poplist ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox): ''' Bread widget is a", "@param items: Item for TreeView. @param max_height: Maximum height of bread menu, by", "widget): ''' Internal callback function to \"press-return\" signal. @param widget: gtk.Entry widget instance.", "''' Set Bread size. @param width: Width of Bread. @param height: Height of", "@param width: Width of Bread. @param height: Height of Bread. ''' self.scroll_win.set_size_request(width -", "lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if not upper > (page_size +", "border. cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x + 2, y + 2, self.button_width -", "(self.adj.value - shift_value) == 0: self.left_btn.hide() def set_size(self, width, height): ''' Set Bread", "callback function to \"expose-event\" signal. @param widget: gtk.EventBox @param event: event of type", "static setting size # it is better to consider whether or not shown", "box static setting size # it is better to consider whether or not", "(offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width =", "__init__(self, label, menu_items = None, font_size = DEFAULT_FONT_SIZE, padding_x = 15, ): '''", "\"button-press-event\" signal. @param widget: Crumb @param event: An event of gtk.gdk.Event ''' if", "Initialize Bread class. 
@param crumb: Crumb instance or a list of crumb instances", "if upper > (page_size + value): self.left_btn.show() for i in xrange(len(self.item_list)+1): temp +=", "widget): ''' Internal callback function to Menu's \"\"hide\" signal. @param widget: Menu '''", "# Draw backgroud. with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill() return", "Copyright (C) 2011 ~ 2012 Deepin, Inc. # 2011 ~ 2012 Zeng Zhi", "will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty", "widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self, widget, event): ''' Internal callback function to", "cr = widget.window.cairo_create() rect = widget.allocation # Draw backgroud. with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1)))", "if self.menu == None: self.set_size_request( max(self.label_w + 2 * self.padding_x, self.btn_min), self.height) self.button_width", "= self.arrow_right arrow_down = self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right", "gtk.EventBox @param event: event of type gtk.gdk.event ''' cr = widget.window.cairo_create() rect =", "For instance, there exist a list contain [Crumb1, Crumb2], by using change_node(1, [Crumb3,", "None menu_color = inner_border else: button_color = inner_border menu_color = None elif widget.state", "2, self.menu_min - 3, h -4) cr.fill() if self.menu != None: # Draw", "Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False, False, 0) # Test", "@undocumented: leave_notify @undocumented: event_box_press @undocumented: enter_cb @undocumented: redraw_bg @undocumented: click_cb @undocumented: move_right @undocumented:", "gtk.gdk.Event. 
''' self.in_event_box = False def event_box_press(self, widget, event): ''' Internal callback function", "i in path_list]) menu = Menu([ (None, \"测试1\", None), (None, \"测试2\", None), ],", "i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] = [] self.emit(\"item_clicked\",", "0) # Test Item add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False,", "= show_others self.show_entry = show_entry self.crumb = self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH # for", "upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp = 0", "alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL:", "- self.menu_min) if not self.in_button: self.menu_press = True def button_clicked(self, widget): ''' Intenal", "if button_color: draw_rectangle(cr, x + 1 , y + 1 , self.button_width -1", "class. @param items: Item for TreeView. @param max_height: Maximum height of bread menu,", "''' in_menu = event.x > self.button_width if self.in_menu !=in_menu: self.in_menu = in_menu self.queue_draw()", "__name__ == \"__main__\": import gtk def add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb) def change_root_node(", "h) # bottom draw_line(cr, x , y , x , y + h)", "event of type gtk.gdk.event ''' cr = widget.window.cairo_create() rect = widget.allocation # Draw", "= show_left_right_box left_box = gtk.HBox(spacing = 0) right_box = gtk.HBox(spacing = 0) #", "[Crumb(c[0], c[1]) for c in crumb] def enter_notify(self, widget, event): ''' Internal callback", "change to [Crumb1, Crumb3, Crumb4]. In this way, application can operate crumbs '''", "2011 ~ 2012 Deepin, Inc. # 2011 ~ 2012 Zeng Zhi # #", "def create_menu(self, menu_items): ''' Internal function to create menu. 
@param menu_items: menu_items @return:", "[(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] menu_color = [(0, (disable_bg, 1.0)), (1, (disable_bg,", "True) bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False, False,", "import pango from poplist import Poplist ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox): ''' Bread", "# Author: <NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>> # # This program is", "show_others = False, show_entry = True) bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\",", "otherwise all crumbs on the right side will be destroyed. @param show_entry: If", "self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1)", "+ w , y + h -1) # right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr,", "''' Change any nodes start from specified index @param index: Start index @param", "wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height) = widget.allocation.width,", "''' if not self.show_others: for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index", "@param widget: gtk.eventbox. @param event: event of type gtk.gdk.event. ''' obj = self.hbox.get_children()", "arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5))", "max_width=None, ): ''' Initialize BreadMenu class. @param items: Item for TreeView. 
@param max_height:", "type gtk.gdk.event. ''' obj = self.hbox.get_children() label = [] for o in obj[:-1]:", "= 0) # FIXME: left && right box static setting size # it", "instance. ''' label = widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self, widget, event):", "self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press = False self.menu_show = not self.menu_show if", "without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR", "+ self.button_width + (self.menu_min - arrow_width) / 2, y + (h - arrow_height)", "path): # Application can check if path is valid or not path_list =", "None self.arrow_down = None self.menu_min = 18 # menu bar width self.btn_min =", "self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win = ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox, True, True)", "[(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' if isinstance(crumb, Crumb):", "the License, or # any later version. # # This program is distributed", "shape_bread_menu_frame(self, widget, event): pass def expose_bread_menu_frame(self, widget, event): cr = widget.window.cairo_create() rect =", "self.left_btn.show() for i in xrange(len(self.item_list)+1): temp += self.item_list[i] if temp > (page_size +", "@param index: To specified remove after given index. ''' for i in self.hbox.get_children()[(index", "DEFAULT_FONT_SIZE. @param padding_x: Horizontal padding, default is 15 pixels. ''' super(Crumb, self).__init__() self.arrow_right", "Crumb @param event: an event of gtk.gdk.event ''' in_menu = event.x > self.button_width", "\"测试2\", None), ], shadow_visible = False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w:", "default is None @param font_size: Font size, default is DEFAULT_FONT_SIZE. 
@param padding_x: Horizontal", "self.in_event_box = False # Init left button and right button. self.show_left_right_box = show_left_right_box", "-3) if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x + 2, y +", "You should have received a copy of the GNU General Public License #", "right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x + 1 , y + 1 ,", "x, y, w, h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y, w, h) cr.fill() def", "cr.fill() return False def add(self, crumbs): ''' Add crumbs. Can accept Crumb instance", "objects[index: -1]: i.destroy() self.item_list[index:] = [] self.add(crumbs) def remove_node_after_index(self, index): ''' Remove any", "create_crumb @undocumented: enter_notify @undocumented: leave_notify @undocumented: event_box_press @undocumented: enter_cb @undocumented: redraw_bg @undocumented: click_cb", "# Copyright (C) 2011 ~ 2012 Deepin, Inc. # 2011 ~ 2012 Zeng", "FIXME: left && right box static setting size # it is better to", "bar width self.btn_min = 50 # button width self.height = 24 # crumb", "+ h -1) # right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x + 1 ,", "elif widget.state == gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color = inner_border menu_color", "y + h -1) # right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x + 1", "be destroyed, otherwise all crumbs on the right side will be destroyed. @param", "> (page_size + value): shift_value = temp - (page_size + value) #play animation", "= Crumb(\"Yet Another Root\", menu) crumb2 = Crumb(\"Yet Another Child\", menu) bread.change_node(0, [crumb1,", "class. @param crumb: Crumb instance or a list of crumb instances @param arrow_right:", "left button. 
@param label: Label @param font_size: Label's Font size, default is DEFAULT_FONT_SIZE.", "the terms of the GNU General Public License as published by # the", "= [] self.add(crumbs) def remove_node_after_index(self, index): ''' Remove any nodes after given index.", "''' Internal callback function to \"clicked\" signal. @param widget: Crumb instance. @param index:", "expose_bread_menu_frame(self, widget, event): cr = widget.window.cairo_create() rect = widget.allocation with cairo_disable_antialias(cr): outside_border =", "Maintainer: <NAME> <<EMAIL>> # # This program is free software: you can redistribute", "(gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items = None, font_size = DEFAULT_FONT_SIZE, padding_x = 15,", "self.font_size, text_color, alignment = pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if __name__ == \"__main__\": import", "self.font_size = font_size self.padding_x = padding_x self.menu = self.create_menu(menu_items) if self.menu != None:", "15 pixels. ''' super(Crumb, self).__init__() self.arrow_right = None self.arrow_down = None self.menu_min =", "(page_size + value): self.left_btn.show() for i in xrange(len(self.item_list)+1): temp += self.item_list[i] if temp", "= True def leave_notify(self, widget, event): ''' Internal callback function to \"leave-notify-event\" signal.", "crumb): ''' Internal function to create a Crumb list for different types of", "= False def event_box_press(self, widget, event): ''' Internal callback function to \"button-press-event\" signal.", "\"clicked\" signal. @param widget: Crumb ''' if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press", "= Crumb(\"Yet Another Child\", menu) bread.change_node(0, [crumb1, crumb2]) def change_entry(widget, path): # Application", "if button_color: cr.rectangle(x + 2, y + 2, self.button_width - 4, h -4)", "font_size: Label's Font size, default is DEFAULT_FONT_SIZE. 
''' self.label = label (self.label_w, self.label_h)", "= None self.menu_min = 18 # menu bar width self.btn_min = 50 #", "default is None. ''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, )", "<NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>> # # This program is free software:", "self.button_width - 4, h -4) cr.fill() elif menu_color: cr.rectangle( x + self.button_width +", "setting size # it is better to consider whether or not shown left", "False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event): ''' Internal callback function to \"button-press-event\" signal.", "self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] = [] def click_cb(self, widget,", "is None. @param max_width: Maximum width of bread menu, by default is None.", "button import Button from theme import ui_theme from menu import Menu from constant", "crumbs): ''' Change any nodes start from specified index @param index: Start index", "self.in_menu =in_menu def motion_notify_cb(self, widget, event): ''' Internal callback function to Crumb \"motion-notify-event\"", "= Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if not upper >", "= [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] menu_color = [(0, (disable_bg, 1.0)), (1,", "offset_x + menu_width - arrow_button_width, wy + offset_y + menu_height, ), (0, 0))", "arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ): ''' Initialize Bread class. @param crumb: Crumb instance", "text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf = arrow_down else: arrow_pixbuf = arrow_right if", "callback function to \"clicked\" signal. @param widget: Left button. 
''' upper, page_size, value", "index. ''' for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):]", ": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb,", "if self.in_button: button_color = inner_border menu_color = None arrow_pixbuf = arrow_right else: button_color", "signal. @param widget: gtk.eventbox. @param event: event of type gtk.gdk.event. ''' obj =", "1 , y + 1 , self.button_width -1 , h -1) elif menu_color:", "signal. @param widget: gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event. '''", "@param show_others: If True, crumbs will not be destroyed, otherwise all crumbs on", "self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win", "- value) break #play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value])", "callback function to \"leave-notify-event\" signal. @param widget: Gtk.EventBox. @param event: The pointer event", "move this part to Bread class since app_theme is golobalized. arrow_right = self.arrow_right", "= self.hbox.get_children() label = [] for o in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry =", "or not path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu) for i in path_list])", "max_width: Maximum width of bread menu, by default is None. 
''' Poplist.__init__(self, items=items,", "\"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False,", "path is valid or not path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu) for", "self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win = ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox,", "@param widget: Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False self.set_state(gtk.STATE_NORMAL) def", "expose callback function. @param widget: Crumb instance. @param event: An event of gtk.gdk.Event.", "self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0])", "if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event): '''", "= not self.menu_show if self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(),", "Height of Bread. ''' self.scroll_win.set_size_request(width - 2 * self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread)", "expose_cb ''' __gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items", "function to create a Crumb list for different types of inputs. 
@param crumb:", "button_clicked @undocumented: expose_cb ''' __gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self,", "self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def enter_cb(self, widget): ''' Internal callback", "widget.window.cairo_create() rect = widget.allocation with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x", "@param widget: gtk.Entry widget instance. ''' label = widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label)", "self.item_list[index:] = [] self.add(crumbs) def remove_node_after_index(self, index): ''' Remove any nodes after given", "arrow_right else: button_color = None menu_color = inner_border arrow_pixbuf = arrow_down elif widget.state", "create_menu(self, menu_items): ''' Internal function to create menu. @param menu_items: menu_items @return: Menu", "Crumb(\"Yet Another Child\", menu) bread.change_node(0, [crumb1, crumb2]) def change_entry(widget, path): # Application can", "self.create_menu(menu_items) if self.menu != None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False self.menu_show = False", "def add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget): crumb1 = Crumb(\"Yet Another", "= widget.window.cairo_create() rect = widget.allocation # Draw backgroud. with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x,", "bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False, False, 0)", "menu for bread. 
@undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def __init__(self, items,", "in crumbs: crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\",", "Gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box = False", "add_panel) vbox.pack_start(add_path_button, True, False, 0) test_change_node = gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node) vbox.pack_start(test_change_node,", "it is better to consider whether or not shown left && right box", "return crumb else: return [Crumb(c[0], c[1]) for c in crumb] def enter_notify(self, widget,", "0 if not value == 0: self.right_btn.show() for i in xrange(len(self.item_list)): temp +=", "24 # crumb height self.font_size = font_size self.padding_x = padding_x self.menu = self.create_menu(menu_items)", "self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn,", "\"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' crumbs = self.create_crumb(crumbs) for crumb", "-1]: i.destroy() self.item_list[(index + 1):] = [] def click_cb(self, widget, index, label): '''", ") win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox() ######################################", "self.menu_show if self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0)", "change_root_node( widget): crumb1 = Crumb(\"Yet Another Root\", menu) crumb2 = Crumb(\"Yet Another Child\",", 
"def move_right(self, widget): ''' Internal callback function to \"clicked\" signal. @param widget: Right", "page_size = self.adj.page_size # Show right button if crumbs exceed scrolled window size.", "{ \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items = None, font_size", "previous list will be change to [Crumb1, Crumb3, Crumb4]. In this way, application", "# Test Item add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False, 0)", "x + w, y) # top draw_line(cr, x , y + h, x", "from scrolled_window import ScrolledWindow from button import Button from theme import ui_theme from", "right side will be destroyed. @param show_entry: If True, an entry will pop", "could be a Menu instance or a list, default is None @param font_size:", "Application can check if path is valid or not path_list = path.split(\"/\")[1:] bread.change_node(0,", "gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True)", "y + 1, self.menu_min, h - 1) # Draw innner border. cr.set_source_rgba(*inner_border) if", "the GNU General Public License as published by # the Free Software Foundation,", "cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill() return False def add(self, crumbs): ''' Add crumbs.", "else: return None def hide_cb(self, widget): ''' Internal callback function to Menu's \"\"hide\"", "Bread size. @param width: Width of Bread. @param height: Height of Bread. 
'''", "False, False) # Init Hbox self.hbox = gtk.HBox(False, 0) self.hbox.show() self.eventbox = gtk.EventBox()", "+ 1): -1]: i.destroy() self.item_list[(index + 1):] = [] self.emit(\"item_clicked\", index, label) def", "show_entry self.crumb = self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH # for left & right buttons", "in_menu self.queue_draw() def create_menu(self, menu_items): ''' Internal function to create menu. @param menu_items:", "pointer event of type gtk.gdk.Event. ''' self.in_event_box = False def event_box_press(self, widget, event):", "for i in objects[index: -1]: i.destroy() self.item_list[index:] = [] self.add(crumbs) def remove_node_after_index(self, index):", "animation import Animation from scrolled_window import ScrolledWindow from button import Button from theme", "''' Crumb class . @undocumented: enter_button @undocumented: motion_notify_cb @undocumented: create_menu @undocumented: hide_cb @undocumented:", "Crumb instance [Crumb, Crumb] ''' if isinstance(crumb, Crumb): return [crumb,] elif isinstance(crumb[0], str):", "function to \"leave-notify-event\" signal. @param widget: Gtk.EventBox. @param event: The pointer event of", "left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\",", "Init. 
super(Bread, self).__init__(spacing = 0) self.arrow_right = arrow_right self.arrow_down = arrow_down self.item_list =", "self.button_width if self.in_menu !=in_menu: self.in_menu = in_menu self.queue_draw() def create_menu(self, menu_items): ''' Internal", "self.show_others: for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] =", "= True self.menu_press = False else: self.in_button = event.x < (widget.allocation.width - self.menu_min)", "= True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK)", "len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def enter_cb(self, widget): ''' Internal callback function to", "+ 1):] = [] def click_cb(self, widget, index, label): ''' Internal callback function", "(gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"),", "alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT)", "h = rect.x, rect.y, rect.width, rect.height # Should move this part to Bread", "== 0: self.right_btn.show() for i in xrange(len(self.item_list)): temp += self.item_list[i] if temp >=", "self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False, False) #", 
"(wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height) =", "''' Popup menu for bread. @undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def", "self.item_list[i] if temp >= value: shift_value = self.item_list[i] - (temp - value) break", "= None, font_size = DEFAULT_FONT_SIZE, padding_x = 15, ): ''' Initialize Crumb class.", "whether or not shown left && right box # at runtime if self.show_left_right_box:", "Set Bread size. @param width: Width of Bread. @param height: Height of Bread.", "BreadMenu class. @param items: Item for TreeView. @param max_height: Maximum height of bread", "it will be useful, # but WITHOUT ANY WARRANTY; without even the implied", "Left button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0", "return BreadMenu(menu_items) else: return None def hide_cb(self, widget): ''' Internal callback function to", "The pointer event of type gtk.gdk.Event. 
''' self.in_event_box = False def event_box_press(self, widget,", "widget: Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self,", "from animation import Animation from scrolled_window import ScrolledWindow from button import Button from", "= self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if not value", "value-shift_value]) ani.start() if (self.adj.value - shift_value) == 0: self.left_btn.hide() def set_size(self, width, height):", "gobject.type_register(Crumb) if __name__ == \"__main__\": import gtk def add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb)", "event): cr = widget.window.cairo_create() rect = widget.allocation with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5))", "shift_value = self.item_list[i] - (temp - value) break #play animation ani = Animation(self.adj,", "label: Label of the crumb. ''' if not self.show_others: for i in self.hbox.get_children()[(index", "@param arrow_right: Dynamic pixbuf for right arrow, default is \\\"treeview/arrow_right.png\\\" from ui theme.", "= gtk.HBox(False, 0) self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\",", "''' Internal function to create menu. 
@param menu_items: menu_items @return: Menu instance '''", "+ 2 * self.padding_x, self.btn_min), self.height) self.button_width = self.get_size_request()[0] else: self.set_size_request( max(self.label_w +", "widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx +", "button_color = inner_border menu_color = None elif widget.state == gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color()", "(1, (disable_bg, 1.0))] # Draw background. if not widget.state == gtk.STATE_NORMAL: # Draw", "0) test_change_node = gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node) vbox.pack_start(test_change_node, True, False , 0)", "not widget.state == gtk.STATE_NORMAL: # Draw button border. def draw_rectangle(cr, x, y ,", "self.menu_min - 3, h -4) cr.fill() if self.menu != None: # Draw an", "value = self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if upper", "self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event): ''' Internal", "hide_cb @undocumented: button_press_cb @undocumented: button_clicked @undocumented: expose_cb ''' __gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST,", "label: Crumb item label @param menu_items: Crumb menu, could be a Menu instance", "(menu_width, menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x + menu_width", "= 50 # button width self.height = 24 # crumb height self.font_size =", "self.item_list[(index + 1):] = [] def click_cb(self, widget, index, label): ''' Internal callback", "not page_size == 1.0: self.right_btn.show() def change_node(self, index, crumbs): ''' Change any nodes", "ui_theme.get_color(\"disable_text\").get_color() disable_bg = 
ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] menu_color", "shift_value) == 0: self.left_btn.hide() def set_size(self, width, height): ''' Set Bread size. @param", "objects = self.hbox.get_children() for i in objects[index: -1]: i.destroy() self.item_list[index:] = [] self.add(crumbs)", "y , x + w, y) # top draw_line(cr, x , y +", "event of gtk.gdk.event ''' in_menu = event.x > self.button_width if self.in_menu !=in_menu: self.in_menu", "of type gtk.gdk.Event. ''' self.in_event_box = False def event_box_press(self, widget, event): ''' Internal", "label, font_size = DEFAULT_FONT_SIZE): ''' Set label for left button. @param label: Label", "widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color", "of inputs. @param crumb: Support inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu", "the # GNU General Public License for more details. # # You should", "ui theme. 
@param arrow_down: Dynamic pixbuf for down arrow, default is \\\"treeview/arrow_down.png\\\" from", "x + self.button_width + 1, y + 2, self.menu_min - 2, h -3)", "+ 1, rect.y + 1, rect.width - 2, rect.height - 2) cr.fill() gobject.type_register(BreadMenu)", "= self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH # for left & right buttons self.in_event_box =", "1, rect.width - 2, rect.height - 2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb", "constant import DEFAULT_FONT_SIZE from draw import (draw_line, draw_text, draw_pixbuf) from utils import (get_content_size,", "@undocumented: create_crumb @undocumented: enter_notify @undocumented: leave_notify @undocumented: event_box_press @undocumented: enter_cb @undocumented: redraw_bg @undocumented:", "width self.height = 24 # crumb height self.font_size = font_size self.padding_x = padding_x", "# it is better to consider whether or not shown left && right", "gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox() ###################################### # test breadcrumb widget bread = Bread([(\"Root\",", "Crumb] ''' crumbs = self.create_crumb(crumbs) for crumb in crumbs: crumb.show() crumb.arrow_right = self.arrow_right", "= widget.window.cairo_create() rect = widget.allocation with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border)", "expose_cb(self, widget, event): ''' Internal expose callback function. @param widget: Crumb instance. @param", "arrow_down = self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border =", "+ 1, self.menu_min, h - 1) # Draw innner border. cr.set_source_rgba(*inner_border) if button_color:", "signal. @param widget: Left button. 
''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value", "cr.rectangle(x, y, w, h) cr.fill() def shape_bread_menu_frame(self, widget, event): pass def expose_bread_menu_frame(self, widget,", "return False def add(self, crumbs): ''' Add crumbs. Can accept Crumb instance or", "temp = 0 if upper > (page_size + value): self.left_btn.show() for i in", "self.button_width, y + 1, self.menu_min, h - 1) # Draw innner border. cr.set_source_rgba(*inner_border)", "None @param font_size: Font size, default is DEFAULT_FONT_SIZE. @param padding_x: Horizontal padding, default", "list contain [Crumb1, Crumb2], by using change_node(1, [Crumb3, Crumb4]), previous list will be", "def button_press_cb(self, widget, event): ''' Internal callback function to \"button-press-event\" signal. @param widget:", "label (self.label_w, self.label_h) = get_content_size(self.label, font_size) if self.menu == None: self.set_size_request( max(self.label_w +", "-1) self.left_btn = Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left)", "self.padding_x, self.btn_min), self.height) self.button_width = self.get_size_request()[0] else: self.set_size_request( max(self.label_w + 2 * self.padding_x", "if widget.state == gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color = None", "to \"enter-notify-event\" signal. @param widget: gtk.EventBox. @param event: The pointer event of type", "event): ''' Internal expose callback function. @param widget: Crumb instance. 
@param event: An", "i.destroy() self.item_list[(index + 1):] = [] self.emit(\"item_clicked\", index, label) def move_right(self, widget): '''", "+ 2, self.menu_min - 2, h -3) if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if", "self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x, y, w, h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y,", "- arrow_width) / 2, y + (h - arrow_height) / 2) # Draw", "Menu from constant import DEFAULT_FONT_SIZE from draw import (draw_line, draw_text, draw_pixbuf) from utils", "bread.change_node(0, [Crumb(i , menu) for i in path_list]) menu = Menu([ (None, \"测试1\",", "event): ''' Internal callback function to \"expose-event\" signal. @param widget: gtk.EventBox @param event:", "# Draw text. draw_text(cr, self.label, x, y , self.button_width, h, self.font_size, text_color, alignment", "pass def expose_bread_menu_frame(self, widget, event): cr = widget.window.cairo_create() rect = widget.allocation with cairo_disable_antialias(cr):", "if self.in_menu: button_color = None menu_color = inner_border else: button_color = inner_border menu_color", "x -1 , y , x + w, y) # top draw_line(cr, x", "text_color = ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color = None arrow_pixbuf = arrow_right elif", "Public License as published by # the Free Software Foundation, either version 3", "self.adj.value shift_value = 0 temp = 0 if upper > (page_size + value):", "PARTICULAR PURPOSE. See the # GNU General Public License for more details. #", "(get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk import gobject import pango from poplist import", "&& right box static setting size # it is better to consider whether", "which can hold crumbs widget. 
@undocumented: create_crumb @undocumented: enter_notify @undocumented: leave_notify @undocumented: event_box_press", "can operate crumbs ''' objects = self.hbox.get_children() for i in objects[index: -1]: i.destroy()", "is \\\"treeview/arrow_right.png\\\" from ui theme. @param arrow_down: Dynamic pixbuf for down arrow, default", "+ self.menu_min, self.btn_min + self.menu_min), self.height) self.button_width = self.get_size_request()[0] - self.menu_min self.queue_draw() def", "of type gtk.gdk.event ''' cr = widget.window.cairo_create() rect = widget.allocation # Draw backgroud.", "draw_text(cr, self.label, x, y , self.button_width, h, self.font_size, text_color, alignment = pango.ALIGN_CENTER) return", "self.menu = self.create_menu(menu_items) if self.menu != None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False self.menu_show", "* self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup menu for bread.", "gtk def add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget): crumb1 = Crumb(\"Yet", "breadcrumb widget bread = Bread([(\"Root\", menu), (\"Level1\", menu)], show_others = False, show_entry =", "be change to [Crumb1, Crumb3, Crumb4]. In this way, application can operate crumbs", "h, self.font_size, text_color, alignment = pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if __name__ == \"__main__\":", "button. 
self.show_left_right_box = show_left_right_box left_box = gtk.HBox(spacing = 0) right_box = gtk.HBox(spacing =", "nodes start from specified index @param index: Start index @param crumbs: Crumb instance", "(page_size + value) #play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value])", "c in crumb] def enter_notify(self, widget, event): ''' Internal callback function to \"enter-notify-event\"", "self.menu_min), self.height) self.button_width = self.get_size_request()[0] - self.menu_min self.queue_draw() def expose_cb(self, widget, event): '''", "= None arrow_pixbuf = arrow_right elif widget.state == gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if", "self.in_button: self.menu_press = True def button_clicked(self, widget): ''' Intenal callback function to \"clicked\"", "0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL: text_color", "crumb2 = Crumb(\"Yet Another Child\", menu) bread.change_node(0, [crumb1, crumb2]) def change_entry(widget, path): #", "self.btn_min), self.height) self.button_width = self.get_size_request()[0] else: self.set_size_request( max(self.label_w + 2 * self.padding_x +", "max_height=None, max_width=None, ): ''' Initialize BreadMenu class. @param items: Item for TreeView. @param", "# left draw_line(cr, x + w , y , x + w ,", "rect = widget.allocation x, y, w, h = rect.x, rect.y, rect.width, rect.height #", "useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of #", "@undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def __init__(self, items, max_height=None, max_width=None, ): ''' Initialize", "class. 
@param label: Crumb item label @param menu_items: Crumb menu, could be a", "self.height) self.button_width = self.get_size_request()[0] - self.menu_min self.queue_draw() def expose_cb(self, widget, event): ''' Internal", "draw_rectangle(cr, x + 2, y + 2, self.button_width - 3, h -3) elif", "Crumb2], by using change_node(1, [Crumb3, Crumb4]), previous list will be change to [Crumb1,", "Child\", menu) bread.change_node(0, [crumb1, crumb2]) def change_entry(widget, path): # Application can check if", "shift_value = 0 temp = 0 if upper > (page_size + value): self.left_btn.show()", "''' self.label = label (self.label_w, self.label_h) = get_content_size(self.label, font_size) if self.menu == None:", "to create menu. @param menu_items: menu_items @return: Menu instance ''' if menu_items !=", "font_size self.padding_x = padding_x self.menu = self.create_menu(menu_items) if self.menu != None: self.menu.connect(\"hide\", self.hide_cb)", "License # along with this program. If not, see <http://www.gnu.org/licenses/>. from animation import", "even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "1) # Draw innner border. 
cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x + 2, y", "break #play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if", "temp >= value: shift_value = self.item_list[i] - (temp - value) break #play animation", "self.set_label(label) self.in_button = True self.in_menu = True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked)", "w, h = rect.x, rect.y, rect.width, rect.height # Should move this part to", "x , y , x , y + h) # left draw_line(cr, x", "= False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event): ''' Internal callback function to \"button-press-event\"", "# # This program is distributed in the hope that it will be", "0 cr = widget.window.cairo_create() rect = widget.allocation x, y, w, h = rect.x,", "Poplist ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox): ''' Bread widget is a container which", "Right button. 
''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0", "\"__main__\": import gtk def add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget): crumb1", "== gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf = arrow_down else: arrow_pixbuf =", "self).__init__(spacing = 0) self.arrow_right = arrow_right self.arrow_down = arrow_down self.item_list = list() self.show_others", "18 # menu bar width self.btn_min = 50 # button width self.height =", "''' self.scroll_win.set_size_request(width - 2 * self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): '''", "expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self,", "Popup menu for bread. @undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def __init__(self,", "event of gtk.gdk.Event. ''' if self.menu == None: self.menu_min = 0 cr =", "= Bread([(\"Root\", menu), (\"Level1\", menu)], show_others = False, show_entry = True) bread.add([\"xxx\",menu]) #", "Internal function to create a Crumb list for different types of inputs. 
@param", "self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press = False self.menu_show = not self.menu_show if self.menu_show:", "sum(self.item_list) > page_size and not page_size == 1.0: self.right_btn.show() def change_node(self, index, crumbs):", "+ h) # left draw_line(cr, x + w , y , x +", "\"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self,", "index: Start index @param crumbs: Crumb instance or Crumb list For instance, there", "Maximum width of bread menu, by default is None. ''' Poplist.__init__(self, items=items, max_height=max_height,", "class BreadMenu(Poplist): ''' Popup menu for bread. @undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame", "= temp - (page_size + value) #play animation ani = Animation(self.adj, lambda widget,", "lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value - shift_value) == 0: self.left_btn.hide()", "None and isinstance(menu_items, list): return BreadMenu(menu_items) else: return None def hide_cb(self, widget): '''", "if not self.in_button: self.menu_press = True def button_clicked(self, widget): ''' Intenal callback function", "# Application can check if path is valid or not path_list = path.split(\"/\")[1:]", "Root\", menu) crumb2 = Crumb(\"Yet Another Child\", menu) bread.change_node(0, [crumb1, crumb2]) def change_entry(widget,", "= gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def", "''' Internal callback function to \"enter-notify-event\" signal. @param widget: gtk.EventBox. 
@param event: The", "enter_button(self, widget, event): in_menu = event.x > self.button_width self.in_menu =in_menu def motion_notify_cb(self, widget,", "crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size # Show right button", "an event of gtk.gdk.event ''' in_menu = event.x > self.button_width if self.in_menu !=in_menu:", "= False self.menu_show = False self.index_id = 0 self.set_label(label) self.in_button = True self.in_menu", "False self.menu_show = not self.menu_show if self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y)", "of Bread. ''' self.scroll_win.set_size_request(width - 2 * self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class", "= inner_border else: button_color = inner_border menu_color = None elif widget.state == gtk.STATE_ACTIVE:", "gtk.Entry widget instance. ''' label = widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self,", "exceed scrolled window size. if sum(self.item_list) > page_size and not page_size == 1.0:", "Free Software Foundation, either version 3 of the License, or # any later", "arrow_right elif widget.state == gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf = arrow_down", "callback function to \"clicked\" signal. @param widget: Right button. ''' upper, page_size, value", "self.arrow_down = arrow_down self.item_list = list() self.show_others = show_others self.show_entry = show_entry self.crumb", "def enter_button(self, widget, event): in_menu = event.x > self.button_width self.in_menu =in_menu def motion_notify_cb(self,", "def change_node(self, index, crumbs): ''' Change any nodes start from specified index @param", "size, default is DEFAULT_FONT_SIZE. 
@param padding_x: Horizontal padding, default is 15 pixels. '''", "h -3) elif menu_color: draw_rectangle(cr, x + self.button_width + 1, y + 2,", "(page_size + value): shift_value = temp - (page_size + value) #play animation ani", "(gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items = None, font_size = DEFAULT_FONT_SIZE, padding_x", "widget.state == gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color = inner_border menu_color =", "set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False, False, 0) # Test Item", "if path is valid or not path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu)", "menu_color: draw_rectangle(cr, x + self.button_width, y + 1, self.menu_min, h - 1) #", "if self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width,", "(page_size + self.adj.value + shift_value): self.right_btn.hide() def move_left(self, widget): ''' Internal callback function", "terms of the GNU General Public License as published by # the Free", "Menu's \"\"hide\" signal. @param widget: Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show =", "# Draw innner border. cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x + 2, y +", "+ 1, y + 2, self.menu_min - 3, h -4) cr.fill() if self.menu", "widget. @undocumented: create_crumb @undocumented: enter_notify @undocumented: leave_notify @undocumented: event_box_press @undocumented: enter_cb @undocumented: redraw_bg", "crumb[1]),] elif isinstance(crumb[0], Crumb): return crumb else: return [Crumb(c[0], c[1]) for c in", "''' Remove any nodes after given index. 
@param index: To specified remove after", "Crumb instances @param crumbs: Supported inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu", "menu, by default is None. @param max_width: Maximum width of bread menu, by", "= self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if upper >", "False, False, 0) # Test Item add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button,", "right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\",", "label @param menu_items: Crumb menu, could be a Menu instance or a list,", "in_menu = event.x > self.button_width self.in_menu =in_menu def motion_notify_cb(self, widget, event): ''' Internal", "shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def", "self.hbox = gtk.HBox(False, 0) self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify)", "in path_list]) menu = Menu([ (None, \"测试1\", None), (None, \"测试2\", None), ], shadow_visible", "widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x + menu_width - arrow_button_width, wy +", "Internal callback function to \"clicked\" signal. @param widget: Crumb instance. 
@param index: The", "@param event: An event of gtk.gdk.Event ''' if self.menu == None: self.in_button =", "arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ): ''' Initialize Bread class. @param crumb: Crumb", "to consider whether or not shown left && right box # at runtime", "0)) def set_label(self, label, font_size = DEFAULT_FONT_SIZE): ''' Set label for left button.", "''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event):", "2 * self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup menu for", "@param max_width: Maximum width of bread menu, by default is None. ''' Poplist.__init__(self,", "self.hbox.pack_end(self.eventbox, True, True) self.scroll_win = ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox, True, True) #", "@param widget: Gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box", "menu_items = None, font_size = DEFAULT_FONT_SIZE, padding_x = 15, ): ''' Initialize Crumb", "Init Hbox self.hbox = gtk.HBox(False, 0) self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry:", "self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color", "temp - (page_size + value) #play animation ani = Animation(self.adj, lambda widget, v1:", "text. draw_text(cr, self.label, x, y , self.button_width, h, self.font_size, text_color, alignment = pango.ALIGN_CENTER)", "True, an entry will pop up when click space area in Bread. 
'''", "inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb,", "class . @undocumented: enter_button @undocumented: motion_notify_cb @undocumented: create_menu @undocumented: hide_cb @undocumented: button_press_cb @undocumented:", "cr = widget.window.cairo_create() rect = widget.allocation x, y, w, h = rect.x, rect.y,", "Crumb instance [Crumb, Crumb] ''' crumbs = self.create_crumb(crumbs) for crumb in crumbs: crumb.show()", "index, label): ''' Internal callback function to \"clicked\" signal. @param widget: Crumb instance.", "import Animation from scrolled_window import ScrolledWindow from button import Button from theme import", "= 20 class Bread(gtk.HBox): ''' Bread widget is a container which can hold", "# Draw button border. def draw_rectangle(cr, x, y , w, h): draw_line(cr, x", "[Crumb1, Crumb3, Crumb4]. In this way, application can operate crumbs ''' objects =", "#play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value", "''' Internal callback function to \"button-press-event\" signal. @param widget: gtk.eventbox. @param event: event", "ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value - shift_value)", "rect.height) cr.fill() return False def add(self, crumbs): ''' Add crumbs. Can accept Crumb", "menu)], show_others = False, show_entry = True) bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200, -1)", "page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if", "is DEFAULT_FONT_SIZE. ''' self.label = label (self.label_w, self.label_h) = get_content_size(self.label, font_size) if self.menu", "import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk import gobject import pango from poplist", "nodes after given index. 
@param index: To specified remove after given index. '''", "rect.x, rect.y, rect.width, rect.height # Should move this part to Bread class since", "menu_color = None elif widget.state == gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color", "create_crumb(self, crumb): ''' Internal function to create a Crumb list for different types", "\"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' if isinstance(crumb, Crumb): return [crumb,]", "at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\") self.right_btn =", "= arrow_down elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg", "def click_cb(self, widget, index, label): ''' Internal callback function to \"clicked\" signal. @param", "change_entry(widget, path): # Application can check if path is valid or not path_list", "= ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf = arrow_down else: arrow_pixbuf = arrow_right if self.in_menu:", "if temp > (page_size + value): shift_value = temp - (page_size + value)", "ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR", "self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x, y, w, h): cr.set_source_rgb(1, 1, 1)", "value): shift_value = temp - (page_size + value) #play animation ani = Animation(self.adj,", "[Crumb, Crumb] ''' if isinstance(crumb, Crumb): return [crumb,] elif isinstance(crumb[0], str): return [Crumb(crumb[0],", "WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS", "widget: gtk.eventbox. @param event: event of type gtk.gdk.event. ''' obj = self.hbox.get_children() label", "Crumb4]), previous list will be change to [Crumb1, Crumb3, Crumb4]. 
In this way,", "# # You should have received a copy of the GNU General Public", "crumb height self.font_size = font_size self.padding_x = padding_x self.menu = self.create_menu(menu_items) if self.menu", "self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def enter_cb(self, widget):", "''' Internal expose callback function. @param widget: Crumb instance. @param event: An event", "crumb = Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget): crumb1 = Crumb(\"Yet Another Root\", menu)", "widget: Right button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value =", "menu) crumb2 = Crumb(\"Yet Another Child\", menu) bread.change_node(0, [crumb1, crumb2]) def change_entry(widget, path):", "left & right buttons self.in_event_box = False # Init left button and right", "= gtk.HBox(spacing = 0) right_box = gtk.HBox(spacing = 0) # FIXME: left &&", "Test Item add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False, 0) test_change_node", "@param event: An event of gtk.gdk.Event. ''' if self.menu == None: self.menu_min =", "@param event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box = True def", "show_entry=False, show_left_right_box=True ): ''' Initialize Bread class. @param crumb: Crumb instance or a", "ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x + menu_width - arrow_button_width, wy + offset_y + menu_height,", "(disable_bg, 1.0)), (1, (disable_bg, 1.0))] menu_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))]", "Public License for more details. 
# # You should have received a copy", "if self.menu == None: self.menu_min = 0 cr = widget.window.cairo_create() rect = widget.allocation", "self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False, False) # Init", "draw_line(cr, x , y , x , y + h) # left draw_line(cr,", "crumbs = self.create_crumb(crumbs) for crumb in crumbs: crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down =", ". @undocumented: enter_button @undocumented: motion_notify_cb @undocumented: create_menu @undocumented: hide_cb @undocumented: button_press_cb @undocumented: button_clicked", "self.menu != None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False self.menu_show = False self.index_id =", "], shadow_visible = False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300)", "or a list of Crumb instances @param crumbs: Supported inputs are: [\"a label\",", "draw_rectangle(cr, x + 1 , y + 1 , self.button_width -1 , h", "add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False, 0) test_change_node = gtk.Button(\"Change", "import Poplist ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox): ''' Bread widget is a container", "arrow_down elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg =", "(disable_bg, 1.0))] menu_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] # Draw background.", ", w, h): draw_line(cr, x -1 , y , x + w, y)", "!= None: # Draw an arrow. 
draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min", "click_cb @undocumented: move_right @undocumented: move_left ''' __gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)),", "self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event): in_menu", "are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb]", "gtk.gdk.event ''' cr = widget.window.cairo_create() rect = widget.allocation # Draw backgroud. with cairo_state(cr):", "crumb2]) def change_entry(widget, path): # Application can check if path is valid or", "callback function to Menu's \"\"hide\" signal. @param widget: Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT)", "0) (menu_width, menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x +", "arrow_pixbuf = arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask =", "show_left_right_box left_box = gtk.HBox(spacing = 0) right_box = gtk.HBox(spacing = 0) # FIXME:", "(1, (disable_bg, 1.0))] menu_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] # Draw", "by default is None. @param max_width: Maximum width of bread menu, by default", "path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu) for i in path_list]) menu = Menu([ (None,", "the right side will be destroyed. @param show_entry: If True, an entry will", "widget bread = Bread([(\"Root\", menu), (\"Level1\", menu)], show_others = False, show_entry = True)", "Dynamic pixbuf for right arrow, default is \\\"treeview/arrow_right.png\\\" from ui theme. 
@param arrow_down:", "default is DEFAULT_FONT_SIZE. @param padding_x: Horizontal padding, default is 15 pixels. ''' super(Crumb,", "self.get_size_request()[0] else: self.set_size_request( max(self.label_w + 2 * self.padding_x + self.menu_min, self.btn_min + self.menu_min),", "gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color = [(0,", "+ 1, rect.width - 2, rect.height - 2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): '''", "= gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node) vbox.pack_start(test_change_node, True, False , 0) win.add(vbox) win.show_all()", "+ shift_value): self.right_btn.hide() def move_left(self, widget): ''' Internal callback function to \"clicked\" signal.", "gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"),", "Crumb instance. @param index: The index value of clicked crumb. @param label: Label", ", menu) for i in path_list]) menu = Menu([ (None, \"测试1\", None), (None,", "obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide()", "@param event: event of type gtk.gdk.event ''' cr = widget.window.cairo_create() rect = widget.allocation", "is None @param font_size: Font size, default is DEFAULT_FONT_SIZE. 
@param padding_x: Horizontal padding,", "# button width self.height = 24 # crumb height self.font_size = font_size self.padding_x", "@param event: an event of gtk.gdk.event ''' in_menu = event.x > self.button_width if", "from ui theme. @param show_others: If True, crumbs will not be destroyed, otherwise", "# at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\") self.right_btn", "if isinstance(crumb, Crumb): return [crumb,] elif isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0],", "widget: Crumb @param event: An event of gtk.gdk.Event ''' if self.menu == None:", "i in objects[index: -1]: i.destroy() self.item_list[index:] = [] self.add(crumbs) def remove_node_after_index(self, index): '''", "shift_value = temp - (page_size + value) #play animation ani = Animation(self.adj, lambda", "y , w, h): draw_line(cr, x -1 , y , x + w,", "max(self.label_w + 2 * self.padding_x + self.menu_min, self.btn_min + self.menu_min), self.height) self.button_width =", "widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self, widget, event): ''' Internal callback function", "-4) cr.fill() if self.menu != None: # Draw an arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x", "a list of crumb instances @param arrow_right: Dynamic pixbuf for right arrow, default", "widget, event): pass def expose_bread_menu_frame(self, widget, event): cr = widget.window.cairo_create() rect = widget.allocation", "label for left button. @param label: Label @param font_size: Label's Font size, default", "<http://www.gnu.org/licenses/>. from animation import Animation from scrolled_window import ScrolledWindow from button import Button", "self.right_btn.hide() def move_left(self, widget): ''' Internal callback function to \"clicked\" signal. 
@param widget:", "def event_box_press(self, widget, event): ''' Internal callback function to \"button-press-event\" signal. @param widget:", "not, see <http://www.gnu.org/licenses/>. from animation import Animation from scrolled_window import ScrolledWindow from button", "None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False self.menu_show = False self.index_id = 0 self.set_label(label)", "crumbs on the right side will be destroyed. @param show_entry: If True, an", "hold crumbs widget. @undocumented: create_crumb @undocumented: enter_notify @undocumented: leave_notify @undocumented: event_box_press @undocumented: enter_cb", "self.scroll_win.set_size_request(width - 2 * self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup", "Another Root\", menu) crumb2 = Crumb(\"Yet Another Child\", menu) bread.change_node(0, [crumb1, crumb2]) def", "function. @param widget: Crumb instance. @param event: An event of gtk.gdk.Event. ''' if", "or # any later version. # # This program is distributed in the", "self.in_menu !=in_menu: self.in_menu = in_menu self.queue_draw() def create_menu(self, menu_items): ''' Internal function to", "2, h -3) if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x + 2,", "self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event): in_menu = event.x > self.button_width self.in_menu =in_menu def", "Draw an arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min - arrow_width) /", "): ''' Initialize BreadMenu class. @param items: Item for TreeView. 
@param max_height: Maximum", "menu_color: cr.rectangle( x + self.button_width + 1, y + 2, self.menu_min - 3,", "cr = widget.window.cairo_create() rect = widget.allocation with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1)", "move_left(self, widget): ''' Internal callback function to \"clicked\" signal. @param widget: Left button.", "self.add(crumbs) def remove_node_after_index(self, index): ''' Remove any nodes after given index. @param index:", "0 if upper > (page_size + value): self.left_btn.show() for i in xrange(len(self.item_list)+1): temp", "if not upper > (page_size + self.adj.value + shift_value): self.right_btn.hide() def move_left(self, widget):", "cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill() return False def add(self, crumbs): '''", "2, rect.height - 2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb class . @undocumented:", "self.menu_show = False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event): ''' Internal callback function to", "self.arrow_right = None self.arrow_down = None self.menu_min = 18 # menu bar width", "by using change_node(1, [Crumb3, Crumb4]), previous list will be change to [Crumb1, Crumb3,", "offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH", "self.label_h) = get_content_size(self.label, font_size) if self.menu == None: self.set_size_request( max(self.label_w + 2 *", "Another Child\", menu) bread.change_node(0, [crumb1, crumb2]) def change_entry(widget, path): # Application can check", "Draw button border. def draw_rectangle(cr, x, y , w, h): draw_line(cr, x -1", "signal. @param widget: Crumb instance. 
@param index: The index value of clicked crumb.", "def __init__(self, label, menu_items = None, font_size = DEFAULT_FONT_SIZE, padding_x = 15, ):", "event): pass def expose_bread_menu_frame(self, widget, event): cr = widget.window.cairo_create() rect = widget.allocation with", "vbox.pack_start(add_path_button, True, False, 0) test_change_node = gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node) vbox.pack_start(test_change_node, True,", "crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ): ''' Initialize Bread class. @param crumb:", "@param event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box = False def", "cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y + 1,", "= 15, ): ''' Initialize Crumb class. @param label: Crumb item label @param", "y + 2, self.button_width - 3, h -3) elif menu_color: draw_rectangle(cr, x +", "-*- # Copyright (C) 2011 ~ 2012 Deepin, Inc. # 2011 ~ 2012", "-1) elif menu_color: draw_rectangle(cr, x + self.button_width, y + 1, self.menu_min, h -", "WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A", "that it will be useful, # but WITHOUT ANY WARRANTY; without even the", "Bread Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb): ''' Internal function to", "# This program is free software: you can redistribute it and/or modify #", "True, True) self.scroll_win = ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox, True, True) # Add", "event): ''' Internal callback function to \"enter-notify-event\" signal. @param widget: gtk.EventBox. 
@param event:", "= widget.allocation with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1,", "bottom draw_line(cr, x , y , x , y + h) # left", "self.menu == None: self.menu_min = 0 cr = widget.window.cairo_create() rect = widget.allocation x,", "right button if crumbs exceed scrolled window size. if sum(self.item_list) > page_size and", ", self.button_width, h, self.font_size, text_color, alignment = pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if __name__", "): ''' Initialize Crumb class. @param label: Crumb item label @param menu_items: Crumb", "Maximum height of bread menu, by default is None. @param max_width: Maximum width", "wy + offset_y + menu_height, ), (0, 0)) def set_label(self, label, font_size =", "implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "- 2, rect.height - 2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb class .", "crumb. @param label: Label of the crumb. ''' if not self.show_others: for i", "menu_color = None arrow_pixbuf = arrow_right elif widget.state == gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color()", "crumb1 = Crumb(\"Yet Another Root\", menu) crumb2 = Crumb(\"Yet Another Child\", menu) bread.change_node(0,", "cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 2, rect.height - 2)", "@param show_entry: If True, an entry will pop up when click space area", "@param height: Height of Bread. ''' self.scroll_win.set_size_request(width - 2 * self.button_width, height) self.hbox.set_size_request(-1,", "index, crumbs): ''' Change any nodes start from specified index @param index: Start", "False def add(self, crumbs): ''' Add crumbs. 
Can accept Crumb instance or a", "the hope that it will be useful, # but WITHOUT ANY WARRANTY; without", "= ARROW_BUTTON_WIDTH # for left & right buttons self.in_event_box = False # Init", "to Menu's \"\"hide\" signal. @param widget: Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show", "with this program. If not, see <http://www.gnu.org/licenses/>. from animation import Animation from scrolled_window", "(gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ):", "self.hbox.get_children() for i in objects[index: -1]: i.destroy() self.item_list[index:] = [] self.add(crumbs) def remove_node_after_index(self,", "!= None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False self.menu_show = False self.index_id = 0", "self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def enter_cb(self, widget): '''", "side will be destroyed. @param show_entry: If True, an entry will pop up", "50 # button width self.height = 24 # crumb height self.font_size = font_size", "Crumb(\"Yet Another Root\", menu) crumb2 = Crumb(\"Yet Another Child\", menu) bread.change_node(0, [crumb1, crumb2])", "+ 1, y + 2, self.menu_min - 2, h -3) if widget.state ==", "(C) 2011 ~ 2012 Deepin, Inc. 
# 2011 ~ 2012 Zeng Zhi #", "self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def enter_cb(self, widget): ''' Internal callback function to \"press-return\"", "for o in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show()", "see <http://www.gnu.org/licenses/>. from animation import Animation from scrolled_window import ScrolledWindow from button import", "for different types of inputs. @param crumb: Support inputs are: [\"a label\", Menu]", "# # Author: <NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>> # # This program", "= gtk.HBox(spacing = 0) # FIXME: left && right box static setting size", "##################################### vbox.pack_start(bread, False, False, 0) # Test Item add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\",", "+ 2, y + 2, self.button_width - 3, h -3) elif menu_color: draw_rectangle(cr,", "else: self.in_button = event.x < (widget.allocation.width - self.menu_min) if not self.in_button: self.menu_press =", "None menu_color = None arrow_pixbuf = arrow_right elif widget.state == gtk.STATE_PRELIGHT: text_color =", "y , self.button_width, h, self.font_size, text_color, alignment = pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if", "border. def draw_rectangle(cr, x, y , w, h): draw_line(cr, x -1 , y", "value: shift_value = self.item_list[i] - (temp - value) break #play animation ani =", "to \"button-press-event\" signal. @param widget: gtk.eventbox. @param event: event of type gtk.gdk.event. 
'''", "rect = widget.allocation with cairo_disable_antialias(cr): outside_border = alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x +", "a copy of the GNU General Public License # along with this program.", "= 18 # menu bar width self.btn_min = 50 # button width self.height", "self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size # Show right button if crumbs", "gtk.gdk.Event. ''' self.in_event_box = True def leave_notify(self, widget, event): ''' Internal callback function", "(gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ): ''' Initialize", "\"clicked\" signal. @param widget: Crumb instance. @param index: The index value of clicked", "label: Label @param font_size: Label's Font size, default is DEFAULT_FONT_SIZE. ''' self.label =", "A PARTICULAR PURPOSE. See the # GNU General Public License for more details.", "shadow_visible = False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox", "''' Bread widget is a container which can hold crumbs widget. @undocumented: create_crumb", "gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb class . @undocumented: enter_button @undocumented: motion_notify_cb @undocumented: create_menu", "# 2011 ~ 2012 Zeng Zhi # # Author: <NAME> <<EMAIL>> # Maintainer:", "label) def redraw_bg(self, widget, event): ''' Internal callback function to \"expose-event\" signal. 
@param", "self.adj.value + shift_value): self.right_btn.hide() def move_left(self, widget): ''' Internal callback function to \"clicked\"", ">= value: shift_value = self.item_list[i] - (temp - value) break #play animation ani", "gtk.eventbox. @param event: event of type gtk.gdk.event. ''' obj = self.hbox.get_children() label =", "i in xrange(len(self.item_list)+1): temp += self.item_list[i] if temp > (page_size + value): shift_value", "self.get_size_request()[0] - self.menu_min self.queue_draw() def expose_cb(self, widget, event): ''' Internal expose callback function.", "left draw_line(cr, x + w , y , x + w , y", "software: you can redistribute it and/or modify # it under the terms of", "widget instance. ''' label = widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def redraw_bg(self, widget,", "self.set_size_request( max(self.label_w + 2 * self.padding_x + self.menu_min, self.btn_min + self.menu_min), self.height) self.button_width", "crumbs widget. @undocumented: create_crumb @undocumented: enter_notify @undocumented: leave_notify @undocumented: event_box_press @undocumented: enter_cb @undocumented:", "x , y + h) # left draw_line(cr, x + w , y", "the crumb. ''' if not self.show_others: for i in self.hbox.get_children()[(index + 1): -1]:", "widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value - shift_value) == 0: self.left_btn.hide() def", "list of Crumb instances @param crumbs: Supported inputs are: [\"a label\", Menu] [(\"a", "0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if", "def enter_notify(self, widget, event): ''' Internal callback function to \"enter-notify-event\" signal. 
@param widget:", "''' if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press = False self.menu_show = not", "test_change_node = gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node) vbox.pack_start(test_change_node, True, False , 0) win.add(vbox)", "self.pack_start(left_box, False, True) self.pack_start(self.hbox, True, True) # Add Bread Items self.adj = self.scroll_win.get_hadjustment()", "crumb.arrow_right = self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False,", "gtk.HBox(False, 0) self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify)", "pointer event of type gtk.gdk.Event. ''' self.in_event_box = True def leave_notify(self, widget, event):", "add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget): crumb1 = Crumb(\"Yet Another Root\",", "free software: you can redistribute it and/or modify # it under the terms", "Internal callback function to Crumb \"motion-notify-event\" signal. @param widget: Crumb @param event: an", "arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1))", "== 1.0: self.right_btn.show() def change_node(self, index, crumbs): ''' Change any nodes start from", "if crumbs exceed scrolled window size. if sum(self.item_list) > page_size and not page_size", "a list contain [Crumb1, Crumb2], by using change_node(1, [Crumb3, Crumb4]), previous list will", "elif menu_color: draw_rectangle(cr, x + self.button_width + 1, y + 2, self.menu_min -", "an arrow. 
draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min - arrow_width) / 2,", "+ 1 , y + 1 , self.button_width -1 , h -1) elif", "event): in_menu = event.x > self.button_width self.in_menu =in_menu def motion_notify_cb(self, widget, event): '''", "function to \"clicked\" signal. @param widget: Left button. ''' upper, page_size, value =", "alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 2,", "default is \\\"treeview/arrow_down.png\\\" from ui theme. @param show_others: If True, crumbs will not", "if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press = False self.menu_show = not self.menu_show", "0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 2, rect.height", "add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False, 0) test_change_node = gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node)", "+ 2, y + 2, self.button_width - 4, h -4) cr.fill() elif menu_color:", "Bread widget is a container which can hold crumbs widget. @undocumented: create_crumb @undocumented:", "elif widget.state == gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf = arrow_down else:", "there exist a list contain [Crumb1, Crumb2], by using change_node(1, [Crumb3, Crumb4]), previous", "= None menu_color = None arrow_pixbuf = arrow_right elif widget.state == gtk.STATE_PRELIGHT: text_color", "gtk.VBox() ###################################### # test breadcrumb widget bread = Bread([(\"Root\", menu), (\"Level1\", menu)], show_others", "widget: gtk.Entry widget instance. 
''' label = widget.get_text() widget.destroy() self.eventbox.show() self.emit(\"entry-changed\", label) def", "self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget,", "button_color = None menu_color = inner_border else: button_color = inner_border menu_color = None", "Internal callback function to \"button-press-event\" signal. @param widget: Crumb @param event: An event", "will be change to [Crumb1, Crumb3, Crumb4]. In this way, application can operate", "y + 2, self.menu_min - 2, h -3) if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask)", "''' Internal callback function to Crumb \"motion-notify-event\" signal. @param widget: Crumb @param event:", "to \"clicked\" signal. @param widget: Left button. ''' upper, page_size, value = self.adj.upper,", "1.0: self.right_btn.show() def change_node(self, index, crumbs): ''' Change any nodes start from specified", "event.x > self.button_width self.in_menu =in_menu def motion_notify_cb(self, widget, event): ''' Internal callback function", "padding_x = 15, ): ''' Initialize Crumb class. @param label: Crumb item label", "shift_value = 0 temp = 0 if not value == 0: self.right_btn.show() for", "= None menu_color = inner_border arrow_pixbuf = arrow_down elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf", "upper > (page_size + self.adj.value + shift_value): self.right_btn.hide() def move_left(self, widget): ''' Internal", "# the Free Software Foundation, either version 3 of the License, or #", "Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget): crumb1 = Crumb(\"Yet Another Root\", menu) crumb2 =", "Remove any nodes after given index. 
@param index: To specified remove after given", "arrow_pixbuf = arrow_right if self.in_menu: button_color = None menu_color = inner_border else: button_color", "scrolled_window import ScrolledWindow from button import Button from theme import ui_theme from menu", "self.menu_show = False self.index_id = 0 self.set_label(label) self.in_button = True self.in_menu = True", "0: self.left_btn.hide() def set_size(self, width, height): ''' Set Bread size. @param width: Width", "widget.window.cairo_create() rect = widget.allocation x, y, w, h = rect.x, rect.y, rect.width, rect.height", "2012 Zeng Zhi # # Author: <NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>> #", "event): ''' Internal callback function to Crumb \"motion-notify-event\" signal. @param widget: Crumb @param", "Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False, 0) test_change_node = gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\",", "Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' if isinstance(crumb,", "Internal callback function to \"leave-notify-event\" signal. @param widget: Gtk.EventBox. @param event: The pointer", "@param max_height: Maximum height of bread menu, by default is None. @param max_width:", "for left button. @param label: Label @param font_size: Label's Font size, default is", "w, h): draw_line(cr, x -1 , y , x + w, y) #", "cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x + 2, y + 2, self.button_width - 3,", "Crumb(gtk.Button): ''' Crumb class . @undocumented: enter_button @undocumented: motion_notify_cb @undocumented: create_menu @undocumented: hide_cb", "self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size", "# along with this program. If not, see <http://www.gnu.org/licenses/>. 
from animation import Animation", "@undocumented: enter_notify @undocumented: leave_notify @undocumented: event_box_press @undocumented: enter_cb @undocumented: redraw_bg @undocumented: click_cb @undocumented:", "y , x , y + h) # left draw_line(cr, x + w", "If True, an entry will pop up when click space area in Bread.", "function to Menu's \"\"hide\" signal. @param widget: Menu ''' if self.menu_press: self.set_state(gtk.STATE_PRELIGHT) else:", "right buttons self.in_event_box = False # Init left button and right button. self.show_left_right_box", "- 1) # Draw innner border. cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x + 2,", "inner_border menu_color = None elif widget.state == gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button:", "###################################### # test breadcrumb widget bread = Bread([(\"Root\", menu), (\"Level1\", menu)], show_others =", "Intenal callback function to \"clicked\" signal. @param widget: Crumb ''' if self.in_button: self.emit(\"item_clicked\",", "''' if menu_items != None and isinstance(menu_items, list): return BreadMenu(menu_items) else: return None", "2, y + (h - arrow_height) / 2) # Draw text. draw_text(cr, self.label,", "== gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x + 2, y + 2, self.button_width -", "True gobject.type_register(Crumb) if __name__ == \"__main__\": import gtk def add_panel(widget): crumb = Crumb(\"Child\",menu)", "The pointer event of type gtk.gdk.Event. ''' self.in_event_box = True def leave_notify(self, widget,", "General Public License for more details. 
# # You should have received a", "label, menu_items = None, font_size = DEFAULT_FONT_SIZE, padding_x = 15, ): ''' Initialize", "= self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\",", "self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event): in_menu = event.x >", "To specified remove after given index. ''' for i in self.hbox.get_children()[(index + 1):", "[Crumb(i , menu) for i in path_list]) menu = Menu([ (None, \"测试1\", None),", "+ menu_width - arrow_button_width, wy + offset_y + menu_height, ), (0, 0)) def", "the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "- (page_size + value) #play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value,", "any nodes start from specified index @param index: Start index @param crumbs: Crumb", "widget, index, label): ''' Internal callback function to \"clicked\" signal. @param widget: Crumb", "arrow_pixbuf = arrow_down else: arrow_pixbuf = arrow_right if self.in_menu: button_color = None menu_color", "cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk import gobject import pango from poplist import Poplist", "[] def click_cb(self, widget, index, label): ''' Internal callback function to \"clicked\" signal.", "self.menu_min = 0 cr = widget.window.cairo_create() rect = widget.allocation x, y, w, h", "import gtk import gobject import pango from poplist import Poplist ARROW_BUTTON_WIDTH = 20", "instance. @param index: The index value of clicked crumb. 
@param label: Label of", "= alpha_color_hex_to_cairo((\"#666666\", 0.5)) cr.set_line_width(1) cr.set_source_rgba(*outside_border) cr.rectangle(rect.x + 1, rect.y + 1, rect.width -", "event_box_press @undocumented: enter_cb @undocumented: redraw_bg @undocumented: click_cb @undocumented: move_right @undocumented: move_left ''' __gsignals__=", "backgroud. with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill() return False def", "Crumb instance or Crumb list For instance, there exist a list contain [Crumb1,", "left && right box # at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1)", "instance or a list of Crumb instances @param crumbs: Supported inputs are: [\"a", "background. if not widget.state == gtk.STATE_NORMAL: # Draw button border. def draw_rectangle(cr, x,", "import Menu from constant import DEFAULT_FONT_SIZE from draw import (draw_line, draw_text, draw_pixbuf) from", "and not page_size == 1.0: self.right_btn.show() def change_node(self, index, crumbs): ''' Change any", "self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win = ScrolledWindow() self.pack_start(left_box, False, True)", "0) right_box = gtk.HBox(spacing = 0) # FIXME: left && right box static", "def motion_notify_cb(self, widget, event): ''' Internal callback function to Crumb \"motion-notify-event\" signal. 
@param", "Crumb instance or a list of Crumb instances @param crumbs: Supported inputs are:", "Can accept Crumb instance or a list of Crumb instances @param crumbs: Supported", "self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color =", "self.set_size_request( max(self.label_w + 2 * self.padding_x, self.btn_min), self.height) self.button_width = self.get_size_request()[0] else: self.set_size_request(", "gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x + 2, y + 2, self.button_width - 4,", "label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry,", "list for different types of inputs. @param crumb: Support inputs are: [\"a label\",", "Show right button if crumbs exceed scrolled window size. if sum(self.item_list) > page_size", "event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box = False def event_box_press(self,", "+ 1):] = [] self.emit(\"item_clicked\", index, label) def move_right(self, widget): ''' Internal callback", "Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width,", "from menu import Menu from constant import DEFAULT_FONT_SIZE from draw import (draw_line, draw_text,", "to \"leave-notify-event\" signal. @param widget: Gtk.EventBox. @param event: The pointer event of type", "event): ''' Internal callback function to \"button-press-event\" signal. @param widget: gtk.eventbox. 
@param event:", "to Bread class since app_theme is golobalized. arrow_right = self.arrow_right arrow_down = self.arrow_down", "3 of the License, or # any later version. # # This program", "menu_color = inner_border arrow_pixbuf = arrow_down elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right", "class since app_theme is golobalized. arrow_right = self.arrow_right arrow_down = self.arrow_down arrow_width, arrow_height", "more details. # # You should have received a copy of the GNU", "@undocumented: create_menu @undocumented: hide_cb @undocumented: button_press_cb @undocumented: button_clicked @undocumented: expose_cb ''' __gsignals__= {", "GNU General Public License for more details. # # You should have received", "1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill() return False def add(self, crumbs): ''' Add", "super(Bread, self).__init__(spacing = 0) self.arrow_right = arrow_right self.arrow_down = arrow_down self.item_list = list()", "under the terms of the GNU General Public License as published by #", "Zeng Zhi # # Author: <NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>> # #", "= self.hbox.get_children() for i in objects[index: -1]: i.destroy() self.item_list[index:] = [] self.add(crumbs) def", "rect.height - 2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb class . @undocumented: enter_button", "button_color = None menu_color = inner_border arrow_pixbuf = arrow_down elif widget.state == gtk.STATE_INSENSITIVE:", "default is \\\"treeview/arrow_right.png\\\" from ui theme. @param arrow_down: Dynamic pixbuf for down arrow,", "callback function to \"button-press-event\" signal. @param widget: Crumb @param event: An event of", "None), (None, \"测试2\", None), ], shadow_visible = False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\",", "part to Bread class since app_theme is golobalized. 
arrow_right = self.arrow_right arrow_down =", "crumb: Crumb instance or a list of crumb instances @param arrow_right: Dynamic pixbuf", "given index. @param index: To specified remove after given index. ''' for i", "@undocumented: button_clicked @undocumented: expose_cb ''' __gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def", "cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x + 2, y + 2, self.button_width - 4, h", "gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ): '''", "right button. self.show_left_right_box = show_left_right_box left_box = gtk.HBox(spacing = 0) right_box = gtk.HBox(spacing", "bread = Bread([(\"Root\", menu), (\"Level1\", menu)], show_others = False, show_entry = True) bread.add([\"xxx\",menu])", "+ w , y , x + w , y + h -1)", "label) def move_right(self, widget): ''' Internal callback function to \"clicked\" signal. @param widget:", "it under the terms of the GNU General Public License as published by", "self.menu != None: # Draw an arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width +", "self.in_button: button_color = inner_border menu_color = None arrow_pixbuf = arrow_right else: button_color =", "you can redistribute it and/or modify # it under the terms of the", "inner_border else: button_color = inner_border menu_color = None elif widget.state == gtk.STATE_ACTIVE: text_color", "Add crumbs. 
Can accept Crumb instance or a list of Crumb instances @param", "ani.start() break if not upper > (page_size + self.adj.value + shift_value): self.right_btn.hide() def", "lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox() ###################################### # test breadcrumb widget bread", "right_box = gtk.HBox(spacing = 0) # FIXME: left && right box static setting", "return None def hide_cb(self, widget): ''' Internal callback function to Menu's \"\"hide\" signal.", "None elif widget.state == gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color = inner_border", "(self.menu_min - arrow_width) / 2, y + (h - arrow_height) / 2) #", "= ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x + menu_width - arrow_button_width, wy + offset_y +", "buttons self.in_event_box = False # Init left button and right button. self.show_left_right_box =", "w , y + h -1) # right cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x", "application can operate crumbs ''' objects = self.hbox.get_children() for i in objects[index: -1]:", "self.arrow_right = arrow_right self.arrow_down = arrow_down self.item_list = list() self.show_others = show_others self.show_entry", "widget: Crumb instance. @param event: An event of gtk.gdk.Event. ''' if self.menu ==", "If True, crumbs will not be destroyed, otherwise all crumbs on the right", "different types of inputs. @param crumb: Support inputs are: [\"a label\", Menu] [(\"a", "isinstance(menu_items, list): return BreadMenu(menu_items) else: return None def hide_cb(self, widget): ''' Internal callback", "self.button_width self.in_menu =in_menu def motion_notify_cb(self, widget, event): ''' Internal callback function to Crumb", "General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from", "''' if isinstance(crumb, Crumb): return [crumb,] elif isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),] elif", "pango from poplist import Poplist ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox): ''' Bread widget", "hope that it will be useful, # but WITHOUT ANY WARRANTY; without even", "font_size = DEFAULT_FONT_SIZE): ''' Set label for left button. @param label: Label @param", "pop up when click space area in Bread. ''' # Init. super(Bread, self).__init__(spacing", "label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' if", "h - 1) # Draw innner border. cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x +", "self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False, False) # Init Hbox self.hbox =", "# bottom draw_line(cr, x , y , x , y + h) #", "ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color = None arrow_pixbuf = arrow_right elif widget.state ==", "callback function to \"enter-notify-event\" signal. @param widget: gtk.EventBox. @param event: The pointer event", "2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb class . 
@undocumented: enter_button @undocumented: motion_notify_cb", "!= None and isinstance(menu_items, list): return BreadMenu(menu_items) else: return None def hide_cb(self, widget):", "widget: Crumb @param event: an event of gtk.gdk.event ''' in_menu = event.x >", "= self.get_size_request()[0] - self.menu_min self.queue_draw() def expose_cb(self, widget, event): ''' Internal expose callback", "self.menu_show: (wx, wy) = self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height)", "+ value): self.left_btn.show() for i in xrange(len(self.item_list)+1): temp += self.item_list[i] if temp >", "self.adj.page_size, self.adj.value shift_value = 0 temp = 0 if not value == 0:", "ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if not upper", "self.in_menu: button_color = None menu_color = inner_border else: button_color = inner_border menu_color =", "show_left_right_box=True ): ''' Initialize Bread class. @param crumb: Crumb instance or a list", "self.btn_min + self.menu_min), self.height) self.button_width = self.get_size_request()[0] - self.menu_min self.queue_draw() def expose_cb(self, widget,", "crumbs: Crumb instance or Crumb list For instance, there exist a list contain", "> page_size and not page_size == 1.0: self.right_btn.show() def change_node(self, index, crumbs): '''", "import (draw_line, draw_text, draw_pixbuf) from utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk", "way, application can operate crumbs ''' objects = self.hbox.get_children() for i in objects[index:", "callback function. @param widget: Crumb instance. @param event: An event of gtk.gdk.Event. '''", "list will be change to [Crumb1, Crumb3, Crumb4]. 
In this way, application can", "in xrange(len(self.item_list)): temp += self.item_list[i] if temp >= value: shift_value = self.item_list[i] -", "Bread class. @param crumb: Crumb instance or a list of crumb instances @param", "Start index @param crumbs: Crumb instance or Crumb list For instance, there exist", "True def leave_notify(self, widget, event): ''' Internal callback function to \"leave-notify-event\" signal. @param", "if self.menu == None: self.in_button = True self.menu_press = False else: self.in_button =", "font_size) if self.menu == None: self.set_size_request( max(self.label_w + 2 * self.padding_x, self.btn_min), self.height)", "-4) cr.fill() elif menu_color: cr.rectangle( x + self.button_width + 1, y + 2,", "self.arrow_down = None self.menu_min = 18 # menu bar width self.btn_min = 50", "# Maintainer: <NAME> <<EMAIL>> # # This program is free software: you can", "from button import Button from theme import ui_theme from menu import Menu from", "''' Initialize Bread class. @param crumb: Crumb instance or a list of crumb", "Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' crumbs =", "xrange(len(self.item_list)+1): temp += self.item_list[i] if temp > (page_size + value): shift_value = temp", "shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def __init__(self, items, max_height=None, max_width=None, ): ''' Initialize BreadMenu", "crumb] def enter_notify(self, widget, event): ''' Internal callback function to \"enter-notify-event\" signal. @param", "widget.window.cairo_create() rect = widget.allocation # Draw backgroud. with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y,", "+ menu_height, ), (0, 0)) def set_label(self, label, font_size = DEFAULT_FONT_SIZE): ''' Set", "be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of", "index: To specified remove after given index. 
''' for i in self.hbox.get_children()[(index +", "__gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items = None,", "return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb): return crumb else: return [Crumb(c[0], c[1]) for", "event: An event of gtk.gdk.Event. ''' if self.menu == None: self.menu_min = 0", "isinstance(crumb[0], Crumb): return crumb else: return [Crumb(c[0], c[1]) for c in crumb] def", "self.emit(\"entry-changed\", label) def redraw_bg(self, widget, event): ''' Internal callback function to \"expose-event\" signal.", "= [] def click_cb(self, widget, index, label): ''' Internal callback function to \"clicked\"", "\"测试1\", None), (None, \"测试2\", None), ], shadow_visible = False, ) win = gtk.Window(gtk.WINDOW_TOPLEVEL)", "from utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk import gobject import pango", "self.button_width = ARROW_BUTTON_WIDTH # for left & right buttons self.in_event_box = False #", "0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color =", "inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state", "font_size = DEFAULT_FONT_SIZE, padding_x = 15, ): ''' Initialize Crumb class. @param label:", "self.menu == None: self.in_button = True self.menu_press = False else: self.in_button = event.x", "event of type gtk.gdk.event. 
''' obj = self.hbox.get_children() label = [] for o", "crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size #", "event: An event of gtk.gdk.Event ''' if self.menu == None: self.in_button = True", "\"button-press-event\" signal. @param widget: gtk.eventbox. @param event: event of type gtk.gdk.event. ''' obj", "# Init. super(Bread, self).__init__(spacing = 0) self.arrow_right = arrow_right self.arrow_down = arrow_down self.item_list", "event: an event of gtk.gdk.event ''' in_menu = event.x > self.button_width if self.in_menu", "''' Initialize BreadMenu class. @param items: Item for TreeView. @param max_height: Maximum height", "-1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False, False) # Init Hbox self.hbox", "- arrow_button_width, wy + offset_y + menu_height, ), (0, 0)) def set_label(self, label,", "#!/usr/bin/env python #-*- coding:utf-8 -*- # Copyright (C) 2011 ~ 2012 Deepin, Inc.", "click_cb(self, widget, index, label): ''' Internal callback function to \"clicked\" signal. @param widget:", "is golobalized. arrow_right = self.arrow_right arrow_down = self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height()", "for i in path_list]) menu = Menu([ (None, \"测试1\", None), (None, \"测试2\", None),", "This program is free software: you can redistribute it and/or modify # it", "GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.", "in Bread. ''' # Init. super(Bread, self).__init__(spacing = 0) self.arrow_right = arrow_right self.arrow_down", "# Show right button if crumbs exceed scrolled window size. if sum(self.item_list) >", "1.0)), (1, (disable_bg, 1.0))] # Draw background. 
if not widget.state == gtk.STATE_NORMAL: #", "draw_rectangle(cr, x, y , w, h): draw_line(cr, x -1 , y , x", "bread menu, by default is None. @param max_width: Maximum width of bread menu,", "crumbs): ''' Add crumbs. Can accept Crumb instance or a list of Crumb", "of gtk.gdk.Event. ''' if self.menu == None: self.menu_min = 0 cr = widget.window.cairo_create()", "gtk.HBox(spacing = 0) # FIXME: left && right box static setting size #", "self.button_width + 1, y + 2, self.menu_min - 2, h -3) if widget.state", "padding, default is 15 pixels. ''' super(Crumb, self).__init__() self.arrow_right = None self.arrow_down =", "self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event): in_menu =", "else: arrow_pixbuf = arrow_right if self.in_menu: button_color = None menu_color = inner_border else:", "''' cr = widget.window.cairo_create() rect = widget.allocation # Draw backgroud. with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\",", "Add Bread Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb): ''' Internal function", "Draw innner border. cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x + 2, y + 2,", "consider whether or not shown left && right box # at runtime if", "= ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))]", "arrow_down: Dynamic pixbuf for down arrow, default is \\\"treeview/arrow_down.png\\\" from ui theme. @param", "event of type gtk.gdk.Event. 
''' self.in_event_box = False def event_box_press(self, widget, event): '''", "self.index_id = 0 self.set_label(label) self.in_button = True self.in_menu = True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\",", "menu, could be a Menu instance or a list, default is None @param", "self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False)", "function to create menu. @param menu_items: menu_items @return: Menu instance ''' if menu_items", "''' # Init. super(Bread, self).__init__(spacing = 0) self.arrow_right = arrow_right self.arrow_down = arrow_down", "= self.adj.page_size # Show right button if crumbs exceed scrolled window size. if", "but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or", "vbox = gtk.VBox() ###################################### # test breadcrumb widget bread = Bread([(\"Root\", menu), (\"Level1\",", "instance [Crumb, Crumb] ''' crumbs = self.create_crumb(crumbs) for crumb in crumbs: crumb.show() crumb.arrow_right", "Width of Bread. @param height: Height of Bread. ''' self.scroll_win.set_size_request(width - 2 *", "y + h, x + w, y + h) # bottom draw_line(cr, x", "None. @param max_width: Maximum width of bread menu, by default is None. 
'''", "2, y + 2, self.button_width - 4, h -4) cr.fill() elif menu_color: cr.rectangle(", "(temp - value) break #play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value,", "@undocumented: click_cb @undocumented: move_right @undocumented: move_left ''' __gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,", "in crumb] def enter_notify(self, widget, event): ''' Internal callback function to \"enter-notify-event\" signal.", "= inner_border menu_color = None elif widget.state == gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if", "type gtk.gdk.event ''' cr = widget.window.cairo_create() rect = widget.allocation # Draw backgroud. with", "right_box.pack_start(self.right_btn, False, False) # Init Hbox self.hbox = gtk.HBox(False, 0) self.hbox.show() self.eventbox =", "[Crumb, Crumb] ''' crumbs = self.create_crumb(crumbs) for crumb in crumbs: crumb.show() crumb.arrow_right =", "xrange(len(self.item_list)): temp += self.item_list[i] if temp >= value: shift_value = self.item_list[i] - (temp", "(disable_bg, 1.0)), (1, (disable_bg, 1.0))] # Draw background. if not widget.state == gtk.STATE_NORMAL:", "if self.menu != None: # Draw an arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width", "w , y , x + w , y + h -1) #", "= False # Init left button and right button. self.show_left_right_box = show_left_right_box left_box", "''' if self.menu == None: self.menu_min = 0 cr = widget.window.cairo_create() rect =", "@undocumented: expose_bread_menu_frame ''' def __init__(self, items, max_height=None, max_width=None, ): ''' Initialize BreadMenu class.", "value of clicked crumb. @param label: Label of the crumb. 
''' if not", "+ self.menu_min), self.height) self.button_width = self.get_size_request()[0] - self.menu_min self.queue_draw() def expose_cb(self, widget, event):", "+ 1 , self.button_width -1 , h -1) elif menu_color: draw_rectangle(cr, x +", "def draw_rectangle(cr, x, y , w, h): draw_line(cr, x -1 , y ,", "gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color = None arrow_pixbuf = arrow_right", "w: gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox() ###################################### # test breadcrumb widget bread =", "move_right @undocumented: move_left ''' __gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" :", "Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value]) ani.start() if (self.adj.value - shift_value) == 0:", "cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill() return False def add(self, crumbs):", "(None, \"测试1\", None), (None, \"测试2\", None), ], shadow_visible = False, ) win =", "self.in_button = event.x < (widget.allocation.width - self.menu_min) if not self.in_button: self.menu_press = True", "value) #play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break", "Initialize BreadMenu class. @param items: Item for TreeView. @param max_height: Maximum height of", "padding_x: Horizontal padding, default is 15 pixels. ''' super(Crumb, self).__init__() self.arrow_right = None", "of the crumb. 
''' if not self.show_others: for i in self.hbox.get_children()[(index + 1):", "of gtk.gdk.Event ''' if self.menu == None: self.in_button = True self.menu_press = False", "self).__init__() self.arrow_right = None self.arrow_down = None self.menu_min = 18 # menu bar", "(self.label_w, self.label_h) = get_content_size(self.label, font_size) if self.menu == None: self.set_size_request( max(self.label_w + 2", "app_theme is golobalized. arrow_right = self.arrow_right arrow_down = self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(),", "+ offset_y + menu_height, ), (0, 0)) def set_label(self, label, font_size = DEFAULT_FONT_SIZE):", "callback function to \"clicked\" signal. @param widget: Crumb instance. @param index: The index", "from ui theme. @param arrow_down: Dynamic pixbuf for down arrow, default is \\\"treeview/arrow_down.png\\\"", "\"enter-notify-event\" signal. @param widget: gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event.", "= False else: self.in_button = event.x < (widget.allocation.width - self.menu_min) if not self.in_button:", "crumbs. Can accept Crumb instance or a list of Crumb instances @param crumbs:", "class Bread(gtk.HBox): ''' Bread widget is a container which can hold crumbs widget.", "[crumb1, crumb2]) def change_entry(widget, path): # Application can check if path is valid", "if self.menu != None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False self.menu_show = False self.index_id", "self.menu_min = 18 # menu bar width self.btn_min = 50 # button width", "this program. If not, see <http://www.gnu.org/licenses/>. from animation import Animation from scrolled_window import", "= DEFAULT_FONT_SIZE, padding_x = 15, ): ''' Initialize Crumb class. @param label: Crumb", "event: event of type gtk.gdk.event. ''' obj = self.hbox.get_children() label = [] for", "Internal callback function to \"button-press-event\" signal. @param widget: gtk.eventbox. 
@param event: event of", "- shift_value) == 0: self.left_btn.hide() def set_size(self, width, height): ''' Set Bread size.", "menu_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] # Draw background. if not", "instance or Crumb list For instance, there exist a list contain [Crumb1, Crumb2],", "arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min - arrow_width) / 2, y", "self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win = ScrolledWindow() self.pack_start(left_box,", "False, False) right_box.pack_start(self.right_btn, False, False) # Init Hbox self.hbox = gtk.HBox(False, 0) self.hbox.show()", "False, 0) # Test Item add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True,", "-1]: i.destroy() self.item_list[(index + 1):] = [] self.emit(\"item_clicked\", index, label) def move_right(self, widget):", "as published by # the Free Software Foundation, either version 3 of the", "DEFAULT_FONT_SIZE): ''' Set label for left button. @param label: Label @param font_size: Label's", "x + w , y + h -1) # right cr.set_source_rgba(*outside_border) if button_color:", "max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame", "later version. # # This program is distributed in the hope that it", "Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from animation", "menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x + menu_width -", "1) cr.rectangle(x, y, w, h) cr.fill() def shape_bread_menu_frame(self, widget, event): pass def expose_bread_menu_frame(self,", "if sum(self.item_list) > page_size and not page_size == 1.0: self.right_btn.show() def change_node(self, index,", "instance [Crumb, Crumb] ''' if isinstance(crumb, Crumb): return [crumb,] elif isinstance(crumb[0], str): return", "\"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items = None, font_size =", "isinstance(crumb, Crumb): return [crumb,] elif isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb):", "widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if not upper > (page_size + self.adj.value + shift_value):", "list() self.show_others = show_others self.show_entry = show_entry self.crumb = self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH", "= arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\",", "of bread menu, by default is None. ''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False,", "redraw_bg @undocumented: click_cb @undocumented: move_right @undocumented: move_left ''' __gsignals__= { \"entry-changed\" : (gobject.SIGNAL_RUN_LAST,", "# # This program is free software: you can redistribute it and/or modify", "have received a copy of the GNU General Public License # along with", "function to \"press-return\" signal. @param widget: gtk.Entry widget instance. 
''' label = widget.get_text()", "> self.button_width if self.in_menu !=in_menu: self.in_menu = in_menu self.queue_draw() def create_menu(self, menu_items): '''", "= alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show:", "gobject import pango from poplist import Poplist ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox): '''", "align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self, cr,", ", y , x + w, y) # top draw_line(cr, x , y", "right arrow, default is \\\"treeview/arrow_right.png\\\" from ui theme. @param arrow_down: Dynamic pixbuf for", "event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box = True def leave_notify(self,", "index @param crumbs: Crumb instance or Crumb list For instance, there exist a", "License, or # any later version. # # This program is distributed in", "= None menu_color = inner_border else: button_color = inner_border menu_color = None elif", "accept Crumb instance or a list of Crumb instances @param crumbs: Supported inputs", "Foundation, either version 3 of the License, or # any later version. #", "self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event): ''' Internal callback function to \"button-press-event\" signal. @param", "move_right(self, widget): ''' Internal callback function to \"clicked\" signal. @param widget: Right button.", "@param crumb: Support inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])]", "= False, show_entry = True) bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry)", "widget is a container which can hold crumbs widget. 
@undocumented: create_crumb @undocumented: enter_notify", "gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False, 0) test_change_node = gtk.Button(\"Change Root node\")", "widget, event): ''' Internal callback function to \"button-press-event\" signal. @param widget: gtk.eventbox. @param", "utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk import gobject import pango from", "@param widget: Crumb ''' if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press = False", "crumbs: Supported inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb", "self.menu_press = False else: self.in_button = event.x < (widget.allocation.width - self.menu_min) if not", "@param widget: gtk.EventBox @param event: event of type gtk.gdk.event ''' cr = widget.window.cairo_create()", "self.label, x, y , self.button_width, h, self.font_size, text_color, alignment = pango.ALIGN_CENTER) return True", "h -1) elif menu_color: draw_rectangle(cr, x + self.button_width, y + 1, self.menu_min, h", "= self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb): ''' Internal function to create a Crumb", "self.btn_min = 50 # button width self.height = 24 # crumb height self.font_size", "@undocumented: redraw_bg @undocumented: click_cb @undocumented: move_right @undocumented: move_left ''' __gsignals__= { \"entry-changed\" :", "self.emit(\"item_clicked\", index, label) def move_right(self, widget): ''' Internal callback function to \"clicked\" signal.", "+ 2 * self.padding_x + self.menu_min, self.btn_min + self.menu_min), self.height) self.button_width = self.get_size_request()[0]", "= in_menu self.queue_draw() def create_menu(self, menu_items): ''' Internal function to create menu. @param", "# any later version. 
# # This program is distributed in the hope", "[Crumb3, Crumb4]), previous list will be change to [Crumb1, Crumb3, Crumb4]. In this", "import gobject import pango from poplist import Poplist ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox):", "self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width", "given index. ''' for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index +", "top draw_line(cr, x , y + h, x + w, y + h)", "rect.y, rect.width, rect.height) cr.fill() return False def add(self, crumbs): ''' Add crumbs. Can", "widget): ''' Internal callback function to \"clicked\" signal. @param widget: Left button. '''", "import ScrolledWindow from button import Button from theme import ui_theme from menu import", "= self.get_toplevel().window.get_root_origin() (offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height) = widget.allocation.width, widget.allocation.height", "@param label: Label @param font_size: Label's Font size, default is DEFAULT_FONT_SIZE. ''' self.label", "super(Crumb, self).__init__() self.arrow_right = None self.arrow_down = None self.menu_min = 18 # menu", "-3) elif menu_color: draw_rectangle(cr, x + self.button_width + 1, y + 2, self.menu_min", "PURPOSE. See the # GNU General Public License for more details. 
# #", "if menu_items != None and isinstance(menu_items, list): return BreadMenu(menu_items) else: return None def", "disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] menu_color =", "@param widget: Crumb @param event: An event of gtk.gdk.Event ''' if self.menu ==", "def __init__(self, crumb, arrow_right=ui_theme.get_pixbuf(\"treeview/arrow_right.png\"), arrow_down=ui_theme.get_pixbuf(\"treeview/arrow_down.png\"), show_others=False, show_entry=False, show_left_right_box=True ): ''' Initialize Bread class.", "event): ''' Internal callback function to \"button-press-event\" signal. @param widget: Crumb @param event:", "import ui_theme from menu import Menu from constant import DEFAULT_FONT_SIZE from draw import", "arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x + menu_width - arrow_button_width, wy + offset_y", "get_content_size(self.label, font_size) if self.menu == None: self.set_size_request( max(self.label_w + 2 * self.padding_x, self.btn_min),", "remove_node_after_index(self, index): ''' Remove any nodes after given index. @param index: To specified", "self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event): in_menu = event.x > self.button_width self.in_menu", "page_size and not page_size == 1.0: self.right_btn.show() def change_node(self, index, crumbs): ''' Change", "* self.padding_x + self.menu_min, self.btn_min + self.menu_min), self.height) self.button_width = self.get_size_request()[0] - self.menu_min", "area in Bread. ''' # Init. 
super(Bread, self).__init__(spacing = 0) self.arrow_right = arrow_right", "h, x + w, y + h) # bottom draw_line(cr, x , y", "= label (self.label_w, self.label_h) = get_content_size(self.label, font_size) if self.menu == None: self.set_size_request( max(self.label_w", "self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False, False)", "ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox, True, True) # Add Bread Items self.adj =", "), (0, 0)) def set_label(self, label, font_size = DEFAULT_FONT_SIZE): ''' Set label for", "y + 1 , self.button_width -1 , h -1) elif menu_color: draw_rectangle(cr, x", "menu = Menu([ (None, \"测试1\", None), (None, \"测试2\", None), ], shadow_visible = False,", "self.in_button = True self.in_menu = True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\",", "# test breadcrumb widget bread = Bread([(\"Root\", menu), (\"Level1\", menu)], show_others = False,", "= DEFAULT_FONT_SIZE): ''' Set label for left button. @param label: Label @param font_size:", "list of crumb instances @param arrow_right: Dynamic pixbuf for right arrow, default is", "None)])] Crumb instance [Crumb, Crumb] ''' crumbs = self.create_crumb(crumbs) for crumb in crumbs:", "widget, event): ''' Internal callback function to \"enter-notify-event\" signal. @param widget: gtk.EventBox. 
@param", "y + h) # left draw_line(cr, x + w , y , x", "self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press)", "self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup menu for bread. @undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame", "elif menu_color: draw_rectangle(cr, x + self.button_width, y + 1, self.menu_min, h - 1)", "if not value == 0: self.right_btn.show() for i in xrange(len(self.item_list)): temp += self.item_list[i]", "the GNU General Public License # along with this program. If not, see", "draw_pixbuf) from utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk import gobject import", "x, y , w, h): draw_line(cr, x -1 , y , x +", "function to \"enter-notify-event\" signal. @param widget: gtk.EventBox. @param event: The pointer event of", "draw_line(cr, x + w , y , x + w , y +", "label\", None)])] Crumb instance [Crumb, Crumb] ''' crumbs = self.create_crumb(crumbs) for crumb in", "+ value) #play animation ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start()", "Crumb3, Crumb4]. In this way, application can operate crumbs ''' objects = self.hbox.get_children()", "+ self.button_width, y + 1, self.menu_min, h - 1) # Draw innner border.", "up when click space area in Bread. ''' # Init. 
super(Bread, self).__init__(spacing =", "self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True)", "menu) for i in path_list]) menu = Menu([ (None, \"测试1\", None), (None, \"测试2\",", "2, self.button_width - 4, h -4) cr.fill() elif menu_color: cr.rectangle( x + self.button_width", "None menu_color = inner_border arrow_pixbuf = arrow_down elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf =", "for crumb in crumbs: crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id =", "index. @param index: To specified remove after given index. ''' for i in", ", x + w , y + h -1) # right cr.set_source_rgba(*outside_border) if", "y, w, h = rect.x, rect.y, rect.width, rect.height # Should move this part", "self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x, y, w, h): cr.set_source_rgb(1, 1,", "menu) bread.change_node(0, [crumb1, crumb2]) def change_entry(widget, path): # Application can check if path", "a Crumb list for different types of inputs. @param crumb: Support inputs are:", "theme. @param show_others: If True, crumbs will not be destroyed, otherwise all crumbs", "False # Init left button and right button. 
self.show_left_right_box = show_left_right_box left_box =", "= event.x > self.button_width self.in_menu =in_menu def motion_notify_cb(self, widget, event): ''' Internal callback", "= arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\",", "arrow_pixbuf = arrow_right elif widget.state == gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf", "menu), (\"Level1\", menu)], show_others = False, show_entry = True) bread.add([\"xxx\",menu]) # Must set_size", "[] self.add(crumbs) def remove_node_after_index(self, index): ''' Remove any nodes after given index. @param", "Crumb list For instance, there exist a list contain [Crumb1, Crumb2], by using", "details. # # You should have received a copy of the GNU General", "''' objects = self.hbox.get_children() for i in objects[index: -1]: i.destroy() self.item_list[index:] = []", "+ 2, self.button_width - 3, h -3) elif menu_color: draw_rectangle(cr, x + self.button_width", "def redraw_bg(self, widget, event): ''' Internal callback function to \"expose-event\" signal. @param widget:", "''' Internal function to create a Crumb list for different types of inputs.", "''' Set label for left button. @param label: Label @param font_size: Label's Font", "path_list]) menu = Menu([ (None, \"测试1\", None), (None, \"测试2\", None), ], shadow_visible =", "<<EMAIL>> # Maintainer: <NAME> <<EMAIL>> # # This program is free software: you", "draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def __init__(self, items, max_height=None, max_width=None, ): '''", "upper > (page_size + value): self.left_btn.show() for i in xrange(len(self.item_list)+1): temp += self.item_list[i]", "''' obj = self.hbox.get_children() label = [] for o in obj[:-1]: label.append(\"/\"+o.label) o.destroy()", "Bread. 
''' self.scroll_win.set_size_request(width - 2 * self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist):", "== None: self.set_size_request( max(self.label_w + 2 * self.padding_x, self.btn_min), self.height) self.button_width = self.get_size_request()[0]", "- 2, h -3) if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x +", "runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\") self.right_btn = Button(\"&gt;\")", "self.menu_min self.queue_draw() def expose_cb(self, widget, event): ''' Internal expose callback function. @param widget:", "self.left_btn = Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width,", "along with this program. If not, see <http://www.gnu.org/licenses/>. from animation import Animation from", "Label's Font size, default is DEFAULT_FONT_SIZE. ''' self.label = label (self.label_w, self.label_h) =", "this way, application can operate crumbs ''' objects = self.hbox.get_children() for i in", "# FIXME: left && right box static setting size # it is better", "o in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0,", "= ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color = None arrow_pixbuf = arrow_right elif widget.state", "False) right_box.pack_start(self.right_btn, False, False) # Init Hbox self.hbox = gtk.HBox(False, 0) self.hbox.show() self.eventbox", "crumbs exceed scrolled window size. 
if sum(self.item_list) > page_size and not page_size ==", "Crumb item label @param menu_items: Crumb menu, could be a Menu instance or", "self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event):", "menu import Menu from constant import DEFAULT_FONT_SIZE from draw import (draw_line, draw_text, draw_pixbuf)", "destroyed, otherwise all crumbs on the right side will be destroyed. @param show_entry:", "click space area in Bread. ''' # Init. super(Bread, self).__init__(spacing = 0) self.arrow_right", "items: Item for TreeView. @param max_height: Maximum height of bread menu, by default", "= list() self.show_others = show_others self.show_entry = show_entry self.crumb = self.create_crumb(crumb) self.button_width =", "menu_items: menu_items @return: Menu instance ''' if menu_items != None and isinstance(menu_items, list):", "self.in_menu = in_menu self.queue_draw() def create_menu(self, menu_items): ''' Internal function to create menu.", "False) # Init Hbox self.hbox = gtk.HBox(False, 0) self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False)", "to \"press-return\" signal. @param widget: gtk.Entry widget instance. 
''' label = widget.get_text() widget.destroy()", "= ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox, True, True) # Add Bread Items self.adj", "True self.menu_press = False else: self.in_button = event.x < (widget.allocation.width - self.menu_min) if", "or not shown left && right box # at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width,", "import DEFAULT_FONT_SIZE from draw import (draw_line, draw_text, draw_pixbuf) from utils import (get_content_size, cairo_disable_antialias,", "= gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True,", "of type gtk.gdk.event. ''' obj = self.hbox.get_children() label = [] for o in", "= event.x < (widget.allocation.width - self.menu_min) if not self.in_button: self.menu_press = True def", "False, 0) test_change_node = gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node) vbox.pack_start(test_change_node, True, False ,", "= event.x > self.button_width if self.in_menu !=in_menu: self.in_menu = in_menu self.queue_draw() def create_menu(self,", "Draw background. if not widget.state == gtk.STATE_NORMAL: # Draw button border. def draw_rectangle(cr,", "gtk.gdk.Event ''' if self.menu == None: self.in_button = True self.menu_press = False else:", "version. # # This program is distributed in the hope that it will", "''' Internal callback function to \"leave-notify-event\" signal. @param widget: Gtk.EventBox. 
@param event: The", "widget.allocation x, y, w, h = rect.x, rect.y, rect.width, rect.height # Should move", "bread.change_node(0, [crumb1, crumb2]) def change_entry(widget, path): # Application can check if path is", "1):] = [] def click_cb(self, widget, index, label): ''' Internal callback function to", "elif isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb): return crumb else: return", "items, max_height=None, max_width=None, ): ''' Initialize BreadMenu class. @param items: Item for TreeView.", "gtk.STATE_NORMAL: # Draw button border. def draw_rectangle(cr, x, y , w, h): draw_line(cr,", "Bread. @param height: Height of Bread. ''' self.scroll_win.set_size_request(width - 2 * self.button_width, height)", "arrow, default is \\\"treeview/arrow_right.png\\\" from ui theme. @param arrow_down: Dynamic pixbuf for down", "event): ''' Internal callback function to \"leave-notify-event\" signal. @param widget: Gtk.EventBox. @param event:", "''' Intenal callback function to \"clicked\" signal. @param widget: Crumb ''' if self.in_button:", "def enter_cb(self, widget): ''' Internal callback function to \"press-return\" signal. @param widget: gtk.Entry", "can hold crumbs widget. @undocumented: create_crumb @undocumented: enter_notify @undocumented: leave_notify @undocumented: event_box_press @undocumented:", "a list of Crumb instances @param crumbs: Supported inputs are: [\"a label\", Menu]", "General Public License as published by # the Free Software Foundation, either version", "i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] = [] def", "is None. 
''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True)", "0) self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\", self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\",", "cairo_state) import gtk import gobject import pango from poplist import Poplist ARROW_BUTTON_WIDTH =", "None: self.in_button = True self.menu_press = False else: self.in_button = event.x < (widget.allocation.width", "# GNU General Public License for more details. # # You should have", "event.x < (widget.allocation.width - self.menu_min) if not self.in_button: self.menu_press = True def button_clicked(self,", "instances @param crumbs: Supported inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\",", "= widget.translate_coordinates(self.get_toplevel(), 0, 0) (menu_width, menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx", "self.label) else: self.menu_press = False self.menu_show = not self.menu_show if self.menu_show: (wx, wy)", "of Crumb instances @param crumbs: Supported inputs are: [\"a label\", Menu] [(\"a label\",[(None,", "2, y + 2, self.button_width - 3, h -3) elif menu_color: draw_rectangle(cr, x", "not self.in_button: self.menu_press = True def button_clicked(self, widget): ''' Intenal callback function to", "h -4) cr.fill() elif menu_color: cr.rectangle( x + self.button_width + 1, y +", "list): return BreadMenu(menu_items) else: return None def hide_cb(self, widget): ''' Internal callback function", "max_height: Maximum height of bread menu, by default is None. 
@param max_width: Maximum", "widget): ''' Intenal callback function to \"clicked\" signal. @param widget: Crumb ''' if", "Animation from scrolled_window import ScrolledWindow from button import Button from theme import ui_theme", "either version 3 of the License, or # any later version. # #", "it and/or modify # it under the terms of the GNU General Public", "''' crumbs = self.create_crumb(crumbs) for crumb in crumbs: crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down", "outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border = alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if", "bread.add(crumb) def change_root_node( widget): crumb1 = Crumb(\"Yet Another Root\", menu) crumb2 = Crumb(\"Yet", ": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items = None, font_size = DEFAULT_FONT_SIZE,", "theme. @param arrow_down: Dynamic pixbuf for down arrow, default is \\\"treeview/arrow_down.png\\\" from ui", "if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x + 2, y + 2,", "= widget.allocation x, y, w, h = rect.x, rect.y, rect.width, rect.height # Should", "Crumb \"motion-notify-event\" signal. @param widget: Crumb @param event: an event of gtk.gdk.event '''", "arrow_down else: arrow_pixbuf = arrow_right if self.in_menu: button_color = None menu_color = inner_border", "not path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu) for i in path_list]) menu", "> (page_size + self.adj.value + shift_value): self.right_btn.hide() def move_left(self, widget): ''' Internal callback", "space area in Bread. ''' # Init. 
super(Bread, self).__init__(spacing = 0) self.arrow_right =", "draw_rectangle(cr, x + self.button_width + 1, y + 2, self.menu_min - 2, h", "alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color", "Set label for left button. @param label: Label @param font_size: Label's Font size,", "for c in crumb] def enter_notify(self, widget, event): ''' Internal callback function to", "(\"Level1\", menu)], show_others = False, show_entry = True) bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200,", "+ 1): -1]: i.destroy() self.item_list[(index + 1):] = [] def click_cb(self, widget, index,", "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General", "True, False, 0) test_change_node = gtk.Button(\"Change Root node\") test_change_node.connect(\"clicked\", change_root_node) vbox.pack_start(test_change_node, True, False", "pixbuf for down arrow, default is \\\"treeview/arrow_down.png\\\" from ui theme. @param show_others: If", "[\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] '''", "Internal function to create menu. @param menu_items: menu_items @return: Menu instance ''' if", "= widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x + menu_width - arrow_button_width,", "\\\"treeview/arrow_down.png\\\" from ui theme. 
@param show_others: If True, crumbs will not be destroyed,", "= inner_border arrow_pixbuf = arrow_down elif widget.state == gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color", "menu_color = inner_border else: button_color = inner_border menu_color = None elif widget.state ==", "0, 0) (menu_width, menu_height) = widget.allocation.width, widget.allocation.height arrow_button_width = ARROW_BUTTON_WIDTH self.menu.show((wx + offset_x", "2, self.menu_min - 2, h -3) if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color:", "self.menu_min, self.btn_min + self.menu_min), self.height) self.button_width = self.get_size_request()[0] - self.menu_min self.queue_draw() def expose_cb(self,", "draw import (draw_line, draw_text, draw_pixbuf) from utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import", "ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox): ''' Bread widget is a container which can", "- 2) cr.fill() gobject.type_register(BreadMenu) class Crumb(gtk.Button): ''' Crumb class . @undocumented: enter_button @undocumented:", "enter_notify(self, widget, event): ''' Internal callback function to \"enter-notify-event\" signal. @param widget: gtk.EventBox.", "item label @param menu_items: Crumb menu, could be a Menu instance or a", "= [] for o in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb)", "if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state == gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color = None", "= self.item_list[i] - (temp - value) break #play animation ani = Animation(self.adj, lambda", "button_clicked(self, widget): ''' Intenal callback function to \"clicked\" signal. 
@param widget: Crumb '''", "widget, v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if not upper > (page_size + self.adj.value", "@param menu_items: Crumb menu, could be a Menu instance or a list, default", "cr.fill() if self.menu != None: # Draw an arrow. draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x +", "entry will pop up when click space area in Bread. ''' # Init.", "@param arrow_down: Dynamic pixbuf for down arrow, default is \\\"treeview/arrow_down.png\\\" from ui theme.", "self.menu_min) if not self.in_button: self.menu_press = True def button_clicked(self, widget): ''' Intenal callback", "program is free software: you can redistribute it and/or modify # it under", "to \"clicked\" signal. @param widget: Crumb instance. @param index: The index value of", "height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup menu for bread. @undocumented: draw_treeview_mask", "o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True,", "@param label: Crumb item label @param menu_items: Crumb menu, could be a Menu", "default is DEFAULT_FONT_SIZE. ''' self.label = label (self.label_w, self.label_h) = get_content_size(self.label, font_size) if", "gtk.HBox(spacing = 0) right_box = gtk.HBox(spacing = 0) # FIXME: left && right", "self.queue_draw() def create_menu(self, menu_items): ''' Internal function to create menu. @param menu_items: menu_items", "__init__(self, items, max_height=None, max_width=None, ): ''' Initialize BreadMenu class. 
@param items: Item for", "menu_color = None arrow_pixbuf = arrow_right else: button_color = None menu_color = inner_border", "temp += self.item_list[i] if temp > (page_size + value): shift_value = temp -", "arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15)) inner_border =", "- arrow_height) / 2) # Draw text. draw_text(cr, self.label, x, y , self.button_width,", "''' Internal callback function to \"expose-event\" signal. @param widget: gtk.EventBox @param event: event", "= 0) self.arrow_right = arrow_right self.arrow_down = arrow_down self.item_list = list() self.show_others =", "of crumb instances @param arrow_right: Dynamic pixbuf for right arrow, default is \\\"treeview/arrow_right.png\\\"", "callback function to \"button-press-event\" signal. @param widget: gtk.eventbox. @param event: event of type", "type gtk.gdk.Event. ''' self.in_event_box = True def leave_notify(self, widget, event): ''' Internal callback", "is valid or not path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu) for i", "widget.state == gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf = arrow_down else: arrow_pixbuf", "menu_items != None and isinstance(menu_items, list): return BreadMenu(menu_items) else: return None def hide_cb(self,", "show_entry = True) bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread,", "True) self.pack_start(self.hbox, True, True) # Add Bread Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def", "x + 2, y + 2, self.button_width - 3, h -3) elif menu_color:", "str): return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb): return crumb else: return [Crumb(c[0], c[1])", "''' super(Crumb, self).__init__() 
self.arrow_right = None self.arrow_down = None self.menu_min = 18 #", "window size. if sum(self.item_list) > page_size and not page_size == 1.0: self.right_btn.show() def", "Label @param font_size: Label's Font size, default is DEFAULT_FONT_SIZE. ''' self.label = label", "if self.menu_show: arrow_pixbuf = arrow_down else: arrow_pixbuf = arrow_right if self.in_menu: button_color =", "[crumb,] elif isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb): return crumb else:", "self.padding_x = padding_x self.menu = self.create_menu(menu_items) if self.menu != None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press", "& right buttons self.in_event_box = False # Init left button and right button.", "instance or a list of crumb instances @param arrow_right: Dynamic pixbuf for right", "rect.y, rect.width, rect.height # Should move this part to Bread class since app_theme", ", x , y + h) # left draw_line(cr, x + w ,", "= [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] # Draw background. if not widget.state", "value+shift_value]) ani.start() break if not upper > (page_size + self.adj.value + shift_value): self.right_btn.hide()", "from theme import ui_theme from menu import Menu from constant import DEFAULT_FONT_SIZE from", "def move_left(self, widget): ''' Internal callback function to \"clicked\" signal. @param widget: Left", "gtk.gdk.Event. 
''' if self.menu == None: self.menu_min = 0 cr = widget.window.cairo_create() rect", "right box # at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn =", "= gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False, 0) test_change_node = gtk.Button(\"Change Root", "button width self.height = 24 # crumb height self.font_size = font_size self.padding_x =", "self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win = ScrolledWindow() self.pack_start(left_box, False, True) self.pack_start(self.hbox, True,", "widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x + 2, y + 2, self.button_width", "Change any nodes start from specified index @param index: Start index @param crumbs:", "self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text())) self.eventbox.hide() self.hbox.pack_start(self.entry, True, True) def enter_cb(self, widget): ''' Internal", "self.create_crumb(crumbs) for crumb in crumbs: crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id", "crumbs: crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb)", "self.pack_start(self.hbox, True, True) # Add Bread Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self,", "-1]: i.destroy() self.item_list[index:] = [] self.add(crumbs) def remove_node_after_index(self, index): ''' Remove any nodes", "- 2 * self.button_width, height) self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup menu", ", x + w, y) # top draw_line(cr, x 
, y + h,", "~ 2012 Zeng Zhi # # Author: <NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>>", "def set_size(self, width, height): ''' Set Bread size. @param width: Width of Bread.", "=in_menu def motion_notify_cb(self, widget, event): ''' Internal callback function to Crumb \"motion-notify-event\" signal.", "== None: self.in_button = True self.menu_press = False else: self.in_button = event.x <", "enter_button @undocumented: motion_notify_cb @undocumented: create_menu @undocumented: hide_cb @undocumented: button_press_cb @undocumented: button_clicked @undocumented: expose_cb", "golobalized. arrow_right = self.arrow_right arrow_down = self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf", "\"motion-notify-event\" signal. @param widget: Crumb @param event: an event of gtk.gdk.event ''' in_menu", "@undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def __init__(self, items, max_height=None, max_width=None, ):", "or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License", "\"clicked\" signal. @param widget: Right button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size,", "(draw_line, draw_text, draw_pixbuf) from utils import (get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state) import gtk import", "items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask", "all crumbs on the right side will be destroyed. 
@param show_entry: If True,", "x, y , self.button_width, h, self.font_size, text_color, alignment = pango.ALIGN_CENTER) return True gobject.type_register(Crumb)", "self.menu == None: self.set_size_request( max(self.label_w + 2 * self.padding_x, self.btn_min), self.height) self.button_width =", "Hbox self.hbox = gtk.HBox(False, 0) self.hbox.show() self.eventbox = gtk.EventBox() self.eventbox.set_visible_window(False) if self.show_entry: self.eventbox.connect(\"enter-notify-event\",", "since app_theme is golobalized. arrow_right = self.arrow_right arrow_down = self.arrow_down arrow_width, arrow_height =", "draw_line(cr, x , y + h, x + w, y + h) #", "from constant import DEFAULT_FONT_SIZE from draw import (draw_line, draw_text, draw_pixbuf) from utils import", "crumb. ''' if not self.show_others: for i in self.hbox.get_children()[(index + 1): -1]: i.destroy()", "widget.allocation # Draw backgroud. with cairo_state(cr): cr.set_source_rgba(*alpha_color_hex_to_cairo((\"#def5ff\", 1))) cr.rectangle(rect.x, rect.y, rect.width, rect.height) cr.fill()", "event.x > self.button_width if self.in_menu !=in_menu: self.in_menu = in_menu self.queue_draw() def create_menu(self, menu_items):", "True def button_clicked(self, widget): ''' Intenal callback function to \"clicked\" signal. @param widget:", "start from specified index @param index: Start index @param crumbs: Crumb instance or", "label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' crumbs", "height self.font_size = font_size self.padding_x = padding_x self.menu = self.create_menu(menu_items) if self.menu !=", "program is distributed in the hope that it will be useful, # but", "An event of gtk.gdk.Event. 
''' if self.menu == None: self.menu_min = 0 cr", "BreadMenu(menu_items) else: return None def hide_cb(self, widget): ''' Internal callback function to Menu's", "show_others: If True, crumbs will not be destroyed, otherwise all crumbs on the", "distributed in the hope that it will be useful, # but WITHOUT ANY", "@undocumented: enter_cb @undocumented: redraw_bg @undocumented: click_cb @undocumented: move_right @undocumented: move_left ''' __gsignals__= {", "signal. @param widget: gtk.EventBox @param event: event of type gtk.gdk.event ''' cr =", "self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False, False) # Init Hbox", "gtk.gdk.event. ''' obj = self.hbox.get_children() label = [] for o in obj[:-1]: label.append(\"/\"+o.label)", "any later version. # # This program is distributed in the hope that", "crumb instances @param arrow_right: Dynamic pixbuf for right arrow, default is \\\"treeview/arrow_right.png\\\" from", "self.hide_cb) self.menu_press = False self.menu_show = False self.index_id = 0 self.set_label(label) self.in_button =", "15, ): ''' Initialize Crumb class. @param label: Crumb item label @param menu_items:", "callback function to \"press-return\" signal. @param widget: gtk.Entry widget instance. ''' label =", "change_node(1, [Crumb3, Crumb4]), previous list will be change to [Crumb1, Crumb3, Crumb4]. In", "{ \"entry-changed\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)), \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,)) } def", "on the right side will be destroyed. @param show_entry: If True, an entry", "self.queue_draw() def expose_cb(self, widget, event): ''' Internal expose callback function. 
@param widget: Crumb", "ui_theme from menu import Menu from constant import DEFAULT_FONT_SIZE from draw import (draw_line,", "> (page_size + value): self.left_btn.show() for i in xrange(len(self.item_list)+1): temp += self.item_list[i] if", "crumb.arrow_down = self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size", "widget, event): ''' Internal callback function to \"expose-event\" signal. @param widget: gtk.EventBox @param", "= 0 self.set_label(label) self.in_button = True self.in_menu = True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb)", "Crumb] ''' if isinstance(crumb, Crumb): return [crumb,] elif isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),]", "enter_cb(self, widget): ''' Internal callback function to \"press-return\" signal. @param widget: gtk.Entry widget", "gtk.Window(gtk.WINDOW_TOPLEVEL) win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox() ###################################### # test breadcrumb", "self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size # Show right button if crumbs exceed scrolled window", "self.in_event_box = True def leave_notify(self, widget, event): ''' Internal callback function to \"leave-notify-event\"", "+ w, y) # top draw_line(cr, x , y + h, x +", "using change_node(1, [Crumb3, Crumb4]), previous list will be change to [Crumb1, Crumb3, Crumb4].", "or Crumb list For instance, there exist a list contain [Crumb1, Crumb2], by", "def leave_notify(self, widget, event): ''' Internal callback function to \"leave-notify-event\" signal. 
@param widget:", "self.button_width -1 , h -1) elif menu_color: draw_rectangle(cr, x + self.button_width, y +", "change_entry) ##################################### vbox.pack_start(bread, False, False, 0) # Test Item add_path_button = gtk.Button(\"Add Item\")", "\\\"treeview/arrow_right.png\\\" from ui theme. @param arrow_down: Dynamic pixbuf for down arrow, default is", "is a container which can hold crumbs widget. @undocumented: create_crumb @undocumented: enter_notify @undocumented:", "False else: self.in_button = event.x < (widget.allocation.width - self.menu_min) if not self.in_button: self.menu_press", "in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label)) self.entry.show() self.entry.select_region(0, len(self.entry.get_text()))", "cr.rectangle( x + self.button_width + 1, y + 2, self.menu_min - 3, h", "can check if path is valid or not path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i", "the Free Software Foundation, either version 3 of the License, or # any", "self.menu.show((wx + offset_x + menu_width - arrow_button_width, wy + offset_y + menu_height, ),", "arrow_right: Dynamic pixbuf for right arrow, default is \\\"treeview/arrow_right.png\\\" from ui theme. @param", "i in xrange(len(self.item_list)): temp += self.item_list[i] if temp >= value: shift_value = self.item_list[i]", "self.enter_notify) self.eventbox.connect(\"leave-notify-event\", self.leave_notify) self.eventbox.connect(\"button-press-event\", self.event_box_press) self.hbox.pack_end(self.eventbox, True, True) self.scroll_win = ScrolledWindow() self.pack_start(left_box, False,", "height of bread menu, by default is None. 
@param max_width: Maximum width of", "inner_border menu_color = None arrow_pixbuf = arrow_right else: button_color = None menu_color =", "Button from theme import ui_theme from menu import Menu from constant import DEFAULT_FONT_SIZE", "= 0 cr = widget.window.cairo_create() rect = widget.allocation x, y, w, h =", "= True self.in_menu = True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb)", "Item add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel) vbox.pack_start(add_path_button, True, False, 0) test_change_node =", "# menu bar width self.btn_min = 50 # button width self.height = 24", "event of gtk.gdk.Event ''' if self.menu == None: self.in_button = True self.menu_press =", "return [crumb,] elif isinstance(crumb[0], str): return [Crumb(crumb[0], crumb[1]),] elif isinstance(crumb[0], Crumb): return crumb", "import gtk def add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget): crumb1 =", "0) # FIXME: left && right box static setting size # it is", "+ (self.menu_min - arrow_width) / 2, y + (h - arrow_height) / 2)", "# crumb height self.font_size = font_size self.padding_x = padding_x self.menu = self.create_menu(menu_items) if", "else: self.menu_show = False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event): ''' Internal callback function", "text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg, 1.0)), (1, (disable_bg,", "max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask 
self.expose_window_frame =", "leave_notify @undocumented: event_box_press @undocumented: enter_cb @undocumented: redraw_bg @undocumented: click_cb @undocumented: move_right @undocumented: move_left", "@param widget: Crumb instance. @param event: An event of gtk.gdk.Event. ''' if self.menu", "after given index. ''' for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index", "bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False, False, 0) # Test Item add_path_button = gtk.Button(\"Add", "True self.in_menu = True self.connect(\"expose_event\", self.expose_cb) self.connect(\"button_press_event\", self.button_press_cb) self.connect(\"clicked\", self.button_clicked) self.connect(\"motion-notify-event\", self.motion_notify_cb) self.connect(\"enter-notify-event\",", "enter_cb @undocumented: redraw_bg @undocumented: click_cb @undocumented: move_right @undocumented: move_left ''' __gsignals__= { \"entry-changed\"", "1):] = [] self.emit(\"item_clicked\", index, label) def move_right(self, widget): ''' Internal callback function", "= True def button_clicked(self, widget): ''' Intenal callback function to \"clicked\" signal. @param", "= True) bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) ##################################### vbox.pack_start(bread, False,", "widget: gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event. 
''' self.in_event_box =", "self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame def draw_treeview_mask(self, cr, x, y,", "&& right box # at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn", "1.0)), (1, (disable_bg, 1.0))] menu_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] #", "def expose_bread_menu_frame(self, widget, event): cr = widget.window.cairo_create() rect = widget.allocation with cairo_disable_antialias(cr): outside_border", "1): -1]: i.destroy() self.item_list[(index + 1):] = [] self.emit(\"item_clicked\", index, label) def move_right(self,", "''' __gsignals__= { \"item_clicked\" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items =", "if not widget.state == gtk.STATE_NORMAL: # Draw button border. def draw_rectangle(cr, x, y", "size # it is better to consider whether or not shown left &&", "button border. def draw_rectangle(cr, x, y , w, h): draw_line(cr, x -1 ,", "< (widget.allocation.width - self.menu_min) if not self.in_button: self.menu_press = True def button_clicked(self, widget):", "font_size: Font size, default is DEFAULT_FONT_SIZE. @param padding_x: Horizontal padding, default is 15", "show_others=False, show_entry=False, show_left_right_box=True ): ''' Initialize Bread class. @param crumb: Crumb instance or", "self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1) self.right_btn.set_size_request(self.button_width, -1) left_box.pack_start(self.left_btn, False, False) right_box.pack_start(self.right_btn, False,", "is \\\"treeview/arrow_down.png\\\" from ui theme. 
@param show_others: If True, crumbs will not be", "self.click_cb) self.hbox.pack_start(crumb, False, False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size # Show right button if", "if __name__ == \"__main__\": import gtk def add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb) def", "button if crumbs exceed scrolled window size. if sum(self.item_list) > page_size and not", "== gtk.STATE_ACTIVE: text_color = ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color = inner_border menu_color = None", "theme import ui_theme from menu import Menu from constant import DEFAULT_FONT_SIZE from draw", "will be destroyed. @param show_entry: If True, an entry will pop up when", "signal. @param widget: Crumb @param event: An event of gtk.gdk.Event ''' if self.menu", "Crumb class . @undocumented: enter_button @undocumented: motion_notify_cb @undocumented: create_menu @undocumented: hide_cb @undocumented: button_press_cb", "label\", None)])] Crumb instance [Crumb, Crumb] ''' if isinstance(crumb, Crumb): return [crumb,] elif", "= None arrow_pixbuf = arrow_right else: button_color = None menu_color = inner_border arrow_pixbuf", "= Menu([ (None, \"测试1\", None), (None, \"测试2\", None), ], shadow_visible = False, )", "0 temp = 0 if upper > (page_size + value): self.left_btn.show() for i", "self.show_entry = show_entry self.crumb = self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH # for left &", "True) def enter_cb(self, widget): ''' Internal callback function to \"press-return\" signal. @param widget:", "/ 2, y + (h - arrow_height) / 2) # Draw text. 
draw_text(cr,", "x + w, y + h) # bottom draw_line(cr, x , y ,", "and/or modify # it under the terms of the GNU General Public License", "self.button_width = self.get_size_request()[0] else: self.set_size_request( max(self.label_w + 2 * self.padding_x + self.menu_min, self.btn_min", "self.item_list[i] if temp > (page_size + value): shift_value = temp - (page_size +", "destroyed. @param show_entry: If True, an entry will pop up when click space", "(widget.allocation.width - self.menu_min) if not self.in_button: self.menu_press = True def button_clicked(self, widget): '''", "of clicked crumb. @param label: Label of the crumb. ''' if not self.show_others:", "The index value of clicked crumb. @param label: Label of the crumb. '''", "width of bread menu, by default is None. ''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width,", "= 24 # crumb height self.font_size = font_size self.padding_x = padding_x self.menu =", "callback function to \"clicked\" signal. @param widget: Crumb ''' if self.in_button: self.emit(\"item_clicked\", self.index_id,", "def draw_treeview_mask(self, cr, x, y, w, h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y, w,", "alpha_color_hex_to_cairo, cairo_state) import gtk import gobject import pango from poplist import Poplist ARROW_BUTTON_WIDTH", "draw_line(cr, x -1 , y , x + w, y) # top draw_line(cr,", "[] for o in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry() self.entry.connect(\"activate\", self.enter_cb) self.entry.set_text(\"\".join(label))", "for i in xrange(len(self.item_list)): temp += self.item_list[i] if temp >= value: shift_value =", "an entry will pop up when click space area in Bread. ''' #", "/ 2) # Draw text. 
draw_text(cr, self.label, x, y , self.button_width, h, self.font_size,", "gtk.STATE_PRELIGHT: text_color = ui_theme.get_color(\"title_text\").get_color() if self.menu_show: arrow_pixbuf = arrow_down else: arrow_pixbuf = arrow_right", "self.menu_min, h - 1) # Draw innner border. cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x", "right box static setting size # it is better to consider whether or", "or a list, default is None @param font_size: Font size, default is DEFAULT_FONT_SIZE.", "self.motion_notify_cb) self.connect(\"enter-notify-event\", self.enter_button) self.add_events(gtk.gdk.POINTER_MOTION_MASK) def enter_button(self, widget, event): in_menu = event.x > self.button_width", "menu. @param menu_items: menu_items @return: Menu instance ''' if menu_items != None and", "= arrow_down self.item_list = list() self.show_others = show_others self.show_entry = show_entry self.crumb =", "obj = self.hbox.get_children() label = [] for o in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry", "win.set_default_size(600,300) vbox = gtk.VBox() ###################################### # test breadcrumb widget bread = Bread([(\"Root\", menu),", "= 0 temp = 0 if not value == 0: self.right_btn.show() for i", "# This program is distributed in the hope that it will be useful,", "bread menu, by default is None. ''' Poplist.__init__(self, items=items, max_height=max_height, max_width=max_width, shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame,", "button_color: cr.rectangle(x + 2, y + 2, self.button_width - 4, h -4) cr.fill()", "Initialize Crumb class. @param label: Crumb item label @param menu_items: Crumb menu, could", "Internal callback function to \"press-return\" signal. @param widget: gtk.Entry widget instance. ''' label", "''' Internal callback function to Menu's \"\"hide\" signal. @param widget: Menu ''' if", "def remove_node_after_index(self, index): ''' Remove any nodes after given index. 
@param index: To", "else: button_color = None menu_color = inner_border arrow_pixbuf = arrow_down elif widget.state ==", "In this way, application can operate crumbs ''' objects = self.hbox.get_children() for i", "False def event_box_press(self, widget, event): ''' Internal callback function to \"button-press-event\" signal. @param", "rect.height # Should move this part to Bread class since app_theme is golobalized.", "self.arrow_down arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height() arrow_pixbuf = arrow_right outside_border = alpha_color_hex_to_cairo((\"#000000\", 0.15))", "is DEFAULT_FONT_SIZE. @param padding_x: Horizontal padding, default is 15 pixels. ''' super(Crumb, self).__init__()", "= alpha_color_hex_to_cairo((\"#ffffff\", 0.5)) active_mask = alpha_color_hex_to_cairo((\"#000000\", 0.1)) if self.menu_show: self.set_state(gtk.STATE_PRELIGHT) if widget.state ==", "False, False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size # Show right button if crumbs exceed", "= Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right) self.left_btn.connect(\"clicked\", self.move_left) self.left_btn.set_size_request(self.button_width, -1)", "pixels. ''' super(Crumb, self).__init__() self.arrow_right = None self.arrow_down = None self.menu_min = 18", "+ self.adj.value + shift_value): self.right_btn.hide() def move_left(self, widget): ''' Internal callback function to", "x + 1 , y + 1 , self.button_width -1 , h -1)", "0) self.arrow_right = arrow_right self.arrow_down = arrow_down self.item_list = list() self.show_others = show_others", "index: The index value of clicked crumb. @param label: Label of the crumb.", "''' def __init__(self, items, max_height=None, max_width=None, ): ''' Initialize BreadMenu class. @param items:", "program. 
If not, see <http://www.gnu.org/licenses/>. from animation import Animation from scrolled_window import ScrolledWindow", "-1) right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\") self.right_btn = Button(\"&gt;\") self.left_btn.set_no_show_all(True) self.right_btn.set_no_show_all(True) self.right_btn.connect(\"clicked\", self.move_right)", "def shape_bread_menu_frame(self, widget, event): pass def expose_bread_menu_frame(self, widget, event): cr = widget.window.cairo_create() rect", "y + 2, self.button_width - 4, h -4) cr.fill() elif menu_color: cr.rectangle( x", "True, True) def enter_cb(self, widget): ''' Internal callback function to \"press-return\" signal. @param", "value): self.left_btn.show() for i in xrange(len(self.item_list)+1): temp += self.item_list[i] if temp > (page_size", "for down arrow, default is \\\"treeview/arrow_down.png\\\" from ui theme. @param show_others: If True,", "== gtk.STATE_INSENSITIVE: arrow_pixbuf = arrow_right text_color = ui_theme.get_color(\"disable_text\").get_color() disable_bg = ui_theme.get_color(\"disable_background\").get_color() button_color =", "return [Crumb(c[0], c[1]) for c in crumb] def enter_notify(self, widget, event): ''' Internal", ", h -1) elif menu_color: draw_rectangle(cr, x + self.button_width, y + 1, self.menu_min,", "from poplist import Poplist ARROW_BUTTON_WIDTH = 20 class Bread(gtk.HBox): ''' Bread widget is", "innner border. cr.set_source_rgba(*inner_border) if button_color: draw_rectangle(cr, x + 2, y + 2, self.button_width", "2, self.button_width - 3, h -3) elif menu_color: draw_rectangle(cr, x + self.button_width +", "modify # it under the terms of the GNU General Public License as", "contain [Crumb1, Crumb2], by using change_node(1, [Crumb3, Crumb4]), previous list will be change", "+ self.button_width + 1, y + 2, self.menu_min - 3, h -4) cr.fill()", "1, y + 2, self.menu_min - 3, h -4) cr.fill() if self.menu !=", "widget: Crumb instance. 
@param index: The index value of clicked crumb. @param label:", "hide_cb(self, widget): ''' Internal callback function to Menu's \"\"hide\" signal. @param widget: Menu", "1, 1) cr.rectangle(x, y, w, h) cr.fill() def shape_bread_menu_frame(self, widget, event): pass def", "a Menu instance or a list, default is None @param font_size: Font size,", "instance ''' if menu_items != None and isinstance(menu_items, list): return BreadMenu(menu_items) else: return", "0 temp = 0 if not value == 0: self.right_btn.show() for i in", "down arrow, default is \\\"treeview/arrow_down.png\\\" from ui theme. @param show_others: If True, crumbs", "signal. @param widget: Right button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value", "is free software: you can redistribute it and/or modify # it under the", "@param font_size: Label's Font size, default is DEFAULT_FONT_SIZE. ''' self.label = label (self.label_w,", "x + w , y , x + w , y + h", "self.set_state(gtk.STATE_PRELIGHT) else: self.menu_show = False self.set_state(gtk.STATE_NORMAL) def button_press_cb(self, widget, event): ''' Internal callback", "arrow_width) / 2, y + (h - arrow_height) / 2) # Draw text.", "self.add(self.crumb) def create_crumb(self, crumb): ''' Internal function to create a Crumb list for", "in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] = [] def click_cb(self,", "Internal callback function to \"expose-event\" signal. @param widget: gtk.EventBox @param event: event of", "= ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] menu_color = [(0,", "''' if self.menu == None: self.in_button = True self.menu_press = False else: self.in_button", "Internal callback function to \"enter-notify-event\" signal. @param widget: gtk.EventBox. 
@param event: The pointer", "False) self.item_list.append(crumb.get_size_request()[0]) page_size = self.adj.page_size # Show right button if crumbs exceed scrolled", "ui_theme.get_color(\"disable_background\").get_color() button_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] menu_color = [(0, (disable_bg,", "TreeView. @param max_height: Maximum height of bread menu, by default is None. @param", "False self.menu_show = False self.index_id = 0 self.set_label(label) self.in_button = True self.in_menu =", "crumb.show() crumb.arrow_right = self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb,", "= padding_x self.menu = self.create_menu(menu_items) if self.menu != None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press =", "widget: Left button. ''' upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value shift_value =", "See the # GNU General Public License for more details. # # You", "def add(self, crumbs): ''' Add crumbs. Can accept Crumb instance or a list", "self.show_others = show_others self.show_entry = show_entry self.crumb = self.create_crumb(crumb) self.button_width = ARROW_BUTTON_WIDTH #", "w, y) # top draw_line(cr, x , y + h, x + w,", "2011 ~ 2012 Zeng Zhi # # Author: <NAME> <<EMAIL>> # Maintainer: <NAME>", "to [Crumb1, Crumb3, Crumb4]. In this way, application can operate crumbs ''' objects", "for TreeView. @param max_height: Maximum height of bread menu, by default is None.", "widget, event): ''' Internal callback function to \"button-press-event\" signal. @param widget: Crumb @param", "of Bread. @param height: Height of Bread. ''' self.scroll_win.set_size_request(width - 2 * self.button_width,", "Menu instance ''' if menu_items != None and isinstance(menu_items, list): return BreadMenu(menu_items) else:", "instance. @param event: An event of gtk.gdk.Event. 
''' if self.menu == None: self.menu_min", "License as published by # the Free Software Foundation, either version 3 of", "''' Add crumbs. Can accept Crumb instance or a list of Crumb instances", "= arrow_right else: button_color = None menu_color = inner_border arrow_pixbuf = arrow_down elif", "else: self.set_size_request( max(self.label_w + 2 * self.padding_x + self.menu_min, self.btn_min + self.menu_min), self.height)", "= arrow_right if self.in_menu: button_color = None menu_color = inner_border else: button_color =", "in xrange(len(self.item_list)+1): temp += self.item_list[i] if temp > (page_size + value): shift_value =", "Crumb): return crumb else: return [Crumb(c[0], c[1]) for c in crumb] def enter_notify(self,", "w, y + h) # bottom draw_line(cr, x , y , x ,", "self.button_width + (self.menu_min - arrow_width) / 2, y + (h - arrow_height) /", "Zhi # # Author: <NAME> <<EMAIL>> # Maintainer: <NAME> <<EMAIL>> # # This", "box # at runtime if self.show_left_right_box: left_box.set_size_request(self.button_width, -1) right_box.set_size_request(self.button_width, -1) self.left_btn = Button(\"&lt;\")", "rect.width, rect.height) cr.fill() return False def add(self, crumbs): ''' Add crumbs. Can accept", "\"expose-event\" signal. @param widget: gtk.EventBox @param event: event of type gtk.gdk.event ''' cr", "gtk.gdk.event ''' in_menu = event.x > self.button_width if self.in_menu !=in_menu: self.in_menu = in_menu", "Bread([(\"Root\", menu), (\"Level1\", menu)], show_others = False, show_entry = True) bread.add([\"xxx\",menu]) # Must", "height): ''' Set Bread size. @param width: Width of Bread. @param height: Height", "function to Crumb \"motion-notify-event\" signal. 
@param widget: Crumb @param event: an event of", "[(\"a label\",[(None, \"menu label\", None)])] Crumb instance [Crumb, Crumb] ''' crumbs = self.create_crumb(crumbs)", "win.connect(\"destroy\", lambda w: gtk.main_quit()) win.set_default_size(600,300) vbox = gtk.VBox() ###################################### # test breadcrumb widget", "vbox.pack_start(bread, False, False, 0) # Test Item add_path_button = gtk.Button(\"Add Item\") add_path_button.connect(\"clicked\", add_panel)", "menu_items): ''' Internal function to create menu. @param menu_items: menu_items @return: Menu instance", "rect.width, rect.height # Should move this part to Bread class since app_theme is", "Support inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb instance", "self.in_button = True self.menu_press = False else: self.in_button = event.x < (widget.allocation.width -", "ani.start() if (self.adj.value - shift_value) == 0: self.left_btn.hide() def set_size(self, width, height): '''", "''' self.in_event_box = True def leave_notify(self, widget, event): ''' Internal callback function to", "Software Foundation, either version 3 of the License, or # any later version.", "for i in xrange(len(self.item_list)+1): temp += self.item_list[i] if temp > (page_size + value):", "FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more", "self.right_btn.show() def change_node(self, index, crumbs): ''' Change any nodes start from specified index", "widget.state == gtk.STATE_NORMAL: text_color = ui_theme.get_color(\"title_text\").get_color() button_color = None menu_color = None arrow_pixbuf", "(0, 0)) def set_label(self, label, font_size = DEFAULT_FONT_SIZE): ''' Set label for left", "= widget.window.cairo_create() rect = widget.allocation x, y, w, h = rect.x, rect.y, rect.width,", "y + 2, self.menu_min - 3, h -4) cr.fill() if self.menu != None:", "index, label) def move_right(self, widget): ''' Internal callback function to \"clicked\" signal. 
@param", "shadow_visible=False, shape_frame_function=self.shape_bread_menu_frame, expose_frame_function=self.expose_bread_menu_frame, align_size=2, ) self.set_skip_pager_hint(True) self.set_skip_taskbar_hint(True) self.treeview.draw_mask = self.draw_treeview_mask self.expose_window_frame = self.expose_bread_menu_frame", "gtk import gobject import pango from poplist import Poplist ARROW_BUTTON_WIDTH = 20 class", "cr.set_source_rgba(*outside_border) if button_color: draw_rectangle(cr, x + 1 , y + 1 , self.button_width", "def button_clicked(self, widget): ''' Intenal callback function to \"clicked\" signal. @param widget: Crumb", "menu bar width self.btn_min = 50 # button width self.height = 24 #", "= arrow_down else: arrow_pixbuf = arrow_right if self.in_menu: button_color = None menu_color =", "self.menu_min - 2, h -3) if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x", "in objects[index: -1]: i.destroy() self.item_list[index:] = [] self.add(crumbs) def remove_node_after_index(self, index): ''' Remove", "clicked crumb. @param label: Label of the crumb. ''' if not self.show_others: for", "None, font_size = DEFAULT_FONT_SIZE, padding_x = 15, ): ''' Initialize Crumb class. @param", "bread. @undocumented: draw_treeview_mask @undocumented: shape_bread_menu_frame @undocumented: expose_bread_menu_frame ''' def __init__(self, items, max_height=None, max_width=None,", "cr, x, y, w, h): cr.set_source_rgb(1, 1, 1) cr.rectangle(x, y, w, h) cr.fill()", "x , y + h, x + w, y + h) # bottom", "= pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if __name__ == \"__main__\": import gtk def add_panel(widget):", "self.hbox.set_size_request(-1, self.hbox.get_children()[0].height) gobject.type_register(Bread) class BreadMenu(Poplist): ''' Popup menu for bread. 
@undocumented: draw_treeview_mask @undocumented:", "True) # Add Bread Items self.adj = self.scroll_win.get_hadjustment() self.add(self.crumb) def create_crumb(self, crumb): '''", "elif menu_color: cr.rectangle( x + self.button_width + 1, y + 2, self.menu_min -", "gtk.EventBox. @param event: The pointer event of type gtk.gdk.Event. ''' self.in_event_box = True", "# Draw background. if not widget.state == gtk.STATE_NORMAL: # Draw button border. def", "self.button_width + 1, y + 2, self.menu_min - 3, h -4) cr.fill() if", "draw_rectangle(cr, x + self.button_width, y + 1, self.menu_min, h - 1) # Draw", "widget): ''' Internal callback function to \"clicked\" signal. @param widget: Right button. '''", "left && right box static setting size # it is better to consider", "== None: self.menu_min = 0 cr = widget.window.cairo_create() rect = widget.allocation x, y,", "self.hbox.get_children() label = [] for o in obj[:-1]: label.append(\"/\"+o.label) o.destroy() self.entry = gtk.Entry()", "left_box = gtk.HBox(spacing = 0) right_box = gtk.HBox(spacing = 0) # FIXME: left", "gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))} def __init__(self, label, menu_items = None, font_size = DEFAULT_FONT_SIZE, padding_x =", "self.button_width, h, self.font_size, text_color, alignment = pango.ALIGN_CENTER) return True gobject.type_register(Crumb) if __name__ ==", "<NAME> <<EMAIL>> # # This program is free software: you can redistribute it", "i.destroy() self.item_list[index:] = [] self.add(crumbs) def remove_node_after_index(self, index): ''' Remove any nodes after", "for i in self.hbox.get_children()[(index + 1): -1]: i.destroy() self.item_list[(index + 1):] = []", "== \"__main__\": import gtk def add_panel(widget): crumb = Crumb(\"Child\",menu) bread.add(crumb) def change_root_node( widget):", "+ offset_x + menu_width - arrow_button_width, wy + offset_y + menu_height, ), (0,", "crumb: Support inputs are: [\"a label\", Menu] [(\"a label\",[(None, \"menu label\", None)])] Crumb", 
"index): ''' Remove any nodes after given index. @param index: To specified remove", "= inner_border menu_color = None arrow_pixbuf = arrow_right else: button_color = None menu_color", "path_list = path.split(\"/\")[1:] bread.change_node(0, [Crumb(i , menu) for i in path_list]) menu =", "ui_theme.get_color(\"title_text\").get_color() if self.in_button: button_color = inner_border menu_color = None arrow_pixbuf = arrow_right else:", "function to \"clicked\" signal. @param widget: Right button. ''' upper, page_size, value =", "v1: widget.set_value(v1),200,[value, value+shift_value]) ani.start() break if not upper > (page_size + self.adj.value +", "button_press_cb(self, widget, event): ''' Internal callback function to \"button-press-event\" signal. @param widget: Crumb", "if self.in_menu !=in_menu: self.in_menu = in_menu self.queue_draw() def create_menu(self, menu_items): ''' Internal function", "+= self.item_list[i] if temp >= value: shift_value = self.item_list[i] - (temp - value)", "h -3) if widget.state == gtk.STATE_ACTIVE: cr.set_source_rgba(*active_mask) if button_color: cr.rectangle(x + 2, y", "= self.create_menu(menu_items) if self.menu != None: self.menu.connect(\"hide\", self.hide_cb) self.menu_press = False self.menu_show =", "Menu([ (None, \"测试1\", None), (None, \"测试2\", None), ], shadow_visible = False, ) win", "= self.arrow_right crumb.arrow_down = self.arrow_down crumb.index_id = len(self.item_list) crumb.connect(\"item_clicked\", self.click_cb) self.hbox.pack_start(crumb, False, False)", "# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY", "1): -1]: i.destroy() self.item_list[(index + 1):] = [] def click_cb(self, widget, index, label):", "arrow_button_width, wy + offset_y + menu_height, ), (0, 0)) def set_label(self, label, font_size", "None: self.menu_min = 0 cr = widget.window.cairo_create() rect = widget.allocation x, y, w,", "Should move this part to Bread class since app_theme is golobalized. 
arrow_right =", "False, show_entry = True) bread.add([\"xxx\",menu]) # Must set_size bread.set_size(200, -1) bread.connect(\"entry-changed\", change_entry) #####################################", "default is None. @param max_width: Maximum width of bread menu, by default is", "widget: Crumb ''' if self.in_button: self.emit(\"item_clicked\", self.index_id, self.label) else: self.menu_press = False self.menu_show", "1.0))] menu_color = [(0, (disable_bg, 1.0)), (1, (disable_bg, 1.0))] # Draw background. if", "@param event: event of type gtk.gdk.event. ''' obj = self.hbox.get_children() label = []" ]
[ "from pathlib import Path result_dir = Path().home().joinpath('module_results/bfast_preanalysis') start = \"\"\" ### Start date", "Top-of-Atmosphere collections for the slected satellites. \"\"\" stats = \"\"\" ### Selection of", "\"\"\" select = \"\"\" ### Satellite selection Select the satellite(s) you want to", "type Choose between Surface Reflectance or Top-of-Atmosphere collections for the slected satellites. \"\"\"", "Path().home().joinpath('module_results/bfast_preanalysis') start = \"\"\" ### Start date selection Pick the date of the", "= \"\"\" ### Selection of collection type Choose between Surface Reflectance or Top-of-Atmosphere", "the date of the timeseries' end. \"\"\" select = \"\"\" ### Satellite selection", "the pre-analysis. \"\"\" sr = \"\"\" ### Selection of collection type Choose between", "date of the timeseries' start. \"\"\" end = \"\"\" ### End date selection", "of statistics Select the statistical measure you want to apply and switch on", "start = \"\"\" ### Start date selection Pick the date of the timeseries'", "Selection of statistics Select the statistical measure you want to apply and switch", "Surface Reflectance or Top-of-Atmosphere collections for the slected satellites. \"\"\" stats = \"\"\"", "\"\"\" ### End date selection Pick the date of the timeseries' end. \"\"\"", "pathlib import Path result_dir = Path().home().joinpath('module_results/bfast_preanalysis') start = \"\"\" ### Start date selection", "the timeseries' start. \"\"\" end = \"\"\" ### End date selection Pick the", "or Top-of-Atmosphere collections for the slected satellites. \"\"\" stats = \"\"\" ### Selection", "between Surface Reflectance or Top-of-Atmosphere collections for the slected satellites. \"\"\" stats =", "the slected satellites. \"\"\" stats = \"\"\" ### Selection of statistics Select the", "selection Pick the date of the timeseries' end. \"\"\" select = \"\"\" ###", "pre-analysis. 
\"\"\" sr = \"\"\" ### Selection of collection type Choose between Surface", "the statistical measure you want to apply and switch on annual for per-year", "timeseries' end. \"\"\" select = \"\"\" ### Satellite selection Select the satellite(s) you", "### Selection of statistics Select the statistical measure you want to apply and", "timeseries' start. \"\"\" end = \"\"\" ### End date selection Pick the date", "of the timeseries' end. \"\"\" select = \"\"\" ### Satellite selection Select the", "Selection of collection type Choose between Surface Reflectance or Top-of-Atmosphere collections for the", "select = \"\"\" ### Satellite selection Select the satellite(s) you want to include", "Pick the date of the timeseries' end. \"\"\" select = \"\"\" ### Satellite", "Path result_dir = Path().home().joinpath('module_results/bfast_preanalysis') start = \"\"\" ### Start date selection Pick the", "= \"\"\" ### End date selection Pick the date of the timeseries' end.", "the satellite(s) you want to include for the pre-analysis. \"\"\" sr = \"\"\"", "Select the satellite(s) you want to include for the pre-analysis. \"\"\" sr =", "\"\"\" stats = \"\"\" ### Selection of statistics Select the statistical measure you", "you want to include for the pre-analysis. \"\"\" sr = \"\"\" ### Selection", "stats = \"\"\" ### Selection of statistics Select the statistical measure you want", "= \"\"\" ### Start date selection Pick the date of the timeseries' start.", "date selection Pick the date of the timeseries' start. \"\"\" end = \"\"\"", "include for the pre-analysis. \"\"\" sr = \"\"\" ### Selection of collection type", "\"\"\" ### Selection of statistics Select the statistical measure you want to apply", "of the timeseries' start. \"\"\" end = \"\"\" ### End date selection Pick", "satellites. 
\"\"\" stats = \"\"\" ### Selection of statistics Select the statistical measure", "= \"\"\" ### Satellite selection Select the satellite(s) you want to include for", "the date of the timeseries' start. \"\"\" end = \"\"\" ### End date", "slected satellites. \"\"\" stats = \"\"\" ### Selection of statistics Select the statistical", "of collection type Choose between Surface Reflectance or Top-of-Atmosphere collections for the slected", "result_dir = Path().home().joinpath('module_results/bfast_preanalysis') start = \"\"\" ### Start date selection Pick the date", "Choose between Surface Reflectance or Top-of-Atmosphere collections for the slected satellites. \"\"\" stats", "Satellite selection Select the satellite(s) you want to include for the pre-analysis. \"\"\"", "statistical measure you want to apply and switch on annual for per-year calculations", "collection type Choose between Surface Reflectance or Top-of-Atmosphere collections for the slected satellites.", "import Path result_dir = Path().home().joinpath('module_results/bfast_preanalysis') start = \"\"\" ### Start date selection Pick", "date selection Pick the date of the timeseries' end. \"\"\" select = \"\"\"", "collections for the slected satellites. \"\"\" stats = \"\"\" ### Selection of statistics", "\"\"\" ### Start date selection Pick the date of the timeseries' start. \"\"\"", "\"\"\" sr = \"\"\" ### Selection of collection type Choose between Surface Reflectance", "date of the timeseries' end. \"\"\" select = \"\"\" ### Satellite selection Select", "\"\"\" ### Selection of collection type Choose between Surface Reflectance or Top-of-Atmosphere collections", "= \"\"\" ### Selection of statistics Select the statistical measure you want to", "measure you want to apply and switch on annual for per-year calculations \"\"\"", "### Start date selection Pick the date of the timeseries' start. \"\"\" end", "the timeseries' end. 
\"\"\" select = \"\"\" ### Satellite selection Select the satellite(s)", "Reflectance or Top-of-Atmosphere collections for the slected satellites. \"\"\" stats = \"\"\" ###", "want to include for the pre-analysis. \"\"\" sr = \"\"\" ### Selection of", "satellite(s) you want to include for the pre-analysis. \"\"\" sr = \"\"\" ###", "end = \"\"\" ### End date selection Pick the date of the timeseries'", "selection Pick the date of the timeseries' start. \"\"\" end = \"\"\" ###", "\"\"\" end = \"\"\" ### End date selection Pick the date of the", "for the pre-analysis. \"\"\" sr = \"\"\" ### Selection of collection type Choose", "for the slected satellites. \"\"\" stats = \"\"\" ### Selection of statistics Select", "### Selection of collection type Choose between Surface Reflectance or Top-of-Atmosphere collections for", "= Path().home().joinpath('module_results/bfast_preanalysis') start = \"\"\" ### Start date selection Pick the date of", "Start date selection Pick the date of the timeseries' start. \"\"\" end =", "### Satellite selection Select the satellite(s) you want to include for the pre-analysis.", "sr = \"\"\" ### Selection of collection type Choose between Surface Reflectance or", "\"\"\" ### Satellite selection Select the satellite(s) you want to include for the", "end. \"\"\" select = \"\"\" ### Satellite selection Select the satellite(s) you want", "Select the statistical measure you want to apply and switch on annual for", "Pick the date of the timeseries' start. \"\"\" end = \"\"\" ### End", "End date selection Pick the date of the timeseries' end. \"\"\" select =", "start. \"\"\" end = \"\"\" ### End date selection Pick the date of", "### End date selection Pick the date of the timeseries' end. \"\"\" select", "selection Select the satellite(s) you want to include for the pre-analysis. \"\"\" sr", "statistics Select the statistical measure you want to apply and switch on annual", "to include for the pre-analysis. 
\"\"\" sr = \"\"\" ### Selection of collection" ]
[ "import hashlib def prev_hash(hash_code): return hashlib.sha256(hash_code.encode()).hexdigest() def main(): game_hash = 'cc4a75236ecbc038c37729aa5ced461e36155319e88fa375c\\ 994933b6a42a0c4' print(prev_hash(game_hash))", "hashlib def prev_hash(hash_code): return hashlib.sha256(hash_code.encode()).hexdigest() def main(): game_hash = 'cc4a75236ecbc038c37729aa5ced461e36155319e88fa375c\\ 994933b6a42a0c4' print(prev_hash(game_hash)) main()" ]
[ "from home.views import HomeView # urlpatterns = [ # url(r'^', HomeView.as_view())) # ]", "django.conf.urls import url # from home.views import HomeView # urlpatterns = [ #", "from django.conf.urls import url # from home.views import HomeView # urlpatterns = [", "# from django.conf.urls import url # from home.views import HomeView # urlpatterns =", "url # from home.views import HomeView # urlpatterns = [ # url(r'^', HomeView.as_view()))", "# from home.views import HomeView # urlpatterns = [ # url(r'^', HomeView.as_view())) #", "import url # from home.views import HomeView # urlpatterns = [ # url(r'^'," ]
[ "unaligned_data_loader import UnalignedDataLoader from datasets.svhn import load_svhn from datasets.mnist import load_mnist from datasets.usps", "if source == 'usps': # or target == 'usps': usps = True train_source,", "all_use='no'): if data == 'svhn': train_image, train_label, \\ test_image, test_label = load_svhn() if", "load_gtsrb() return train_image, train_label, test_image, test_label # we don't need target just source", "= train_source S['labels'] = s_label_train # T['imgs'] = train_target # T['labels'] = t_label_train", "UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False, ) # dataset = train_loader.load_data() #", "test_label = load_usps(all_use=all_use) # if data == 'synth': # train_image, train_label, \\ #", "# T_test = {} usps = False if source == 'usps': # or", "load_mnist from datasets.usps import load_usps # from gtsrb import load_gtsrb # from synth_traffic", "load_usps(all_use=all_use) # if data == 'synth': # train_image, train_label, \\ # test_image, test_label", "\\ # test_image, test_label = load_gtsrb() return train_image, train_label, test_image, test_label # we", "train_image, train_label, test_image, test_label # we don't need target just source def dataset_read(source,", "usps=usps, # all_use=all_use) S['imgs'] = train_source S['labels'] = s_label_train # T['imgs'] = train_target", "from unaligned_data_loader import UnalignedDataLoader from datasets.svhn import load_svhn from datasets.mnist import load_mnist from", "= load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if data == 'usps': train_image, train_label, \\ test_image,", "= return_dataset(source, scale=scale, usps=usps, all_use=all_use) # train_target, t_label_train, test_target, t_label_test = return_dataset(target, scale=scale,", "test_target # T_test['labels'] = t_label_test scale = 40 if source == 'synth' else", "if source == 'synth' else 28 if source == 'usps' or target ==", "train_loader = 
create_DataLoader(S, batch_size, scale=scale, shuffle=False, ) # dataset = train_loader.load_data() # test_loader", "usps=usps, all_use=all_use) print(train_image.shape) if data == 'usps': train_image, train_label, \\ test_image, test_label =", "== 'synth': # train_image, train_label, \\ # test_image, test_label = load_syntraffic() # if", "40 if source == 'synth' else 28 if source == 'usps' else 32", "or target == 'usps' else 32 # scale = 40 if source ==", "datasets.mnist import load_mnist from datasets.usps import load_usps # from gtsrb import load_gtsrb #", "== 'gtsrb': # train_image, train_label, \\ # test_image, test_label = load_gtsrb() return train_image,", "'synth' else 28 if source == 'usps' or target == 'usps' else 32", "train_label, \\ # test_image, test_label = load_syntraffic() # if data == 'gtsrb': #", "scale=scale, usps=usps, # all_use=all_use) S['imgs'] = train_source S['labels'] = s_label_train # T['imgs'] =", "== 'usps' else 32 # train_loader = UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size, scale=scale,", "test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if data == 'usps': train_image, train_label,", "test_image, test_label # we don't need target just source def dataset_read(source, target, batch_size,", "return_dataset(source, scale=scale, usps=usps, all_use=all_use) # train_target, t_label_train, test_target, t_label_test = return_dataset(target, scale=scale, usps=usps,", "s_label_train # T['imgs'] = train_target # T['labels'] = t_label_train # input target samples", "train_label, \\ test_image, test_label = load_usps(all_use=all_use) # if data == 'synth': # train_image,", "= True train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale, usps=usps, all_use=all_use) # train_target,", "load_svhn() if data == 'mnist': train_image, train_label, \\ test_image, test_label = load_mnist(scale=scale, usps=usps,", "need target just source def 
dataset_read(source, target, batch_size, scale=False, all_use='no'): # Return train", "'usps': usps = True train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale, usps=usps, all_use=all_use)", "= UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False) # dataset_test = test_loader.load_data() return", "= 40 if source == 'synth' else 28 if source == 'usps' or", "= load_usps(all_use=all_use) # if data == 'synth': # train_image, train_label, \\ # test_image,", "= t_label_test scale = 40 if source == 'synth' else 28 if source", "source == 'usps': # or target == 'usps': usps = True train_source, s_label_train,", "source == 'usps' else 32 # train_loader = UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size,", "all_use=all_use) S['imgs'] = train_source S['labels'] = s_label_train # T['imgs'] = train_target # T['labels']", "return train_image, train_label, test_image, test_label # we don't need target just source def", "train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale, usps=usps, all_use=all_use) # train_target, t_label_train, test_target,", "# Return train and test loader S = {} S_test = {} #", "# all_use=all_use) S['imgs'] = train_source S['labels'] = s_label_train # T['imgs'] = train_target #", "batch_size, scale=False, all_use='no'): # Return train and test loader S = {} S_test", "import load_svhn from datasets.mnist import load_mnist from datasets.usps import load_usps # from gtsrb", "train_target # T['labels'] = t_label_train # input target samples for both S_test['imgs'] =", "def dataset_read(source, target, batch_size, scale=False, all_use='no'): # Return train and test loader S", "UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False) # dataset_test = test_loader.load_data() return train_loader,", "T_test['imgs'] = test_target # T_test['labels'] = t_label_test scale = 40 if source 
==", "= load_gtsrb() return train_image, train_label, test_image, test_label # we don't need target just", "== 'svhn': train_image, train_label, \\ test_image, test_label = load_svhn() if data == 'mnist':", "if data == 'mnist': train_image, train_label, \\ test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use)", "= False if source == 'usps': # or target == 'usps': usps =", "T = {} # T_test = {} usps = False if source ==", "datasets.svhn import load_svhn from datasets.mnist import load_mnist from datasets.usps import load_usps # from", "test_image, test_label = load_svhn() if data == 'mnist': train_image, train_label, \\ test_image, test_label", "False if source == 'usps': # or target == 'usps': usps = True", "= {} S_test = {} # T = {} # T_test = {}", "print(train_image.shape) if data == 'usps': train_image, train_label, \\ test_image, test_label = load_usps(all_use=all_use) #", "create_DataLoader(S, batch_size, scale=scale, shuffle=False, ) # dataset = train_loader.load_data() # test_loader = UnalignedDataLoader()", "dataset = train_loader.load_data() # test_loader = UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False)", "test_source S_test['labels'] = s_label_test # T_test['imgs'] = test_target # T_test['labels'] = t_label_test scale", "\\ test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if data == 'usps': train_image,", "t_label_test scale = 40 if source == 'synth' else 28 if source ==", "S['labels'] = s_label_train # T['imgs'] = train_target # T['labels'] = t_label_train # input", "and test loader S = {} S_test = {} # T = {}", "== 'usps' or target == 'usps' else 32 # scale = 40 if", "test_image, test_label = load_usps(all_use=all_use) # if data == 'synth': # train_image, train_label, \\", "import load_syntraffic from datasets.create_dataloader import create_DataLoader def return_dataset(data, scale=False, usps=False, all_use='no'): if data", 
"sys.path.append('../loader') # from unaligned_data_loader import UnalignedDataLoader from datasets.svhn import load_svhn from datasets.mnist import", "== 'synth' else 28 if source == 'usps' or target == 'usps' else", "= s_label_test # T_test['imgs'] = test_target # T_test['labels'] = t_label_test scale = 40", "'gtsrb': # train_image, train_label, \\ # test_image, test_label = load_gtsrb() return train_image, train_label,", "from datasets.svhn import load_svhn from datasets.mnist import load_mnist from datasets.usps import load_usps #", "= {} # T = {} # T_test = {} usps = False", "if source == 'synth' else 28 if source == 'usps' else 32 #", "from datasets.create_dataloader import create_DataLoader def return_dataset(data, scale=False, usps=False, all_use='no'): if data == 'svhn':", "usps = False if source == 'usps': # or target == 'usps': usps", "# dataset = train_loader.load_data() # test_loader = UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size, scale=scale,", "# from unaligned_data_loader import UnalignedDataLoader from datasets.svhn import load_svhn from datasets.mnist import load_mnist", "usps=False, all_use='no'): if data == 'svhn': train_image, train_label, \\ test_image, test_label = load_svhn()", "test_image, test_label = load_gtsrb() return train_image, train_label, test_image, test_label # we don't need", "target == 'usps' else 32 # scale = 40 if source == 'synth'", "target just source def dataset_read(source, target, batch_size, scale=False, all_use='no'): # Return train and", "t_label_test = return_dataset(target, scale=scale, usps=usps, # all_use=all_use) S['imgs'] = train_source S['labels'] = s_label_train", "from gtsrb import load_gtsrb # from synth_traffic import load_syntraffic from datasets.create_dataloader import create_DataLoader", "load_syntraffic() # if data == 'gtsrb': # train_image, train_label, \\ # test_image, test_label", "40 if source == 'synth' else 28 if source == 'usps' or target", "source def 
dataset_read(source, target, batch_size, scale=False, all_use='no'): # Return train and test loader", "= load_svhn() if data == 'mnist': train_image, train_label, \\ test_image, test_label = load_mnist(scale=scale,", "import UnalignedDataLoader from datasets.svhn import load_svhn from datasets.mnist import load_mnist from datasets.usps import", "test_target, t_label_test = return_dataset(target, scale=scale, usps=usps, # all_use=all_use) S['imgs'] = train_source S['labels'] =", "# if data == 'synth': # train_image, train_label, \\ # test_image, test_label =", "# test_loader = UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False) # dataset_test =", "data == 'svhn': train_image, train_label, \\ test_image, test_label = load_svhn() if data ==", "== 'usps': usps = True train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale, usps=usps,", "train_label, \\ test_image, test_label = load_svhn() if data == 'mnist': train_image, train_label, \\", "# scale = 40 if source == 'synth' else 28 if source ==", "# if data == 'gtsrb': # train_image, train_label, \\ # test_image, test_label =", "'svhn': train_image, train_label, \\ test_image, test_label = load_svhn() if data == 'mnist': train_image,", "train_label, \\ test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if data == 'usps':", "if data == 'gtsrb': # train_image, train_label, \\ # test_image, test_label = load_gtsrb()", "'synth' else 28 if source == 'usps' else 32 # train_loader = UnalignedDataLoader()", "if data == 'usps': train_image, train_label, \\ test_image, test_label = load_usps(all_use=all_use) # if", "test_label = load_syntraffic() # if data == 'gtsrb': # train_image, train_label, \\ #", "return_dataset(data, scale=False, usps=False, all_use='no'): if data == 'svhn': train_image, train_label, \\ test_image, test_label", "# test_image, test_label = load_gtsrb() return train_image, 
train_label, test_image, test_label # we don't", "32 # train_loader = UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False, ) #", "= t_label_train # input target samples for both S_test['imgs'] = test_source S_test['labels'] =", "S_test = {} # T = {} # T_test = {} usps =", "for both S_test['imgs'] = test_source S_test['labels'] = s_label_test # T_test['imgs'] = test_target #", "train_label, test_image, test_label # we don't need target just source def dataset_read(source, target,", "S['imgs'] = train_source S['labels'] = s_label_train # T['imgs'] = train_target # T['labels'] =", "# input target samples for both S_test['imgs'] = test_source S_test['labels'] = s_label_test #", "True train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale, usps=usps, all_use=all_use) # train_target, t_label_train,", "sys sys.path.append('../loader') # from unaligned_data_loader import UnalignedDataLoader from datasets.svhn import load_svhn from datasets.mnist", "T['imgs'] = train_target # T['labels'] = t_label_train # input target samples for both", "load_usps # from gtsrb import load_gtsrb # from synth_traffic import load_syntraffic from datasets.create_dataloader", "train_source S['labels'] = s_label_train # T['imgs'] = train_target # T['labels'] = t_label_train #", "else 32 # scale = 40 if source == 'synth' else 28 if", "'usps': train_image, train_label, \\ test_image, test_label = load_usps(all_use=all_use) # if data == 'synth':", "target, batch_size, scale=False, all_use='no'): # Return train and test loader S = {}", "both S_test['imgs'] = test_source S_test['labels'] = s_label_test # T_test['imgs'] = test_target # T_test['labels']", "= UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False, ) # dataset = train_loader.load_data()", "== 'usps': # or target == 'usps': usps = True train_source, s_label_train, test_source,", "train_image, train_label, \\ 
test_image, test_label = load_usps(all_use=all_use) # if data == 'synth': #", "source == 'synth' else 28 if source == 'usps' or target == 'usps'", "S_test['imgs'] = test_source S_test['labels'] = s_label_test # T_test['imgs'] = test_target # T_test['labels'] =", "== 'usps': train_image, train_label, \\ test_image, test_label = load_usps(all_use=all_use) # if data ==", "import load_gtsrb # from synth_traffic import load_syntraffic from datasets.create_dataloader import create_DataLoader def return_dataset(data,", "= {} usps = False if source == 'usps': # or target ==", "'usps' or target == 'usps' else 32 # scale = 40 if source", "t_label_train # input target samples for both S_test['imgs'] = test_source S_test['labels'] = s_label_test", "else 28 if source == 'usps' or target == 'usps' else 32 #", "# train_target, t_label_train, test_target, t_label_test = return_dataset(target, scale=scale, usps=usps, # all_use=all_use) S['imgs'] =", "# T = {} # T_test = {} usps = False if source", "load_gtsrb # from synth_traffic import load_syntraffic from datasets.create_dataloader import create_DataLoader def return_dataset(data, scale=False,", "don't need target just source def dataset_read(source, target, batch_size, scale=False, all_use='no'): # Return", "all_use='no'): # Return train and test loader S = {} S_test = {}", "if source == 'usps' or target == 'usps' else 32 # scale =", "scale=scale, shuffle=False, ) # dataset = train_loader.load_data() # test_loader = UnalignedDataLoader() val_loader =", "data == 'gtsrb': # train_image, train_label, \\ # test_image, test_label = load_gtsrb() return", "test_label # we don't need target just source def dataset_read(source, target, batch_size, scale=False,", "loader S = {} S_test = {} # T = {} # T_test", "= load_syntraffic() # if data == 'gtsrb': # train_image, train_label, \\ # test_image,", "we don't need target just source def dataset_read(source, target, batch_size, scale=False, all_use='no'): #", "samples for both S_test['imgs'] 
= test_source S_test['labels'] = s_label_test # T_test['imgs'] = test_target", "import load_usps # from gtsrb import load_gtsrb # from synth_traffic import load_syntraffic from", "# from synth_traffic import load_syntraffic from datasets.create_dataloader import create_DataLoader def return_dataset(data, scale=False, usps=False,", "test_source, s_label_test = return_dataset(source, scale=scale, usps=usps, all_use=all_use) # train_target, t_label_train, test_target, t_label_test =", "# test_image, test_label = load_syntraffic() # if data == 'gtsrb': # train_image, train_label,", "= s_label_train # T['imgs'] = train_target # T['labels'] = t_label_train # input target", "# from gtsrb import load_gtsrb # from synth_traffic import load_syntraffic from datasets.create_dataloader import", "{} # T_test = {} usps = False if source == 'usps': #", "== 'mnist': train_image, train_label, \\ test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if", "t_label_train, test_target, t_label_test = return_dataset(target, scale=scale, usps=usps, # all_use=all_use) S['imgs'] = train_source S['labels']", "from datasets.usps import load_usps # from gtsrb import load_gtsrb # from synth_traffic import", "'usps' else 32 # train_loader = UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False,", "train_image, train_label, \\ # test_image, test_label = load_syntraffic() # if data == 'gtsrb':", "scale = 40 if source == 'synth' else 28 if source == 'usps'", "'mnist': train_image, train_label, \\ test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if data", "train_label, \\ # test_image, test_label = load_gtsrb() return train_image, train_label, test_image, test_label #", "val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False) # dataset_test = test_loader.load_data() return train_loader, val_loader", "load_syntraffic from 
datasets.create_dataloader import create_DataLoader def return_dataset(data, scale=False, usps=False, all_use='no'): if data ==", "train_image, train_label, \\ # test_image, test_label = load_gtsrb() return train_image, train_label, test_image, test_label", "# T['labels'] = t_label_train # input target samples for both S_test['imgs'] = test_source", "== 'usps' else 32 # scale = 40 if source == 'synth' else", "train_target, t_label_train, test_target, t_label_test = return_dataset(target, scale=scale, usps=usps, # all_use=all_use) S['imgs'] = train_source", "usps = True train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale, usps=usps, all_use=all_use) #", "UnalignedDataLoader from datasets.svhn import load_svhn from datasets.mnist import load_mnist from datasets.usps import load_usps", "'synth': # train_image, train_label, \\ # test_image, test_label = load_syntraffic() # if data", "import load_mnist from datasets.usps import load_usps # from gtsrb import load_gtsrb # from", "train_loader.load_data() # test_loader = UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False) # dataset_test", "\\ test_image, test_label = load_usps(all_use=all_use) # if data == 'synth': # train_image, train_label,", "input target samples for both S_test['imgs'] = test_source S_test['labels'] = s_label_test # T_test['imgs']", "s_label_test # T_test['imgs'] = test_target # T_test['labels'] = t_label_test scale = 40 if", "dataset_read(source, target, batch_size, scale=False, all_use='no'): # Return train and test loader S =", "= train_loader.load_data() # test_loader = UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False) #", "= test_source S_test['labels'] = s_label_test # T_test['imgs'] = test_target # T_test['labels'] = t_label_test", "# or target == 'usps': usps = True train_source, s_label_train, test_source, s_label_test =", "= train_target # T['labels'] = 
t_label_train # input target samples for both S_test['imgs']", "= 40 if source == 'synth' else 28 if source == 'usps' else", "source == 'synth' else 28 if source == 'usps' else 32 # train_loader", "# train_image, train_label, \\ # test_image, test_label = load_syntraffic() # if data ==", "datasets.create_dataloader import create_DataLoader def return_dataset(data, scale=False, usps=False, all_use='no'): if data == 'svhn': train_image,", "= test_target # T_test['labels'] = t_label_test scale = 40 if source == 'synth'", "'usps' else 32 # scale = 40 if source == 'synth' else 28", "import create_DataLoader def return_dataset(data, scale=False, usps=False, all_use='no'): if data == 'svhn': train_image, train_label,", "== 'synth' else 28 if source == 'usps' else 32 # train_loader =", "T_test = {} usps = False if source == 'usps': # or target", "test_label = load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if data == 'usps': train_image, train_label, \\", "usps=usps, all_use=all_use) # train_target, t_label_train, test_target, t_label_test = return_dataset(target, scale=scale, usps=usps, # all_use=all_use)", "= create_DataLoader(S, batch_size, scale=scale, shuffle=False, ) # dataset = train_loader.load_data() # test_loader =", "scale=False, all_use='no'): # Return train and test loader S = {} S_test =", "s_label_train, test_source, s_label_test = return_dataset(source, scale=scale, usps=usps, all_use=all_use) # train_target, t_label_train, test_target, t_label_test", "shuffle=False, ) # dataset = train_loader.load_data() # test_loader = UnalignedDataLoader() val_loader = create_DataLoader(S_test,", "scale=False, usps=False, all_use='no'): if data == 'svhn': train_image, train_label, \\ test_image, test_label =", "just source def dataset_read(source, target, batch_size, scale=False, all_use='no'): # Return train and test", "from datasets.mnist import load_mnist from datasets.usps import load_usps # from gtsrb import load_gtsrb", "\\ test_image, 
test_label = load_svhn() if data == 'mnist': train_image, train_label, \\ test_image,", "test_image, test_label = load_syntraffic() # if data == 'gtsrb': # train_image, train_label, \\", "or target == 'usps': usps = True train_source, s_label_train, test_source, s_label_test = return_dataset(source,", "T_test['labels'] = t_label_test scale = 40 if source == 'synth' else 28 if", "if data == 'svhn': train_image, train_label, \\ test_image, test_label = load_svhn() if data", "S = {} S_test = {} # T = {} # T_test =", "target samples for both S_test['imgs'] = test_source S_test['labels'] = s_label_test # T_test['imgs'] =", "synth_traffic import load_syntraffic from datasets.create_dataloader import create_DataLoader def return_dataset(data, scale=False, usps=False, all_use='no'): if", "load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if data == 'usps': train_image, train_label, \\ test_image, test_label", "all_use=all_use) print(train_image.shape) if data == 'usps': train_image, train_label, \\ test_image, test_label = load_usps(all_use=all_use)", "# train_loader = UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False, ) # dataset", "data == 'usps': train_image, train_label, \\ test_image, test_label = load_usps(all_use=all_use) # if data", "# T_test['imgs'] = test_target # T_test['labels'] = t_label_test scale = 40 if source", "load_svhn from datasets.mnist import load_mnist from datasets.usps import load_usps # from gtsrb import", "gtsrb import load_gtsrb # from synth_traffic import load_syntraffic from datasets.create_dataloader import create_DataLoader def", "test_loader = UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False) # dataset_test = test_loader.load_data()", "\\ # test_image, test_label = load_syntraffic() # if data == 'gtsrb': # train_image,", "create_DataLoader def return_dataset(data, scale=False, usps=False, all_use='no'): if data == 
'svhn': train_image, train_label, \\", "S_test['labels'] = s_label_test # T_test['imgs'] = test_target # T_test['labels'] = t_label_test scale =", "test_label = load_gtsrb() return train_image, train_label, test_image, test_label # we don't need target", "test_label = load_svhn() if data == 'mnist': train_image, train_label, \\ test_image, test_label =", "test loader S = {} S_test = {} # T = {} #", "if data == 'synth': # train_image, train_label, \\ # test_image, test_label = load_syntraffic()", "train_image, train_label, \\ test_image, test_label = load_svhn() if data == 'mnist': train_image, train_label,", "train_image, train_label, \\ test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape) if data ==", "data == 'mnist': train_image, train_label, \\ test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use) print(train_image.shape)", "train and test loader S = {} S_test = {} # T =", "T['labels'] = t_label_train # input target samples for both S_test['imgs'] = test_source S_test['labels']", "else 32 # train_loader = UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False, )", "{} # T = {} # T_test = {} usps = False if", "train_loader = UnalignedDataLoader() train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False, ) # dataset =", "from synth_traffic import load_syntraffic from datasets.create_dataloader import create_DataLoader def return_dataset(data, scale=False, usps=False, all_use='no'):", "all_use=all_use) # train_target, t_label_train, test_target, t_label_test = return_dataset(target, scale=scale, usps=usps, # all_use=all_use) S['imgs']", "32 # scale = 40 if source == 'synth' else 28 if source", "28 if source == 'usps' or target == 'usps' else 32 # scale", "source == 'usps' or target == 'usps' else 32 # scale = 40", "scale=scale, usps=usps, all_use=all_use) # train_target, t_label_train, test_target, t_label_test = 
return_dataset(target, scale=scale, usps=usps, #", "# T['imgs'] = train_target # T['labels'] = t_label_train # input target samples for", "def return_dataset(data, scale=False, usps=False, all_use='no'): if data == 'svhn': train_image, train_label, \\ test_image,", "if source == 'usps' else 32 # train_loader = UnalignedDataLoader() train_loader = create_DataLoader(S,", "import sys sys.path.append('../loader') # from unaligned_data_loader import UnalignedDataLoader from datasets.svhn import load_svhn from", ") # dataset = train_loader.load_data() # test_loader = UnalignedDataLoader() val_loader = create_DataLoader(S_test, batch_size,", "{} S_test = {} # T = {} # T_test = {} usps", "= return_dataset(target, scale=scale, usps=usps, # all_use=all_use) S['imgs'] = train_source S['labels'] = s_label_train #", "datasets.usps import load_usps # from gtsrb import load_gtsrb # from synth_traffic import load_syntraffic", "batch_size, scale=scale, shuffle=False, ) # dataset = train_loader.load_data() # test_loader = UnalignedDataLoader() val_loader", "# we don't need target just source def dataset_read(source, target, batch_size, scale=False, all_use='no'):", "Return train and test loader S = {} S_test = {} # T", "target == 'usps': usps = True train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale,", "{} usps = False if source == 'usps': # or target == 'usps':", "return_dataset(target, scale=scale, usps=usps, # all_use=all_use) S['imgs'] = train_source S['labels'] = s_label_train # T['imgs']", "else 28 if source == 'usps' else 32 # train_loader = UnalignedDataLoader() train_loader", "'usps': # or target == 'usps': usps = True train_source, s_label_train, test_source, s_label_test", "# T_test['labels'] = t_label_test scale = 40 if source == 'synth' else 28", "28 if source == 'usps' else 32 # train_loader = UnalignedDataLoader() train_loader =", "data == 'synth': # train_image, train_label, \\ # test_image, test_label = load_syntraffic() #", 
"# train_image, train_label, \\ # test_image, test_label = load_gtsrb() return train_image, train_label, test_image,", "= {} # T_test = {} usps = False if source == 'usps':", "s_label_test = return_dataset(source, scale=scale, usps=usps, all_use=all_use) # train_target, t_label_train, test_target, t_label_test = return_dataset(target," ]
[ "import requests import urllib2 import os import shutil def find_files(url): # url =", "\"Downloading: %s Bytes: %s\" % (file_name, file_size) file_size_dl = 0 block_sz = 8192", "status = status + chr(8) * (len(status) + 1) print status, f.close() return", "folder_path + bro[i], bro[i], 'suricata') return file_sizes def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print", "__name__ == '__main__': datasets_size = 0 if len(sys.argv) == 2: url = sys.argv[1]", "= \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path = directiry_name", "bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'bro') # Suricata folder_path =", "url.split('/') names.pop() return names.pop() if __name__ == '__main__': datasets_size = 0 if len(sys.argv)", "bro = find_files(url + 'bro/') for i in range(len(bro)): if '.log' in bro[i]:", "soup.find_all('a'): try: # print a['href'] hrefs.append(a['href']) except: pass # print hrefs return hrefs", "save_file2(url, folder_path + bro[i], bro[i], 'suricata') return file_sizes def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder):", "\"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = [] for a in soup.find_all('a'): try:", "hrefs def save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name)", "in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'bro') # Suricata folder_path", "\"\"\" import sys from bs4 import BeautifulSoup import requests import urllib2 import os", "(file_size_dl, file_size_dl * 100. 
/ file_size) status = status + chr(8) * (len(status)", "import sys from bs4 import BeautifulSoup import requests import urllib2 import os import", "i in range(len(bro)): if '.log' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i],", "\"lxml\") hrefs = [] for a in soup.find_all('a'): try: # print a['href'] hrefs.append(a['href'])", "+ 'bro/') for i in range(len(bro)): if '.log' in bro[i]: file_sizes += save_file2(url,", "range(len(bro)): if '.log' in bro[i] or '.json' in bro[i]: file_sizes += save_file2(url, folder_path", "True: buffer = u.read(block_sz) if not buffer: break file_size_dl += len(buffer) f.write(buffer) status", "f.close() return file_size def get_dataset_name_from_url(url): names = url.split('/') names.pop() return names.pop() if __name__", "= r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. / file_size) status = status", "names.pop() return names.pop() if __name__ == '__main__': datasets_size = 0 if len(sys.argv) ==", "os import shutil def find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\")", "USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from bs4 import BeautifulSoup import requests", "bs4 import BeautifulSoup import requests import urllib2 import os import shutil def find_files(url):", "= \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = [] for a in soup.find_all('a'):", "+= len(buffer) f.write(buffer) status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. /", "+ dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path = directiry_name + \"/bro/\"", "status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ file_size) status =", "downloading...\" file_size = 0 u = urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' + bro_log)", "2: url = sys.argv[1] datasets_size += save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet Dataset", "in bro[i] or '.json' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i],", "bro_or_suricata_folder): print bro_log, \"is downloading...\" file_size = 0 u = urllib2.urlopen(dataset_url + bro_or_suricata_folder", "= urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' + bro_log) meta = u.info() file_size +=", "print a['href'] hrefs.append(a['href']) except: pass # print hrefs return hrefs def save_manager(url, dataset_name):", "# url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = [] for a", "Bro folder_path = directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url", "f = open(file_name, 'wb') print \"Downloading: %s Bytes: %s\" % (file_name, file_size) file_size_dl", "\"is downloading...\" file_size = 0 u = urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' +", "block_sz = 8192 while True: buffer = u.read(block_sz) if not buffer: break file_size_dl", "(len(status) + 1) print status, f.close() return file_size def get_dataset_name_from_url(url): names = url.split('/')", "0 bro = find_files(url + 'bro/') for i in range(len(bro)): if '.log' in", "directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'bro/') for", "os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path = directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes =", "urllib2 import os import shutil def find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup =", "bro_log, \"is downloading...\" file_size = 0 u = urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/'", 
"len(sys.argv) == 2: url = sys.argv[1] datasets_size += save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print", "print bro_log, \"is downloading...\" file_size = 0 u = urllib2.urlopen(dataset_url + bro_or_suricata_folder +", "* (len(status) + 1) print status, f.close() return file_size def get_dataset_name_from_url(url): names =", "u = urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' + bro_log) meta = u.info() file_size", "file_size def get_dataset_name_from_url(url): names = url.split('/') names.pop() return names.pop() if __name__ == '__main__':", "# print hrefs return hrefs def save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name", "= [] for a in soup.find_all('a'): try: # print a['href'] hrefs.append(a['href']) except: pass", "folder. USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from bs4 import BeautifulSoup import", "shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path = directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes = 0", "0 block_sz = 8192 while True: buffer = u.read(block_sz) if not buffer: break", "file_size_dl = 0 block_sz = 8192 while True: buffer = u.read(block_sz) if not", "shutil def find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs =", "% (file_name, file_size) file_size_dl = 0 block_sz = 8192 while True: buffer =", "bro_or_suricata_folder + '/' + bro_log) meta = u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f =", "* 100. / file_size) status = status + chr(8) * (len(status) + 1)", "f.write(buffer) status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ file_size) status", "0 if len(sys.argv) == 2: url = sys.argv[1] datasets_size += save_manager(url, get_dataset_name_from_url(url)) #", "= 0 bro = find_files(url + 'bro/') for i in range(len(bro)): if '.log'", "folder_path = directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url +", "= BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = [] for a in soup.find_all('a'): try: # print", "for i in range(len(bro)): if '.log' in bro[i] or '.json' in bro[i]: file_sizes", "from bs4 import BeautifulSoup import requests import urllib2 import os import shutil def", "soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = [] for a in soup.find_all('a'): try: #", "find_files(url + 'suricata/') for i in range(len(bro)): if '.log' in bro[i] or '.json'", "return file_sizes def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print bro_log, \"is downloading...\" file_size =", "def get_dataset_name_from_url(url): names = url.split('/') names.pop() return names.pop() if __name__ == '__main__': datasets_size", "print status, f.close() return file_size def get_dataset_name_from_url(url): names = url.split('/') names.pop() return names.pop()", "+= save_file2(url, folder_path + bro[i], bro[i], 'bro') # Suricata folder_path = directiry_name +", "in range(len(bro)): if '.log' in bro[i] or '.json' in bro[i]: file_sizes += save_file2(url,", "which have bro folder. 
USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from bs4", "'.log' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'bro') # Suricata", "def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print bro_log, \"is downloading...\" file_size = 0 u", "get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet Dataset size:\", (datasets_size / (1024.0 * 1024.0)), \"MB\"", "print \"Downloading: %s Bytes: %s\" % (file_name, file_size) file_size_dl = 0 block_sz =", "= status + chr(8) * (len(status) + 1) print status, f.close() return file_size", "save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro", "DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from bs4 import BeautifulSoup import requests import urllib2", "8192 while True: buffer = u.read(block_sz) if not buffer: break file_size_dl += len(buffer)", "\"\"\" Download all datasets which have bro folder. 
USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\"", "https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from bs4 import BeautifulSoup import requests import urllib2 import", "url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = [] for a in", "\"/suricata/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'suricata/') for i in", "bro[i], bro[i], 'bro') # Suricata folder_path = directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes =", "= 8192 while True: buffer = u.read(block_sz) if not buffer: break file_size_dl +=", "file_size) status = status + chr(8) * (len(status) + 1) print status, f.close()", "os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'suricata/') for i in range(len(bro)):", "+ bro_or_suricata_folder + '/' + bro_log) meta = u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f", "bro[i] or '.json' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'suricata')", "break file_size_dl += len(buffer) f.write(buffer) status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl *", "bro[i], 'suricata') return file_sizes def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print bro_log, \"is downloading...\"", "not buffer: break file_size_dl += len(buffer) f.write(buffer) status = r\"%10d [%3.2f%%]\" % (file_size_dl,", "= 0 u = urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' + bro_log) meta =", "+ 'suricata/') for i in range(len(bro)): if '.log' in bro[i] or '.json' in", "directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'suricata/') for", "+ bro[i], bro[i], 'suricata') return file_sizes def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print bro_log,", "dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if 
os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path", "pass # print hrefs return hrefs def save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" +", "in range(len(bro)): if '.log' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i],", "for a in soup.find_all('a'): try: # print a['href'] hrefs.append(a['href']) except: pass # print", "open(file_name, 'wb') print \"Downloading: %s Bytes: %s\" % (file_name, file_size) file_size_dl = 0", "+ bro[i], bro[i], 'bro') # Suricata folder_path = directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes", "100. / file_size) status = status + chr(8) * (len(status) + 1) print", "'/' + bro_log) meta = u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb')", "file_size_dl += len(buffer) f.write(buffer) status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100.", "= 0 block_sz = 8192 while True: buffer = u.read(block_sz) if not buffer:", "= directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'suricata/')", "in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'suricata') return file_sizes def", "if '.log' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'bro') #", "find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = [] for", "# print a['href'] hrefs.append(a['href']) except: pass # print hrefs return hrefs def save_manager(url,", "hrefs.append(a['href']) except: pass # print hrefs return hrefs def save_manager(url, dataset_name): directiry_name =", "'bro') # Suricata folder_path = directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes = 0 bro", "= open(file_name, 'wb') print \"Downloading: %s Bytes: %s\" % (file_name, file_size) file_size_dl =", "status + 
chr(8) * (len(status) + 1) print status, f.close() return file_size def", "= url.split('/') names.pop() return names.pop() if __name__ == '__main__': datasets_size = 0 if", "= 0 if len(sys.argv) == 2: url = sys.argv[1] datasets_size += save_manager(url, get_dataset_name_from_url(url))", "= sys.argv[1] datasets_size += save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet Dataset size:\", (datasets_size", "def find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = []", "return file_size def get_dataset_name_from_url(url): names = url.split('/') names.pop() return names.pop() if __name__ ==", "i in range(len(bro)): if '.log' in bro[i] or '.json' in bro[i]: file_sizes +=", "import BeautifulSoup import requests import urllib2 import os import shutil def find_files(url): #", "r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. / file_size) status = status +", "file_size_dl * 100. 
/ file_size) status = status + chr(8) * (len(status) +", "bro_log) meta = u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb') print \"Downloading:", "bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'suricata') return file_sizes def save_file2(dataset_url,", "if '.log' in bro[i] or '.json' in bro[i]: file_sizes += save_file2(url, folder_path +", "= directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'bro/')", "return hrefs def save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name)", "0 bro = find_files(url + 'suricata/') for i in range(len(bro)): if '.log' in", "bro_log, bro_or_suricata_folder): print bro_log, \"is downloading...\" file_size = 0 u = urllib2.urlopen(dataset_url +", "int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb') print \"Downloading: %s Bytes: %s\" % (file_name, file_size)", "1) print status, f.close() return file_size def get_dataset_name_from_url(url): names = url.split('/') names.pop() return", "while True: buffer = u.read(block_sz) if not buffer: break file_size_dl += len(buffer) f.write(buffer)", "or '.json' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'suricata') return", "bro = find_files(url + 'suricata/') for i in range(len(bro)): if '.log' in bro[i]", "BeautifulSoup import requests import urllib2 import os import shutil def find_files(url): # url", "%s\" % (file_name, file_size) file_size_dl = 0 block_sz = 8192 while True: buffer", "sys from bs4 import BeautifulSoup import requests import urllib2 import os import shutil", "save_file2(url, folder_path + bro[i], bro[i], 'bro') # Suricata folder_path = directiry_name + \"/suricata/\"", "hrefs = [] for a in soup.find_all('a'): try: # print a['href'] hrefs.append(a['href']) except:", "import shutil 
def find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text, \"lxml\") hrefs", "hrefs return hrefs def save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if os.path.exists(directiry_name):", "datasets which have bro folder. USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from", "\"/bro/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'bro/') for i in", "= 0 bro = find_files(url + 'suricata/') for i in range(len(bro)): if '.log'", "bro[i], bro[i], 'suricata') return file_sizes def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print bro_log, \"is", "'__main__': datasets_size = 0 if len(sys.argv) == 2: url = sys.argv[1] datasets_size +=", "sys.argv[1] datasets_size += save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet Dataset size:\", (datasets_size /", "folder_path = directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url +", "in soup.find_all('a'): try: # print a['href'] hrefs.append(a['href']) except: pass # print hrefs return", "file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'suricata') return file_sizes def save_file2(dataset_url, file_name,", "0 u = urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' + bro_log) meta = u.info()", "save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print bro_log, \"is downloading...\" file_size = 0 u =", "bro[i], 'bro') # Suricata folder_path = directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes = 0", "if __name__ == '__main__': datasets_size = 0 if len(sys.argv) == 2: url =", "chr(8) * (len(status) + 1) print status, f.close() return file_size def get_dataset_name_from_url(url): names", "all datasets which have bro folder. 
USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys", "except: pass # print hrefs return hrefs def save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\"", "+ \"/bro/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'bro/') for i", "urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' + bro_log) meta = u.info() file_size += int(meta.getheaders(\"Content-Length\")[0])", "u.read(block_sz) if not buffer: break file_size_dl += len(buffer) f.write(buffer) status = r\"%10d [%3.2f%%]\"", "status, f.close() return file_size def get_dataset_name_from_url(url): names = url.split('/') names.pop() return names.pop() if", "meta = u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb') print \"Downloading: %s", "u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb') print \"Downloading: %s Bytes: %s\"", "+ bro_log) meta = u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb') print", "return names.pop() if __name__ == '__main__': datasets_size = 0 if len(sys.argv) == 2:", "file_size += int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb') print \"Downloading: %s Bytes: %s\" %", "datasets_size += save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet Dataset size:\", (datasets_size / (1024.0", "# Suricata folder_path = directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes = 0 bro =", "== '__main__': datasets_size = 0 if len(sys.argv) == 2: url = sys.argv[1] datasets_size", "+ 1) print status, f.close() return file_size def get_dataset_name_from_url(url): names = url.split('/') names.pop()", "+ '/' + bro_log) meta = u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name,", "os.makedirs(folder_path) file_sizes = 0 bro 
= find_files(url + 'bro/') for i in range(len(bro)):", "for i in range(len(bro)): if '.log' in bro[i]: file_sizes += save_file2(url, folder_path +", "/ file_size) status = status + chr(8) * (len(status) + 1) print status,", "'suricata/') for i in range(len(bro)): if '.log' in bro[i] or '.json' in bro[i]:", "= find_files(url + 'bro/') for i in range(len(bro)): if '.log' in bro[i]: file_sizes", "'.log' in bro[i] or '.json' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i],", "names.pop() if __name__ == '__main__': datasets_size = 0 if len(sys.argv) == 2: url", "%s Bytes: %s\" % (file_name, file_size) file_size_dl = 0 block_sz = 8192 while", "Bytes: %s\" % (file_name, file_size) file_size_dl = 0 block_sz = 8192 while True:", "+= save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet Dataset size:\", (datasets_size / (1024.0 *", "dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path = directiry_name + \"/bro/\" os.makedirs(folder_path)", "\"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path = directiry_name +", "+= save_file2(url, folder_path + bro[i], bro[i], 'suricata') return file_sizes def save_file2(dataset_url, file_name, bro_log,", "file_sizes = 0 bro = find_files(url + 'bro/') for i in range(len(bro)): if", "'suricata') return file_sizes def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print bro_log, \"is downloading...\" file_size", "= u.read(block_sz) if not buffer: break file_size_dl += len(buffer) f.write(buffer) status = r\"%10d", "save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet Dataset size:\", (datasets_size / (1024.0 * 1024.0)),", "url = sys.argv[1] datasets_size += 
save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet Dataset size:\",", "buffer: break file_size_dl += len(buffer) f.write(buffer) status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl", "a['href'] hrefs.append(a['href']) except: pass # print hrefs return hrefs def save_manager(url, dataset_name): directiry_name", "range(len(bro)): if '.log' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'bro')", "file_sizes = 0 bro = find_files(url + 'suricata/') for i in range(len(bro)): if", "python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from bs4 import BeautifulSoup import requests import", "file_name, bro_log, bro_or_suricata_folder): print bro_log, \"is downloading...\" file_size = 0 u = urllib2.urlopen(dataset_url", "def save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) #", "= find_files(url + 'suricata/') for i in range(len(bro)): if '.log' in bro[i] or", "if len(sys.argv) == 2: url = sys.argv[1] datasets_size += save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/')", "+ chr(8) * (len(status) + 1) print status, f.close() return file_size def get_dataset_name_from_url(url):", "len(buffer) f.write(buffer) status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ file_size)", "file_size = 0 u = urllib2.urlopen(dataset_url + bro_or_suricata_folder + '/' + bro_log) meta", "'wb') print \"Downloading: %s Bytes: %s\" % (file_name, file_size) file_size_dl = 0 block_sz", "'bro/') for i in range(len(bro)): if '.log' in bro[i]: file_sizes += save_file2(url, folder_path", "(file_name, file_size) file_size_dl = 0 block_sz = 8192 while True: buffer = u.read(block_sz)", "datasets_size = 0 if len(sys.argv) == 2: url = sys.argv[1] datasets_size += save_manager(url,", "if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path = directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes", "# Bro folder_path = directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes = 0 bro =", "file_size) file_size_dl = 0 block_sz = 8192 while True: buffer = u.read(block_sz) if", "== 2: url = sys.argv[1] datasets_size += save_manager(url, get_dataset_name_from_url(url)) # find_files(url+'CTU-Malware-Capture-Botnet-31/') print \"Complet", "requests import urllib2 import os import shutil def find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\"", "try: # print a['href'] hrefs.append(a['href']) except: pass # print hrefs return hrefs def", "= u.info() file_size += int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb') print \"Downloading: %s Bytes:", "buffer = u.read(block_sz) if not buffer: break file_size_dl += len(buffer) f.write(buffer) status =", "[%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ file_size) status = status + chr(8)", "print hrefs return hrefs def save_manager(url, dataset_name): directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if", "get_dataset_name_from_url(url): names = url.split('/') names.pop() return names.pop() if __name__ == '__main__': datasets_size =", "directiry_name = \"/media/frenky/Fery/Frenky/Skola/StratosphereHTTPSDetector/Dataset/suricata/\" + dataset_name if os.path.exists(directiry_name): shutil.rmtree(directiry_name) os.makedirs(directiry_name) # Bro folder_path =", "Suricata folder_path = directiry_name + \"/suricata/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url", "folder_path + bro[i], bro[i], 'bro') # Suricata folder_path = directiry_name + \"/suricata/\" os.makedirs(folder_path)", "'.json' in bro[i]: file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'suricata') return file_sizes", "% (file_size_dl, file_size_dl * 100. / file_size) status = status + chr(8) *", "os.makedirs(directiry_name) # Bro folder_path = directiry_name + \"/bro/\" os.makedirs(folder_path) file_sizes = 0 bro", "names = url.split('/') names.pop() return names.pop() if __name__ == '__main__': datasets_size = 0", "+ \"/suricata/\" os.makedirs(folder_path) file_sizes = 0 bro = find_files(url + 'suricata/') for i", "if not buffer: break file_size_dl += len(buffer) f.write(buffer) status = r\"%10d [%3.2f%%]\" %", "Download all datasets which have bro folder. USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import", "<filename>DatasetTools/DownloadDataset/DownloadSingleDataset.py \"\"\" Download all datasets which have bro folder. USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/", "[] for a in soup.find_all('a'): try: # print a['href'] hrefs.append(a['href']) except: pass #", "bro folder. 
USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from bs4 import BeautifulSoup", "import urllib2 import os import shutil def find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup", "file_sizes += save_file2(url, folder_path + bro[i], bro[i], 'bro') # Suricata folder_path = directiry_name", "file_sizes def save_file2(dataset_url, file_name, bro_log, bro_or_suricata_folder): print bro_log, \"is downloading...\" file_size = 0", "import os import shutil def find_files(url): # url = \"https://mcfp.felk.cvut.cz/publicDatasets/\" soup = BeautifulSoup(requests.get(url).text,", "find_files(url + 'bro/') for i in range(len(bro)): if '.log' in bro[i]: file_sizes +=", "a in soup.find_all('a'): try: # print a['href'] hrefs.append(a['href']) except: pass # print hrefs", "+= int(meta.getheaders(\"Content-Length\")[0]) f = open(file_name, 'wb') print \"Downloading: %s Bytes: %s\" % (file_name,", "have bro folder. USAGE: python DownloadDatasets.py https://mcfp.felk.cvut.cz/publicDatasets/ \"\"\" import sys from bs4 import", "BeautifulSoup(requests.get(url).text, \"lxml\") hrefs = [] for a in soup.find_all('a'): try: # print a['href']" ]
[ "7, 8], [9, 10, 11, 12]], [1, 2, 3, 4, 8, 12, 11,", "5]), ([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],", "4], [5, 6, 7, 8], [9, 10, 11, 12]], [1, 2, 3, 4,", "6, 7, 8], [9, 10, 11, 12]], [1, 2, 3, 4, 8, 12,", "[9, 10, 11, 12]], [1, 2, 3, 4, 8, 12, 11, 10, 9,", "7]) ]) def test_spiral_matrix(matrix, expected): assert expected == spm.spiral_matrix(matrix) if __name__ == '__main__':", "import spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3], [4, 5, 6], [7,", "3], [4, 5, 6], [7, 8, 9]], [1, 2, 3, 6, 9, 8,", "[4, 5, 6], [7, 8, 9]], [1, 2, 3, 6, 9, 8, 7,", "[ ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [1, 2, 3,", "3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [1, 2, 3,", "[1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7]) ])", "12, 11, 10, 9, 5, 6, 7]) ]) def test_spiral_matrix(matrix, expected): assert expected", "12]], [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7])", "5, 6], [7, 8, 9]], [1, 2, 3, 6, 9, 8, 7, 4,", "pytest from datastructures.arrays import spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3], [4,", "10, 9, 5, 6, 7]) ]) def test_spiral_matrix(matrix, expected): assert expected == spm.spiral_matrix(matrix)", "4, 5]), ([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11,", "5, 6, 7]) ]) def test_spiral_matrix(matrix, expected): assert expected == spm.spiral_matrix(matrix) if __name__", "import unittest import pytest from datastructures.arrays import spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1,", "3, 4, 8, 12, 11, 10, 9, 5, 6, 7]) ]) def test_spiral_matrix(matrix,", "6, 7]) ]) def test_spiral_matrix(matrix, expected): assert expected == spm.spiral_matrix(matrix) if __name__ ==", "([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [1,", "8], [9, 10, 11, 12]], [1, 2, 3, 4, 8, 12, 11, 10,", "6, 9, 8, 7, 4, 5]), ([[1, 2, 3, 4], [5, 6, 7,", "8, 9]], [1, 2, 3, 6, 9, 8, 7, 4, 5]), ([[1, 2,", "as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3], [4, 5, 6], [7, 8, 9]],", "11, 12]], [1, 2, 3, 4, 8, 12, 
11, 10, 9, 5, 6,", "[5, 6, 7, 8], [9, 10, 11, 12]], [1, 2, 3, 4, 8,", "9]], [1, 2, 3, 6, 9, 8, 7, 4, 5]), ([[1, 2, 3,", "<reponame>sikakente/educative-io-python<filename>tests/datastructures/arrays/test_spiral_matrix.py<gh_stars>1-10 import unittest import pytest from datastructures.arrays import spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [", "2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], [1, 2,", "9, 5, 6, 7]) ]) def test_spiral_matrix(matrix, expected): assert expected == spm.spiral_matrix(matrix) if", "[1, 2, 3, 6, 9, 8, 7, 4, 5]), ([[1, 2, 3, 4],", "2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7]) ]) def", "4, 8, 12, 11, 10, 9, 5, 6, 7]) ]) def test_spiral_matrix(matrix, expected):", "from datastructures.arrays import spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3], [4, 5,", "2, 3], [4, 5, 6], [7, 8, 9]], [1, 2, 3, 6, 9,", "8, 12, 11, 10, 9, 5, 6, 7]) ]) def test_spiral_matrix(matrix, expected): assert", "import pytest from datastructures.arrays import spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3],", "datastructures.arrays import spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3], [4, 5, 6],", "([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [1, 2, 3, 6,", "6], [7, 8, 9]], [1, 2, 3, 6, 9, 8, 7, 4, 5]),", "[7, 8, 9]], [1, 2, 3, 6, 9, 8, 7, 4, 5]), ([[1,", "3, 6, 9, 8, 7, 4, 5]), ([[1, 2, 3, 4], [5, 6,", "10, 11, 12]], [1, 2, 3, 4, 8, 12, 11, 10, 9, 5,", "11, 10, 9, 5, 6, 7]) ]) def test_spiral_matrix(matrix, expected): assert expected ==", "spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3], [4, 5, 6], [7, 8,", "@pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [1, 2,", "]) def test_spiral_matrix(matrix, expected): assert expected == spm.spiral_matrix(matrix) if __name__ == '__main__': unittest.main()", "2, 3, 6, 9, 8, 7, 4, 5]), ([[1, 2, 3, 4], [5,", "unittest import pytest from datastructures.arrays 
import spiral_matrix as spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2,", "9, 8, 7, 4, 5]), ([[1, 2, 3, 4], [5, 6, 7, 8],", "8, 7, 4, 5]), ([[1, 2, 3, 4], [5, 6, 7, 8], [9,", "spm @pytest.mark.parametrize(\"matrix,expected\", [ ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [1,", "7, 4, 5]), ([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10," ]
[ "# f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self, callback_query: types.CallbackQuery, data: dict ): if", "timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout} ms)\" )", "def __init__(self, logger=None): if not isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger = logger", "f\"from inline message [ID:{callback_query.inline_message_id}] \" # f\"from user [ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query( self,", "[ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results,", "[\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if not isinstance(logger, logging.Logger): logger =", "time from aiogram import Dispatcher, types from aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR = [\"Unhandled\",", "None) if start: del obj.conf[\"_start\"] return round((time.time() - start) * 1000) return -1", "async def on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]", "in chat [{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self, edited_message, data: dict): pass # self.logger.info(f\"Received edited", "inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self, inline_query:", "f\"Process update [ID:{update.update_id}]: [success] (in 
{timeout} ms)\" ) async def on_pre_process_message(self, message: types.Message,", "# f\"pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self, update:", "if not isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger = logger super(LoggingMiddleware, self).__init__() def", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" #", "f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self, callback_query, results,", "\" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self, callback_query,", "dict ): pass # self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel", "dict ): if callback_query.message: self.logger.info( f\"Received callback query [DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]", "[{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self, edited_message, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "# else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"from inline", "def on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult, data: dict ): pass # self.logger.info(f\"Received chosen inline", "inline message [ID:{callback_query.inline_message_id}] \" # f\"from user [ID:{callback_query.from_user.id}]\") async def 
on_pre_process_shipping_query( self, shipping_query:", "[ID:{update.update_id}]: [success] (in {timeout} ms)\" ) async def on_pre_process_message(self, message: types.Message, data: dict):", "data: dict ): pass # self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \" # f\"from user", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from", "results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query [ID:{pre_checkout_query.id}] \"", "on_pre_process_message(self, message: types.Message, data: dict): self.logger.info( f'Received message [TEXT: \"{message.text}\"] in chat [{message.from_user.first_name}", "pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post [ID:{channel_post.message_id}] \" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\")", "pass # self.logger.info(f\"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user", "[ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self, callback_query: types.CallbackQuery, data: dict ): if callback_query.message: self.logger.info(", "ms)\" ) async def on_pre_process_message(self, message: types.Message, data: dict): self.logger.info( f'Received message [TEXT:", "self, callback_query: types.CallbackQuery, data: dict ): if callback_query.message: self.logger.info( f\"Received callback query [DATA:{callback_query.data}]", "query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user [ID:{callback_query.message.from_user.id}]\") #", "async def on_post_process_shipping_query(self, shipping_query, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping", "start = obj.conf.get(\"_start\", None) if start: del 
obj.conf[\"_start\"] return round((time.time() - start) *", "[USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self, callback_query, results, data: dict): pass # if callback_query.message:", "f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_post_process_chosen_inline_result( self, chosen_inline_result, results,", "f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout} ms)\" ) def on_startup(dp: Dispatcher): dp.middleware.setup(LoggingMiddleware())", "data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline result [Inline msg", "[ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, shipping_query, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "async def on_pre_process_message(self, message: types.Message, data: dict): self.logger.info( f'Received message [TEXT: \"{message.text}\"] in", "callback_query.message: # if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" #", "async def on_post_process_edited_message(self, edited_message, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \"", "else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"from inline message", "dict): timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]:", "[ID:{chosen_inline_result.result_id}]\") async def 
on_post_process_chosen_inline_result( self, chosen_inline_result, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "__init__(self, logger=None): if not isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger = logger super(LoggingMiddleware,", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user", "message: types.Message, data: dict): self.logger.info( f'Received message [TEXT: \"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username}", "self, edited_channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}]", "pass # self.logger.info(f\"Received inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def", "types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message [ID:{message.message_id}] in", "self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post(", "\" # f\"shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query(", "= self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}]: [success] (in {timeout}", "post [ID:{channel_post.message_id}] \" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message,", "{message.from_user.username} {message.from_user.id}]' ) async def on_post_process_message( self, message: types.Message, results, data: dict ):", "\" # f\"edited channel post [ID:{edited_channel_post.message_id}] \" # 
f\"in channel [ID:{edited_channel_post.chat.id}]\") async def", "): pass # self.logger.info(f\"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from", "f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self, inline_query: types.InlineQuery, data: dict ): pass", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async", "0: self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout} ms)\" ) def on_startup(dp:", "> 0: self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout} ms)\" ) def", "def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received edited channel", "message [ID:{callback_query.inline_message_id}] \" # f\"from user [ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery,", "\" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data: dict", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"from inline message [ID:{callback_query.inline_message_id}] \"", "\" # f\"from inline message [ID:{callback_query.inline_message_id}] \" # f\"from user [ID:{callback_query.from_user.id}]\") async def", "results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query [ID:{inline_query.id}] \"", "edited_channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}] \"", "\" # f\"pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user 
[ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self,", "async def on_pre_process_update(self, update: types.Update, data: dict): update.conf[\"_start\"] = time.time() pass async def", "result, data: dict): timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update", "chosen_inline_result: types.ChosenInlineResult, data: dict ): pass # self.logger.info(f\"Received chosen inline result [Inline msg", "[ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult, data: dict ): pass # self.logger.info(f\"Received", "query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult,", "[ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, shipping_query, results, data: dict):", "dict ): pass # self.logger.info(f\"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" #", "else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\")", "results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel post [ID:{edited_channel_post.message_id}]", "# if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in", "import logging import time from aiogram import Dispatcher, types from aiogram.dispatcher.middlewares import BaseMiddleware", "def on_post_process_callback_query(self, callback_query, results, data: dict): pass # if callback_query.message: # if callback_query.message.from_user:", "f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") 
async def on_post_process_edited_message(self, edited_message, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in", "if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat", "\" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self, inline_query: types.InlineQuery, data: dict", "data: dict): self.logger.info( f'Received message [TEXT: \"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' )", "inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result", "f\"result [ID:{chosen_inline_result.result_id}]\") async def on_post_process_chosen_inline_result( self, chosen_inline_result, results, data: dict ): pass #", "f\"callback query [ID:{callback_query.id}] \" # f\"from inline message [ID:{callback_query.inline_message_id}] \" # f\"from user", "self, pre_checkout_query, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query", "message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self, channel_post: types.Message,", "# f\"result [ID:{chosen_inline_result.result_id}]\") async def on_post_process_chosen_inline_result( self, chosen_inline_result, results, data: dict ): pass", "[ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self, callback_query: types.CallbackQuery, data: dict", "obj): start = obj.conf.get(\"_start\", None) if start: del obj.conf[\"_start\"] return 
round((time.time() - start)", "def on_pre_process_inline_query( self, inline_query: types.InlineQuery, data: dict ): pass # self.logger.info(f\"Received inline query", "# f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user [ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "# self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query(", "[ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self, channel_post: types.Message, data:", "async def on_pre_process_inline_query( self, inline_query: types.InlineQuery, data: dict ): pass # self.logger.info(f\"Received inline", "query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self, update: types.Update, error,", "async def on_post_process_update(self, update: types.Update, result, data: dict): timeout = self.check_timeout(update) if timeout", "\" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from", "): if callback_query.message: self.logger.info( f\"Received callback query [DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \"", "f\"Received callback query [DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\" )", "dict): self.logger.info( f'Received message [TEXT: \"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async", "def check_timeout(self, obj): start = obj.conf.get(\"_start\", None) if 
start: del obj.conf[\"_start\"] return round((time.time()", "results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message [ID:{edited_message.message_id}] \" #", "[ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self, edited_message, results, data: dict):", "[ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results, data: dict ): pass #", "msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def", "def on_pre_process_error(self, update: types.Update, error, data: dict): timeout = self.check_timeout(update) if timeout >", "self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout} ms)\" ) def on_startup(dp: Dispatcher):", "pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\")", "pass # self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def", "\" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self, edited_message, results, data: dict): pass", "f\"in channel [ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self, channel_post: types.Message, results, data: dict ):", "\" # f\"channel post [ID:{channel_post.message_id}] \" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post(", "on_post_process_chosen_inline_result( self, chosen_inline_result, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen", 
"on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data: dict ): pass # self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}]", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"from inline message [ID:{callback_query.inline_message_id}]", "data: dict ): pass # self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \" # f\"in channel", "\" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user [ID:{callback_query.message.from_user.id}]\") # else: #", "dict ): pass # self.logger.info(f\"Received inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\")", "f\"edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self, channel_post:", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post [ID:{channel_post.message_id}] \" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async", "types.Update, result, data: dict): timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process", "# f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results, data: dict", "self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout}", "chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self, callback_query, results, data:", "dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel post [ID:{edited_channel_post.message_id}] \" #", "update: types.Update, data: 
dict): update.conf[\"_start\"] = time.time() pass async def on_post_process_update(self, update: types.Update,", "data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\")", "async def on_post_process_pre_checkout_query( self, pre_checkout_query, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received edited channel post", "\" # f\"inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result(", "[TEXT: \"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async def on_post_process_message( self, message:", "self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \" # f\"in channel [ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self,", "# f\"shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self,", "[Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async", "post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self, edited_channel_post: types.Message,", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\")", "in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async def on_post_process_message( self, message: types.Message, results,", "chosen inline 
result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" #", "pass # self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def", "dict): pass # self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async", "on_pre_process_update(self, update: types.Update, data: dict): update.conf[\"_start\"] = time.time() pass async def on_post_process_update(self, update:", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async", "async def on_pre_process_edited_message(self, edited_message, data: dict): pass # self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \"", "data: dict ): if callback_query.message: self.logger.info( f\"Received callback query [DATA:{callback_query.data}] \" f\"in chat", "on_pre_process_channel_post( self, channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received channel post [ID:{channel_post.message_id}]", "[ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in", "data: dict): pass # self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\")", "data: dict ): pass # self.logger.info(f\"Received inline query [ID:{inline_query.id}] \" # f\"from user", "[{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user [ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback", "from aiogram.dispatcher.middlewares import 
BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None):", "): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post [ID:{channel_post.message_id}] \" # f\"in chat", "chat [{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self, edited_message, data: dict): pass # self.logger.info(f\"Received edited message", "update.conf[\"_start\"] = time.time() pass async def on_post_process_update(self, update: types.Update, result, data: dict): timeout", "dict): update.conf[\"_start\"] = time.time() pass async def on_post_process_update(self, update: types.Update, result, data: dict):", "types.CallbackQuery, data: dict ): if callback_query.message: self.logger.info( f\"Received callback query [DATA:{callback_query.data}] \" f\"in", "obj.conf.get(\"_start\", None) if start: del obj.conf[\"_start\"] return round((time.time() - start) * 1000) return", "logging.getLogger(self.__class__.__name__) self.logger = logger super(LoggingMiddleware, self).__init__() def check_timeout(self, obj): start = obj.conf.get(\"_start\", None)", "BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if not isinstance(logger,", "check_timeout(self, obj): start = obj.conf.get(\"_start\", None) if start: del obj.conf[\"_start\"] return round((time.time() -", "def on_post_process_pre_checkout_query( self, pre_checkout_query, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "(in {timeout} ms)\" ) async def on_pre_process_message(self, message: types.Message, data: dict): self.logger.info( f'Received", "[{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self, channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received", "{message.from_user.id}]' ) async def on_post_process_message( self, 
message: types.Message, results, data: dict ): pass", "async def on_post_process_chosen_inline_result( self, chosen_inline_result, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "self.logger = logger super(LoggingMiddleware, self).__init__() def check_timeout(self, obj): start = obj.conf.get(\"_start\", None) if", "# f\"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self, edited_message, data: dict): pass", "on_post_process_edited_message(self, edited_message, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message [ID:{edited_message.message_id}]", "chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self, edited_message, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, shipping_query, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "time.time() pass async def on_post_process_update(self, update: types.Update, result, data: dict): timeout = self.check_timeout(update)", "edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self, edited_message, results,", "# f\"from inline message [ID:{callback_query.inline_message_id}] \" # f\"from user [ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query(", "\"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if not isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__)", "\" # f\"from user [ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data: dict", "error, data: dict): 
timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update", "# self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \" # f\"in channel [ID:{channel_post.chat.id}]\") async def on_post_process_channel_post(", "f\"from user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, shipping_query, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "f\"from user [ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \"", "shipping_query: types.ShippingQuery, data: dict ): pass # self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \" #", "on_pre_process_edited_message(self, edited_message, data: dict): pass # self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \" # f\"in", "callback_query, results, data: dict): pass # if callback_query.message: # if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "\" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self, inline_query: types.InlineQuery, results, data:", "update: types.Update, error, data: dict): timeout = self.check_timeout(update) if timeout > 0: self.logger.info(", "types.InlineQuery, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query [ID:{inline_query.id}]", "\" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data: dict", "f\"edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self,", "async def on_post_process_inline_query( self, inline_query: types.InlineQuery, results, data: dict ): pass # 
self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "# self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self,", "dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query [ID:{inline_query.id}] \" # f\"from", "data: dict): pass # if callback_query.message: # if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed]", "post [ID:{channel_post.message_id}] \" # f\"in channel [ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self, channel_post: types.Message,", "channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results, data: dict ): pass", "types.InlineQuery, data: dict ): pass # self.logger.info(f\"Received inline query [ID:{inline_query.id}] \" # f\"from", "super(LoggingMiddleware, self).__init__() def check_timeout(self, obj): start = obj.conf.get(\"_start\", None) if start: del obj.conf[\"_start\"]", "def on_post_process_channel_post( self, channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "[ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data: dict ): pass # self.logger.info(f\"Received", "pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\")", "def 
on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data: dict ): pass # self.logger.info(f\"Received pre-checkout query", "on_post_process_channel_post( self, channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query(", "f\"inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self, chosen_inline_result:", "\" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self, update: types.Update, error, data: dict):", "self, shipping_query: types.ShippingQuery, data: dict ): pass # self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \"", "# f\"inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self,", "-1 async def on_pre_process_update(self, update: types.Update, data: dict): update.conf[\"_start\"] = time.time() pass async", "obj.conf[\"_start\"] return round((time.time() - start) * 1000) return -1 async def on_pre_process_update(self, update:", "[ID:{callback_query.inline_message_id}] \" # f\"from user [ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data:", "\"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async def on_post_process_message( self, message: types.Message,", "f'Received message [TEXT: \"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async def on_post_process_message(", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message 
[ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self, edited_message, data:", "f\"shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self, pre_checkout_query:", "edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self,", "# else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat", "types.Update, error, data: dict): timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process", "[{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self, callback_query, results, data: dict):", "timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}]: [success] (in", "data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query [ID:{pre_checkout_query.id}] \" #", "): pass # self.logger.info(f\"Received inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async", "dict ): pass # self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \" # f\"in channel [ID:{channel_post.chat.id}]\")", "self, pre_checkout_query: types.PreCheckoutQuery, data: dict ): pass # self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \"", "): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async def", "): pass # self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\")", 
"[ID:{channel_post.message_id}] \" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data:", "query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery,", "pass # if callback_query.message: # if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query", "data: dict): timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}]:", "channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post", "self, edited_channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited", "types from aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self,", "f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results, data: dict ):", "results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query [ID:{shipping_query.id}] \" #", "import Dispatcher, types from aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware):", "self, chosen_inline_result: types.ChosenInlineResult, data: dict ): pass # self.logger.info(f\"Received chosen inline result [Inline", "= logger super(LoggingMiddleware, self).__init__() def check_timeout(self, obj): start = obj.conf.get(\"_start\", None) if start:", "results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline result [Inline", "# 
f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query( self, pre_checkout_query, results, data: dict ):", "[ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data:", "on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\")", "edited_message, data: dict): pass # self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \" # f\"in chat", "async def on_post_process_message( self, message: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "types.ShippingQuery, data: dict ): pass # self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \" # f\"from", "[{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async def on_post_process_message( self, message: types.Message, results, data: dict", "shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, shipping_query, results,", "def on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "on_post_process_shipping_query(self, shipping_query, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query [ID:{shipping_query.id}]", "[success] (in {timeout} ms)\" ) async def on_pre_process_message(self, message: types.Message, data: dict): self.logger.info(", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" 
# f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else:", "if start: del obj.conf[\"_start\"] return round((time.time() - start) * 1000) return -1 async", "dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}]", "chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" #", "on_pre_process_inline_query( self, inline_query: types.InlineQuery, data: dict ): pass # self.logger.info(f\"Received inline query [ID:{inline_query.id}]", "timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)\" ) async", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async", "\" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self,", "\" # f\"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self, edited_message, data: dict):", "# f\"from user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data: dict ):", "message [TEXT: \"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async def on_post_process_message( self,", "self).__init__() def check_timeout(self, obj): start = obj.conf.get(\"_start\", None) if start: del obj.conf[\"_start\"] return", "callback_query: types.CallbackQuery, data: dict ): if callback_query.message: self.logger.info( f\"Received callback query 
[DATA:{callback_query.data}] \"", "pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\")", ") async def on_post_process_callback_query(self, callback_query, results, data: dict): pass # if callback_query.message: #", "dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message [ID:{edited_message.message_id}] \" # f\"in chat", "pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self,", "import time from aiogram import Dispatcher, types from aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR =", "# f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}]", "inline_query: types.InlineQuery, data: dict ): pass # self.logger.info(f\"Received inline query [ID:{inline_query.id}] \" #", "f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \"", "async def on_post_process_callback_query(self, callback_query, results, data: dict): pass # if callback_query.message: # if", "0: self.logger.info( f\"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)\" ) async def on_pre_process_message(self,", "# f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data: dict ):", "): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query [ID:{inline_query.id}] \" # f\"from user", "# f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def 
on_pre_process_error(self, update: types.Update, error, data: dict): timeout", "logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger = logger super(LoggingMiddleware, self).__init__() def check_timeout(self, obj): start", "): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \"", "on_pre_process_error(self, update: types.Update, error, data: dict): timeout = self.check_timeout(update) if timeout > 0:", "pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel", "self, message: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message", "chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async def on_post_process_message( self, message: types.Message, results, data:", "def on_pre_process_channel_post( self, channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received channel post", "self.logger.info( f\"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)\" ) async def on_pre_process_message(self, message:", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def", "data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query [ID:{inline_query.id}] \" #", "edited_message, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message [ID:{edited_message.message_id}] \"", "f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self, callback_query: types.CallbackQuery,", "async def on_pre_process_callback_query( self, callback_query: 
types.CallbackQuery, data: dict ): if callback_query.message: self.logger.info( f\"Received", "[ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self, edited_message, data: dict): pass # self.logger.info(f\"Received", "[ID:{callback_query.id}] \" # f\"from inline message [ID:{callback_query.inline_message_id}] \" # f\"from user [ID:{callback_query.from_user.id}]\") async", "# f\"channel post [ID:{channel_post.message_id}] \" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self,", "dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query [ID:{shipping_query.id}] \" # f\"from user", "\" # f\"callback query [ID:{callback_query.id}] \" # f\"from inline message [ID:{callback_query.inline_message_id}] \" #", "self, chosen_inline_result, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def", "\" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query", "channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self, edited_channel_post:", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post [ID:{channel_post.message_id}] \" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def", "): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user", "self, channel_post: types.Message, results, data: dict ): pass # 
self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel", "message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self, edited_message, results, data:", "data: dict ): pass # self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user", "query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, shipping_query, results, data:", "f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user [ID:{callback_query.message.from_user.id}]\")", "# f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self, edited_message, results, data: dict): pass #", "= obj.conf.get(\"_start\", None) if start: del obj.conf[\"_start\"] return round((time.time() - start) * 1000)", "data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel post [ID:{edited_channel_post.message_id}] \"", "pass async def on_post_process_update(self, update: types.Update, result, data: dict): timeout = self.check_timeout(update) if", "logging import time from aiogram import Dispatcher, types from aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR", "[{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self, edited_message, data: dict): pass # self.logger.info(f\"Received edited message [ID:{edited_message.message_id}]", "on_post_process_update(self, update: types.Update, result, data: dict): timeout = self.check_timeout(update) if timeout > 0:", "\" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query( self, pre_checkout_query, results, data: dict", "logger super(LoggingMiddleware, self).__init__() def 
check_timeout(self, obj): start = obj.conf.get(\"_start\", None) if start: del", "user [ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self, update: types.Update, error, data: dict): timeout = self.check_timeout(update)", "{timeout} ms)\" ) async def on_pre_process_message(self, message: types.Message, data: dict): self.logger.info( f'Received message", "from aiogram import Dispatcher, types from aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"]", "async def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data: dict ): pass # self.logger.info(f\"Received shipping", "self.logger.info( f\"Received callback query [DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\"", "import BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if not", "ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_post_process_chosen_inline_result(", "user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_post_process_chosen_inline_result( self, chosen_inline_result, results, data:", "[ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") #", "# f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def 
on_post_process_chosen_inline_result( self, chosen_inline_result,", "user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data: dict ): pass #", "user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self, callback_query: types.CallbackQuery, data:", "logger=None): if not isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger = logger super(LoggingMiddleware, self).__init__()", "callback_query.message: self.logger.info( f\"Received callback query [DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user", "def on_pre_process_message(self, message: types.Message, data: dict): self.logger.info( f'Received message [TEXT: \"{message.text}\"] in chat", "inline_query: types.InlineQuery, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline query", "data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query [ID:{shipping_query.id}] \" # f\"from", "# f\"from user [ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}]", "return -1 async def on_pre_process_update(self, update: types.Update, data: dict): update.conf[\"_start\"] = time.time() pass", "def on_pre_process_callback_query( self, callback_query: types.CallbackQuery, data: dict ): if callback_query.message: self.logger.info( f\"Received callback", "# f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "\" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, 
shipping_query, results, data: dict): pass", "isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger = logger super(LoggingMiddleware, self).__init__() def check_timeout(self, obj):", "query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query( self, pre_checkout_query, results,", "f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data: dict ): pass", "pass # self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \" # f\"in channel [ID:{channel_post.chat.id}]\") async def", "# f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self, channel_post: types.Message, data: dict ):", "results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post [ID:{channel_post.message_id}] \"", "- start) * 1000) return -1 async def on_pre_process_update(self, update: types.Update, data: dict):", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async", "self, channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \"", "post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self, inline_query: types.InlineQuery,", "f\"channel post [ID:{channel_post.message_id}] \" # f\"in chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self, edited_channel_post:", "# self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def", 
"callback query [DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async", "pass # self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async", "def on_pre_process_edited_message(self, edited_message, data: dict): pass # self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \" #", "def on_post_process_inline_query( self, inline_query: types.InlineQuery, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" #", "# f\"in channel [ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self, channel_post: types.Message, results, data: dict", "message: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message [ID:{message.message_id}]", "# f\"callback query [ID:{callback_query.id}] \" # f\"from inline message [ID:{callback_query.inline_message_id}] \" # f\"from", "# f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user", "user [ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data: dict ): pass #", "[ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self, channel_post: types.Message, results, data: dict ): pass #", "user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult, data: dict ): pass #", "not isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger = logger 
super(LoggingMiddleware, self).__init__() def check_timeout(self,", "logger = logging.getLogger(self.__class__.__name__) self.logger = logger super(LoggingMiddleware, self).__init__() def check_timeout(self, obj): start =", "dict ): pass # self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\")", "* 1000) return -1 async def on_pre_process_update(self, update: types.Update, data: dict): update.conf[\"_start\"] =", "f\"from user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self, inline_query: types.InlineQuery, results, data: dict ):", "\" # f\"from user [ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query", "round((time.time() - start) * 1000) return -1 async def on_pre_process_update(self, update: types.Update, data:", "async def on_post_process_channel_post( self, channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "self.logger.info(f\"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \"", "\" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult, data: dict", "dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post [ID:{channel_post.message_id}] \" # f\"in", "[DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self,", "): pass # self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async", "def on_post_process_update(self, update: types.Update, result, data: 
dict): timeout = self.check_timeout(update) if timeout >", "types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel post", "def on_post_process_shipping_query(self, shipping_query, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query", "Dispatcher, types from aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def", "user [USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self, callback_query, results, data: dict): pass # if", "self.logger.info(f\"Received inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self,", "f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self, update: types.Update, error, data: dict): timeout =", "# f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self, inline_query: types.InlineQuery, data: dict ):", "channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self, inline_query: types.InlineQuery, data: dict ): pass #", "[{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" # f\"from", "self, inline_query: types.InlineQuery, data: dict ): pass # self.logger.info(f\"Received inline query [ID:{inline_query.id}] \"", "# f\"from user [ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data: dict ):", "= self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in", "data: dict): timeout = self.check_timeout(update) if timeout > 0: 
self.logger.info( f\"Process update [ID:{update.update_id},", "LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if not isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger =", "types.PreCheckoutQuery, data: dict ): pass # self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from", "def on_post_process_message( self, message: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "if callback_query.message: self.logger.info( f\"Received callback query [DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from", "def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data: dict ): pass # self.logger.info(f\"Received shipping query", "update: types.Update, result, data: dict): timeout = self.check_timeout(update) if timeout > 0: self.logger.info(", "shipping_query, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query [ID:{shipping_query.id}] \"", "aiogram import Dispatcher, types from aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"] class", "data: dict ): pass # self.logger.info(f\"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \"", "\" # f\"callback query [ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: #", "\" # f\"edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post(", "pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query( self, pre_checkout_query,", "channel post [ID:{channel_post.message_id}] \" # f\"in channel 
[ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self, channel_post:", "): pass # self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async", "# f\"from user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult, data: dict ):", "\" # f\"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}]", "start) * 1000) return -1 async def on_pre_process_update(self, update: types.Update, data: dict): update.conf[\"_start\"]", "f\"from user [ID:{callback_query.from_user.id}]\") async def on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data: dict ): pass", "# f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self, callback_query:", "f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self, callback_query: types.CallbackQuery, data: dict ): if callback_query.message:", "> 0: self.logger.info( f\"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)\" ) async def", "user [ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \" #", "[ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query( self, pre_checkout_query, results, data:", "channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \" #", "data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post [ID:{channel_post.message_id}] \" #", "if callback_query.message: # if 
callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}] \"", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async", "[ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self, inline_query: types.InlineQuery, results, data: dict ): pass #", "query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self, inline_query: types.InlineQuery,", "[ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self, update: types.Update, error, data:", "def on_post_process_chosen_inline_result( self, chosen_inline_result, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "# self.logger.info(f\"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}]", "dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from", "f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user [ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "\" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self, channel_post: types.Message, data: dict", "[{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received", "\" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def 
on_post_process_chosen_inline_result( self,", "[ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user [ID:{callback_query.message.from_user.id}]\") # else:", "dict ): pass # self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\")", "on_post_process_message( self, message: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "self.logger.info( f'Received message [TEXT: \"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]' ) async def", "# f\"edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query(", "[ID:{channel_post.message_id}] \" # f\"in channel [ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self, channel_post: types.Message, results,", "[ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self, update: types.Update, error, data: dict): timeout = self.check_timeout(update) if", "types.Update, data: dict): update.conf[\"_start\"] = time.time() pass async def on_post_process_update(self, update: types.Update, result,", "[ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self, inline_query: types.InlineQuery, data:", "edited_channel_post: types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited channel", "def on_post_process_edited_message(self, edited_message, results, data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message", "async def on_pre_process_error(self, update: types.Update, error, data: dict): timeout = self.check_timeout(update) if timeout", "if timeout > 0: 
self.logger.info( f\"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)\" )", "f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self, callback_query, results, data: dict): pass #", "\" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_post_process_chosen_inline_result( self, chosen_inline_result, results, data: dict ):", "[ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult, data:", "self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, shipping_query,", "data: dict ): pass # self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}] \" # f\"in", "[ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query( self, pre_checkout_query, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]}", "update [ID:{update.update_id}]: [success] (in {timeout} ms)\" ) async def on_pre_process_message(self, message: types.Message, data:", "# f\"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \"", "f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self, channel_post: types.Message, data: dict ): pass", "aiogram.dispatcher.middlewares import BaseMiddleware HANDLED_STR = [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if", "query [ID:{callback_query.id}] \" # f\"from inline message [ID:{callback_query.inline_message_id}] \" # f\"from user [ID:{callback_query.from_user.id}]\")", "f\"from user [ID:{inline_query.from_user.id}]\") async def on_pre_process_chosen_inline_result( self, 
chosen_inline_result: types.ChosenInlineResult, data: dict ): pass", "chosen_inline_result, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"chosen inline result", "on_post_process_callback_query(self, callback_query, results, data: dict): pass # if callback_query.message: # if callback_query.message.from_user: #", "types.Message, data: dict ): pass # self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \" # f\"in", "dict): timeout = self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}]: [success]", "async def on_pre_process_channel_post( self, channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received channel", "async def on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data: dict ): pass # self.logger.info(f\"Received pre-checkout", "# self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async def on_pre_process_edited_message(self, edited_message,", "dict): pass # if callback_query.message: # if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback", "data: dict): update.conf[\"_start\"] = time.time() pass async def on_post_process_update(self, update: types.Update, result, data:", "f\"pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_pre_process_error(self, update: types.Update,", "\" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_post_process_edited_channel_post( self, edited_channel_post: types.Message, results, data:", "pass # self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def", "user [ID:{pre_checkout_query.from_user.id}]\") async def 
on_post_process_pre_checkout_query( self, pre_checkout_query, results, data: dict ): pass #", "pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\")", "[ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data: dict ): pass # self.logger.info(f\"Received", "chat [{channel_post.chat.type}:{channel_post.chat.id}]\") async def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data: dict ): pass #", "\" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_pre_process_callback_query( self, callback_query: types.CallbackQuery, data: dict ):", "self.logger.info(f\"Received edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_post_process_edited_message(self, edited_message,", "\" # f\"in channel [ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self, channel_post: types.Message, results, data:", "data: dict): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message [ID:{edited_message.message_id}] \" # f\"in", "on_post_process_pre_checkout_query( self, pre_checkout_query, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout", "self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query( self,", "\" f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async def on_post_process_callback_query(self, callback_query, results, data: dict): pass", "chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self, channel_post: types.Message, data: dict ): pass #", 
"on_pre_process_callback_query( self, callback_query: types.CallbackQuery, data: dict ): if callback_query.message: self.logger.info( f\"Received callback query", "): pass # self.logger.info(f\"Received channel post [ID:{channel_post.message_id}] \" # f\"in channel [ID:{channel_post.chat.id}]\") async", "on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult, data: dict ): pass # self.logger.info(f\"Received chosen inline result", "# if callback_query.message: # if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"callback query [ID:{callback_query.id}]", "# self.logger.info(f\"Received shipping query [ID:{shipping_query.id}] \" # f\"from user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self,", "def on_pre_process_update(self, update: types.Update, data: dict): update.conf[\"_start\"] = time.time() pass async def on_post_process_update(self,", "[ID:{chosen_inline_result.from_user.id}] \" # f\"result [ID:{chosen_inline_result.result_id}]\") async def on_post_process_chosen_inline_result( self, chosen_inline_result, results, data: dict", "chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" # f\"from user [ID:{callback_query.message.from_user.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "channel [ID:{channel_post.chat.id}]\") async def on_post_process_channel_post( self, channel_post: types.Message, results, data: dict ): pass", "self, inline_query: types.InlineQuery, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"inline", "on_pre_process_shipping_query( self, shipping_query: types.ShippingQuery, data: dict ): pass # self.logger.info(f\"Received shipping query [ID:{shipping_query.id}]", "[ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self, inline_query: types.InlineQuery, data: dict ): pass # 
self.logger.info(f\"Received", "types.Message, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"channel post [ID:{channel_post.message_id}]", ") async def on_post_process_message( self, message: types.Message, results, data: dict ): pass #", "results, data: dict): pass # if callback_query.message: # if callback_query.message.from_user: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async", "user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self, inline_query: types.InlineQuery, results, data: dict ): pass", "query [DATA:{callback_query.data}] \" f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] \" f\"from user [USERNAME:{callback_query.from_user.username}]\" ) async def", "types.Message, data: dict ): pass # self.logger.info(f\"Received edited channel post [ID:{edited_channel_post.message_id}] \" #", "# f\"from user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self, inline_query: types.InlineQuery, results, data: dict", "pre_checkout_query: types.PreCheckoutQuery, data: dict ): pass # self.logger.info(f\"Received pre-checkout query [ID:{pre_checkout_query.id}] \" #", "= logging.getLogger(self.__class__.__name__) self.logger = logger super(LoggingMiddleware, self).__init__() def check_timeout(self, obj): start = obj.conf.get(\"_start\",", "async def on_pre_process_chosen_inline_result( self, chosen_inline_result: types.ChosenInlineResult, data: dict ): pass # self.logger.info(f\"Received chosen", ") async def on_pre_process_message(self, message: types.Message, data: dict): self.logger.info( f'Received message [TEXT: \"{message.text}\"]", "f\"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]\") async def 
on_pre_process_edited_message(self, edited_message, data: dict): pass #", "1000) return -1 async def on_pre_process_update(self, update: types.Update, data: dict): update.conf[\"_start\"] = time.time()", "channel post [ID:{edited_channel_post.message_id}] \" # f\"in channel [ID:{edited_channel_post.chat.id}]\") async def on_pre_process_inline_query( self, inline_query:", "on_post_process_inline_query( self, inline_query: types.InlineQuery, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" #", "= [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if not isinstance(logger, logging.Logger): logger", "class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if not isinstance(logger, logging.Logger): logger = logging.getLogger(self.__class__.__name__) self.logger", "[ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query( self, inline_query: types.InlineQuery, results,", "# self.logger.info(f\"Received inline query [ID:{inline_query.id}] \" # f\"from user [ID:{inline_query.from_user.id}]\") async def on_post_process_inline_query(", "HANDLED_STR = [\"Unhandled\", \"Handled\"] class LoggingMiddleware(BaseMiddleware): def __init__(self, logger=None): if not isinstance(logger, logging.Logger):", "self.check_timeout(update) if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)\"", "del obj.conf[\"_start\"] return round((time.time() - start) * 1000) return -1 async def on_pre_process_update(self,", "f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def on_post_process_pre_checkout_query( self, pre_checkout_query, results, data: dict ): pass", "# f\"edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def on_pre_process_channel_post( self,", "f\"callback query 
[ID:{callback_query.id}] \" # f\"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]\") # else: # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \"", "# f\"from user [ID:{shipping_query.from_user.id}]\") async def on_post_process_shipping_query(self, shipping_query, results, data: dict): pass #", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"edited message [ID:{edited_message.message_id}] \" # f\"in chat [{edited_message.chat.type}:{edited_message.chat.id}]\") async def", "return round((time.time() - start) * 1000) return -1 async def on_pre_process_update(self, update: types.Update,", "types.Message, data: dict): self.logger.info( f'Received message [TEXT: \"{message.text}\"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]'", "types.ChosenInlineResult, data: dict ): pass # self.logger.info(f\"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}]", "f\"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] \" # f\"from user [ID:{chosen_inline_result.from_user.id}] \" #", "pre_checkout_query, results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query [ID:{pre_checkout_query.id}]", "f\"from user [ID:{shipping_query.from_user.id}]\") async def on_pre_process_pre_checkout_query( self, pre_checkout_query: types.PreCheckoutQuery, data: dict ): pass", "self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # f\"pre-checkout query [ID:{pre_checkout_query.id}] \" # f\"from user [ID:{pre_checkout_query.from_user.id}]\") async def", "start: del obj.conf[\"_start\"] return round((time.time() - start) * 1000) return -1 async def", "async def on_pre_process_edited_channel_post( self, edited_channel_post: types.Message, data: dict ): pass # self.logger.info(f\"Received edited", "results, data: dict ): pass # self.logger.debug(f\"{HANDLED_STR[bool(len(results))]} \" # 
f\"message [ID:{message.message_id}] in chat", "= time.time() pass async def on_post_process_update(self, update: types.Update, result, data: dict): timeout =", "if timeout > 0: self.logger.info( f\"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout} ms)\"" ]
[ "Response(object): ok: bool = field(default=False) data: Any = field(default=None) message: str = field(default='')", "from dataclasses import dataclass, field from typing import Any @dataclass class Response(object): ok:", "dataclasses import dataclass, field from typing import Any @dataclass class Response(object): ok: bool", "dataclass, field from typing import Any @dataclass class Response(object): ok: bool = field(default=False)", "field from typing import Any @dataclass class Response(object): ok: bool = field(default=False) data:", "from typing import Any @dataclass class Response(object): ok: bool = field(default=False) data: Any", "import Any @dataclass class Response(object): ok: bool = field(default=False) data: Any = field(default=None)", "typing import Any @dataclass class Response(object): ok: bool = field(default=False) data: Any =", "Any @dataclass class Response(object): ok: bool = field(default=False) data: Any = field(default=None) message:", "@dataclass class Response(object): ok: bool = field(default=False) data: Any = field(default=None) message: str", "class Response(object): ok: bool = field(default=False) data: Any = field(default=None) message: str =", "import dataclass, field from typing import Any @dataclass class Response(object): ok: bool =" ]
[ "import create_align_model # noqa from .alignment import lda_acc_stats # noqa from .alignment import", "noqa from .ivector import acc_ivector_stats # noqa from .ivector import extract_ivectors # noqa", "Stopped, run_mp, run_non_mp # noqa from .ivector import acc_global_stats # noqa from .ivector", "import segment_vad # noqa from .pronunciations import generate_pronunciations # noqa from .transcription import", ".ivector import gauss_to_post # noqa from .ivector import gmm_gselect # noqa from .ivector", "# noqa from .alignment import align # noqa from .alignment import calc_fmllr #", "from .alignment import compile_train_graphs # noqa from .alignment import compute_alignment_improvement # noqa from", "# noqa from .alignment import create_align_model # noqa from .alignment import lda_acc_stats #", "classes for Montreal Forced Aligner\"\"\" from .alignment import acc_stats # noqa from .alignment", "from .alignment import mono_align_equal # noqa from .alignment import train_map # noqa from", "# noqa from .alignment import mono_align_equal # noqa from .alignment import train_map #", "# noqa from .alignment import lda_acc_stats # noqa from .alignment import mono_align_equal #", "calc_fmllr # noqa from .alignment import calc_lda_mllt # noqa from .alignment import compile_information", "noqa from .alignment import create_align_model # noqa from .alignment import lda_acc_stats # noqa", "from .helper import Counter, Stopped, run_mp, run_non_mp # noqa from .ivector import acc_global_stats", ".helper import Counter, Stopped, run_mp, run_non_mp # noqa from .ivector import acc_global_stats #", ".alignment import compile_information # noqa from .alignment import compile_train_graphs # noqa from .alignment", "noqa from .alignment import compile_train_graphs # noqa from .alignment import compute_alignment_improvement # noqa", "compute_alignment_improvement # noqa from .alignment import convert_ali_to_textgrids # noqa from .alignment import convert_alignments", "noqa from 
.alignment import mono_align_equal # noqa from .alignment import train_map # noqa", "from .ivector import extract_ivectors # noqa from .ivector import gauss_to_post # noqa from", "import convert_alignments # noqa from .alignment import create_align_model # noqa from .alignment import", "# noqa from .alignment import train_map # noqa from .alignment import tree_stats #", "# noqa from .ivector import acc_ivector_stats # noqa from .ivector import extract_ivectors #", "from .ivector import gmm_gselect # noqa from .ivector import segment_vad # noqa from", ".ivector import gmm_gselect # noqa from .ivector import segment_vad # noqa from .pronunciations", "noqa from .alignment import compute_alignment_improvement # noqa from .alignment import convert_ali_to_textgrids # noqa", "# noqa from .ivector import extract_ivectors # noqa from .ivector import gauss_to_post #", "import Counter, Stopped, run_mp, run_non_mp # noqa from .ivector import acc_global_stats # noqa", "import acc_ivector_stats # noqa from .ivector import extract_ivectors # noqa from .ivector import", ".ivector import acc_global_stats # noqa from .ivector import acc_ivector_stats # noqa from .ivector", "run_mp, run_non_mp # noqa from .ivector import acc_global_stats # noqa from .ivector import", "import convert_ali_to_textgrids # noqa from .alignment import convert_alignments # noqa from .alignment import", "noqa from .helper import Counter, Stopped, run_mp, run_non_mp # noqa from .ivector import", "import gauss_to_post # noqa from .ivector import gmm_gselect # noqa from .ivector import", "from .alignment import tree_stats # noqa; noqa from .helper import Counter, Stopped, run_mp,", ".alignment import convert_ali_to_textgrids # noqa from .alignment import convert_alignments # noqa from .alignment", "import align # noqa from .alignment import calc_fmllr # noqa from .alignment import", "# noqa from .ivector import gauss_to_post # noqa from .ivector import gmm_gselect #", "# noqa from .alignment import calc_fmllr # 
noqa from .alignment import calc_lda_mllt #", ".ivector import extract_ivectors # noqa from .ivector import gauss_to_post # noqa from .ivector", "# noqa from .alignment import tree_stats # noqa; noqa from .helper import Counter,", "import lda_acc_stats # noqa from .alignment import mono_align_equal # noqa from .alignment import", "from .alignment import align # noqa from .alignment import calc_fmllr # noqa from", "from .alignment import compile_information # noqa from .alignment import compile_train_graphs # noqa from", "from .alignment import compute_alignment_improvement # noqa from .alignment import convert_ali_to_textgrids # noqa from", "noqa from .alignment import calc_fmllr # noqa from .alignment import calc_lda_mllt # noqa", "acc_ivector_stats # noqa from .ivector import extract_ivectors # noqa from .ivector import gauss_to_post", "noqa from .alignment import train_map # noqa from .alignment import tree_stats # noqa;", "and classes for Montreal Forced Aligner\"\"\" from .alignment import acc_stats # noqa from", "# noqa from .alignment import compute_alignment_improvement # noqa from .alignment import convert_ali_to_textgrids #", "gmm_gselect # noqa from .ivector import segment_vad # noqa from .pronunciations import generate_pronunciations", "noqa from .alignment import lda_acc_stats # noqa from .alignment import mono_align_equal # noqa", "from .alignment import convert_ali_to_textgrids # noqa from .alignment import convert_alignments # noqa from", ".alignment import calc_lda_mllt # noqa from .alignment import compile_information # noqa from .alignment", "noqa from .alignment import tree_stats # noqa; noqa from .helper import Counter, Stopped,", "from .ivector import acc_global_stats # noqa from .ivector import acc_ivector_stats # noqa from", ".alignment import compute_alignment_improvement # noqa from .alignment import convert_ali_to_textgrids # noqa from .alignment", "import acc_stats # noqa from .alignment import align # noqa from .alignment import", 
"segment_vad # noqa from .pronunciations import generate_pronunciations # noqa from .transcription import transcribe,", ".alignment import acc_stats # noqa from .alignment import align # noqa from .alignment", "noqa from .ivector import acc_global_stats # noqa from .ivector import acc_ivector_stats # noqa", "convert_ali_to_textgrids # noqa from .alignment import convert_alignments # noqa from .alignment import create_align_model", "create_align_model # noqa from .alignment import lda_acc_stats # noqa from .alignment import mono_align_equal", "# noqa from .pronunciations import generate_pronunciations # noqa from .transcription import transcribe, transcribe_fmllr", "Counter, Stopped, run_mp, run_non_mp # noqa from .ivector import acc_global_stats # noqa from", "lda_acc_stats # noqa from .alignment import mono_align_equal # noqa from .alignment import train_map", "convert_alignments # noqa from .alignment import create_align_model # noqa from .alignment import lda_acc_stats", "\"\"\"Multiprocessing functions and classes for Montreal Forced Aligner\"\"\" from .alignment import acc_stats #", "Forced Aligner\"\"\" from .alignment import acc_stats # noqa from .alignment import align #", "import acc_global_stats # noqa from .ivector import acc_ivector_stats # noqa from .ivector import", "# noqa from .alignment import convert_ali_to_textgrids # noqa from .alignment import convert_alignments #", "noqa from .alignment import align # noqa from .alignment import calc_fmllr # noqa", "import compute_alignment_improvement # noqa from .alignment import convert_ali_to_textgrids # noqa from .alignment import", "tree_stats # noqa; noqa from .helper import Counter, Stopped, run_mp, run_non_mp # noqa", "# noqa; noqa from .helper import Counter, Stopped, run_mp, run_non_mp # noqa from", "align # noqa from .alignment import calc_fmllr # noqa from .alignment import calc_lda_mllt", "noqa from .alignment import convert_ali_to_textgrids # noqa from .alignment import convert_alignments # 
noqa", "from .ivector import gauss_to_post # noqa from .ivector import gmm_gselect # noqa from", "acc_stats # noqa from .alignment import align # noqa from .alignment import calc_fmllr", "mono_align_equal # noqa from .alignment import train_map # noqa from .alignment import tree_stats", "noqa; noqa from .helper import Counter, Stopped, run_mp, run_non_mp # noqa from .ivector", "import tree_stats # noqa; noqa from .helper import Counter, Stopped, run_mp, run_non_mp #", ".alignment import mono_align_equal # noqa from .alignment import train_map # noqa from .alignment", "# noqa from .alignment import compile_information # noqa from .alignment import compile_train_graphs #", "from .alignment import create_align_model # noqa from .alignment import lda_acc_stats # noqa from", "from .pronunciations import generate_pronunciations # noqa from .transcription import transcribe, transcribe_fmllr # noqa", "from .alignment import calc_fmllr # noqa from .alignment import calc_lda_mllt # noqa from", "compile_information # noqa from .alignment import compile_train_graphs # noqa from .alignment import compute_alignment_improvement", "from .ivector import segment_vad # noqa from .pronunciations import generate_pronunciations # noqa from", ".alignment import calc_fmllr # noqa from .alignment import calc_lda_mllt # noqa from .alignment", "# noqa from .ivector import segment_vad # noqa from .pronunciations import generate_pronunciations #", "noqa from .ivector import segment_vad # noqa from .pronunciations import generate_pronunciations # noqa", "for Montreal Forced Aligner\"\"\" from .alignment import acc_stats # noqa from .alignment import", "acc_global_stats # noqa from .ivector import acc_ivector_stats # noqa from .ivector import extract_ivectors", "# noqa from .alignment import compile_train_graphs # noqa from .alignment import compute_alignment_improvement #", ".alignment import train_map # noqa from .alignment import tree_stats # noqa; noqa from", "import compile_information # 
noqa from .alignment import compile_train_graphs # noqa from .alignment import", "noqa from .alignment import convert_alignments # noqa from .alignment import create_align_model # noqa", "from .ivector import acc_ivector_stats # noqa from .ivector import extract_ivectors # noqa from", "import calc_fmllr # noqa from .alignment import calc_lda_mllt # noqa from .alignment import", "noqa from .ivector import gmm_gselect # noqa from .ivector import segment_vad # noqa", "from .alignment import convert_alignments # noqa from .alignment import create_align_model # noqa from", "import mono_align_equal # noqa from .alignment import train_map # noqa from .alignment import", "import train_map # noqa from .alignment import tree_stats # noqa; noqa from .helper", "import calc_lda_mllt # noqa from .alignment import compile_information # noqa from .alignment import", ".alignment import compile_train_graphs # noqa from .alignment import compute_alignment_improvement # noqa from .alignment", ".alignment import lda_acc_stats # noqa from .alignment import mono_align_equal # noqa from .alignment", ".alignment import create_align_model # noqa from .alignment import lda_acc_stats # noqa from .alignment", "import gmm_gselect # noqa from .ivector import segment_vad # noqa from .pronunciations import", "train_map # noqa from .alignment import tree_stats # noqa; noqa from .helper import", "Montreal Forced Aligner\"\"\" from .alignment import acc_stats # noqa from .alignment import align", "from .alignment import train_map # noqa from .alignment import tree_stats # noqa; noqa", "noqa from .ivector import extract_ivectors # noqa from .ivector import gauss_to_post # noqa", ".alignment import align # noqa from .alignment import calc_fmllr # noqa from .alignment", ".ivector import segment_vad # noqa from .pronunciations import generate_pronunciations # noqa from .transcription", ".alignment import tree_stats # noqa; noqa from .helper import Counter, Stopped, run_mp, run_non_mp", "calc_lda_mllt # 
noqa from .alignment import compile_information # noqa from .alignment import compile_train_graphs", "import extract_ivectors # noqa from .ivector import gauss_to_post # noqa from .ivector import", "noqa from .alignment import calc_lda_mllt # noqa from .alignment import compile_information # noqa", "from .alignment import lda_acc_stats # noqa from .alignment import mono_align_equal # noqa from", ".ivector import acc_ivector_stats # noqa from .ivector import extract_ivectors # noqa from .ivector", "noqa from .ivector import gauss_to_post # noqa from .ivector import gmm_gselect # noqa", "from .alignment import acc_stats # noqa from .alignment import align # noqa from", "noqa from .alignment import compile_information # noqa from .alignment import compile_train_graphs # noqa", "Aligner\"\"\" from .alignment import acc_stats # noqa from .alignment import align # noqa", "from .alignment import calc_lda_mllt # noqa from .alignment import compile_information # noqa from", "# noqa from .alignment import convert_alignments # noqa from .alignment import create_align_model #", "extract_ivectors # noqa from .ivector import gauss_to_post # noqa from .ivector import gmm_gselect", "noqa from .pronunciations import generate_pronunciations # noqa from .transcription import transcribe, transcribe_fmllr #", "# noqa from .ivector import acc_global_stats # noqa from .ivector import acc_ivector_stats #", "# noqa from .alignment import calc_lda_mllt # noqa from .alignment import compile_information #", ".alignment import convert_alignments # noqa from .alignment import create_align_model # noqa from .alignment", "functions and classes for Montreal Forced Aligner\"\"\" from .alignment import acc_stats # noqa", "run_non_mp # noqa from .ivector import acc_global_stats # noqa from .ivector import acc_ivector_stats", "compile_train_graphs # noqa from .alignment import compute_alignment_improvement # noqa from .alignment import convert_ali_to_textgrids", "# noqa from .ivector import 
gmm_gselect # noqa from .ivector import segment_vad #", "import compile_train_graphs # noqa from .alignment import compute_alignment_improvement # noqa from .alignment import", "gauss_to_post # noqa from .ivector import gmm_gselect # noqa from .ivector import segment_vad" ]
[ "models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True, null=True) class Programms(models.Model): name = models.CharField(max_length=255) icon =", "About(models.Model): about_image = models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True, null=True) class", "about_exp1 = models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True, null=True) class Programms(models.Model): name = models.CharField(max_length=255)", "about_exp2 = models.TextField(blank=True, null=True) class Programms(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255) percentage", "from django.db import models class About(models.Model): about_image = models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True)", "= models.TextField(blank=True, null=True) class Programms(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255) percentage =", "null=True) about_exp2 = models.TextField(blank=True, null=True) class Programms(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255)", "class Programms(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255) percentage = models.CharField(max_length=25, blank=True, null=True)", "models class About(models.Model): about_image = models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True,", "import models class About(models.Model): about_image = models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True) about_exp2 =", "= models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True, null=True) class Programms(models.Model): name", "class About(models.Model): about_image = 
models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True, null=True)", "= models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True, null=True) class Programms(models.Model): name = models.CharField(max_length=255) icon", "null=True) class Programms(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255) percentage = models.CharField(max_length=25, blank=True,", "about_image = models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True, null=True) class Programms(models.Model):", "models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True) about_exp2 = models.TextField(blank=True, null=True) class Programms(models.Model): name =", "django.db import models class About(models.Model): about_image = models.ImageField(upload_to=\"about/\") about_exp1 = models.TextField(blank=True, null=True) about_exp2", "models.TextField(blank=True, null=True) class Programms(models.Model): name = models.CharField(max_length=255) icon = models.CharField(max_length=255) percentage = models.CharField(max_length=25," ]